| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
mc66c.py
|
tcoenraad/micropython-multical66c
| 0
|
12784251
|
<reponame>tcoenraad/micropython-multical66c
from machine import UART
from umqtt.robust import MQTTClient
import uos
from time import sleep
MQTT_HOST = "192.168.1.8"
UART_ID = 0
def fetch_standard_data():
device = UART(UART_ID, baudrate=300, bits=7,
parity=0, stop=2, timeout=3000)
device.write("/#1".encode("utf-8"))
sleep(1)
# The Kamstrup Multical 66C docs specify stopbits = 2; however, on the ESP8266 this results in gibberish
device.init(baudrate=1200, bits=7, parity=0, stop=1, timeout=3000)
response = device.read(87)
# whitespace is sometimes dropped from readings,
# but the ASCII numbers themselves are always complete,
# so split the response into fixed-width fields manually
data = response.decode("utf-8").replace(" ", "")
parts = [data[i:i+7] for i in range(0, len(data), 7)]
return {
"energy": int(parts[0]) / 100,
"volume": int(parts[1]) / 100,
"temperature_flow": int(parts[3]) / 100,
"temperature_return": int(parts[4]) / 100
}
def update():
c = MQTTClient("umqtt_client", MQTT_HOST)
c.connect()
standard_data = fetch_standard_data()
print("Publishing: {}".format(standard_data))
c.publish("mc66c/energy", str(standard_data["energy"]), True)
c.publish("mc66c/volume", str(standard_data["volume"]), True)
c.publish("mc66c/temperature_flow",
str(standard_data["temperature_flow"]), True)
c.publish("mc66c/temperature_return",
str(standard_data["temperature_return"]), True)
| 2.71875
| 3
|
fincorpy/__init__.py
|
Fincor-Blockchain/fincorpy
| 0
|
12784252
|
<reponame>Fincor-Blockchain/fincorpy
from hdwallets import BIP32DerivationError as BIP32DerivationError # noqa: F401
from fincorpy._transaction import Transaction as Transaction # noqa: F401
from fincorpy._wallet import generate_wallet as generate_wallet # noqa: F401
from fincorpy._wallet import privkey_to_address as privkey_to_address # noqa: F401
from fincorpy._wallet import privkey_to_pubkey as privkey_to_pubkey # noqa: F401
from fincorpy._wallet import pubkey_to_address as pubkey_to_address # noqa: F401
from fincorpy._wallet import seed_to_privkey as seed_to_privkey # noqa: F401
| 1.554688
| 2
|
tests/test_visitors/test_ast/test_iterables/test_unpacking.py
|
Kvm99/wemake-python-styleguide
| 1
|
12784253
|
<reponame>Kvm99/wemake-python-styleguide
# -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.violations.consistency import (
IterableUnpackingViolation,
)
from wemake_python_styleguide.visitors.ast.iterables import (
IterableUnpackingVisitor,
)
args_unpacking_in_call = 'f(*args)'
spread_list_definition = '[1, 2, *numbers, 74]'
spread_set_definition = '{1, 2, *numbers, 74}'
spread_tuple_definition = '(1, 2, *numbers, 74)'
spread_assignment = 'first, *_ = [1, 2, 4, 3]'
wrong_list_definition = '[*numbers]'
wrong_set_definition = '{*numbers}'
wrong_tuple_definition = '(*numbers,)'
wrong_spread_assignment = '*_, = [1, 2, 4, 3]'
@pytest.mark.parametrize('code', [
args_unpacking_in_call,
spread_list_definition,
spread_set_definition,
spread_tuple_definition,
spread_assignment,
])
def test_correct_iterable_unpacking_usage(
assert_errors,
parse_ast_tree,
default_options,
code,
):
"""Testing that correct iterable unpacking is allowed."""
tree = parse_ast_tree(code)
visitor = IterableUnpackingVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
wrong_list_definition,
wrong_set_definition,
wrong_tuple_definition,
wrong_spread_assignment,
])
def test_unnecessary_iterable_unpacking_usage(
assert_errors,
parse_ast_tree,
default_options,
code,
):
"""Testing that unneccessary iterable unpacking is restricted."""
tree = parse_ast_tree(code)
visitor = IterableUnpackingVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [IterableUnpackingViolation])
| 2.328125
| 2
|
mpsci/distributions/t.py
|
WarrenWeckesser/mpsci
| 7
|
12784254
|
<filename>mpsci/distributions/t.py
"""
Student's t distribution
------------------------
"""
import mpmath
__all__ = ['pdf', 'logpdf', 'cdf', 'sf', 'invcdf', 'invsf', 'entropy']
def logpdf(x, df):
"""
Logarithm of the PDF of Student's t distribution.
"""
if df <= 0:
raise ValueError('df must be greater than 0')
with mpmath.extradps(5):
x = mpmath.mpf(x)
df = mpmath.mpf(df)
h = (df + 1) / 2
logp = (mpmath.loggamma(h)
- mpmath.log(df * mpmath.pi)/2
- mpmath.loggamma(df/2)
- h * mpmath.log1p(x**2/df))
return logp
def pdf(x, df):
"""
PDF of Student's t distribution.
"""
if df <= 0:
raise ValueError('df must be greater than 0')
return mpmath.exp(logpdf(x, df))
def cdf(x, df):
"""
CDF of Student's t distribution.
"""
if df <= 0:
raise ValueError('df must be greater than 0')
with mpmath.extradps(5):
half = mpmath.mp.one/2
x = mpmath.mpf(x)
df = mpmath.mpf(df)
h = (df + 1) / 2
p1 = x * mpmath.gamma(h)
p2 = mpmath.hyp2f1(half, h, 3*half, -x**2/df)
return half + p1*p2/mpmath.sqrt(mpmath.pi*df)/mpmath.gamma(df/2)
def sf(x, df):
"""
Survival function of Student's t distribution.
"""
if df <= 0:
raise ValueError('df must be greater than 0')
with mpmath.extradps(5):
half = mpmath.mp.one/2
x = mpmath.mpf(x)
df = mpmath.mpf(df)
h = (df + 1) / 2
p1 = x * mpmath.gamma(h)
p2 = mpmath.hyp2f1(half, h, 3*half, -x**2/df)
return half - p1*p2/mpmath.sqrt(mpmath.pi*df)/mpmath.gamma(df/2)
def invcdf(p, df):
"""
Inverse of the CDF for Student's t distribution.
This function is also known as the quantile function or the percent
point function.
For values far in the tails of the distribution, the solution might
not be accurate. Check the results, and increase the precision of
the calculation if necessary.
"""
if p < 0 or p > 1:
raise ValueError('p must be in the interval [0, 1]')
if df <= 0:
raise ValueError('df must be greater than 0')
if p == 0:
return mpmath.ninf
if p == 1:
return mpmath.inf
with mpmath.extradps(5):
p = mpmath.mpf(p)
if p > 0.5:
p0 = mpmath.mp.one - p
else:
p0 = p
df = mpmath.mpf(df)
x0 = -mpmath.mp.one
count = 0
while cdf(x0, df) > p0:
x0 = 2*x0
count += 1
if count > mpmath.mp.prec / 2:
raise RuntimeError('failed to find a bracketing interval')
def _func(x):
return cdf(x, df) - p0
x = mpmath.findroot(_func, (x0, x0/2), solver='anderson')
if p > 0.5:
x = -x
return x
def invsf(p, df):
"""
Inverse of the survival function for Student's t distribution.
For values far in the tails of the distribution, the solution might
not be accurate. Check the results, and increase the precision of
the calculation if necessary.
"""
if p < 0 or p > 1:
raise ValueError('p must be in the interval [0, 1]')
if df <= 0:
raise ValueError('df must be greater than 0')
return -invcdf(p, df)
def entropy(df):
"""
Entropy of Student's t distribution.
"""
if df <= 0:
raise ValueError('df must be greater than 0')
with mpmath.extradps(5):
df = mpmath.mpf(df)
h = df/2
h1 = (df + 1)/2
return (h1*(mpmath.digamma(h1) - mpmath.digamma(h)) +
mpmath.log(mpmath.sqrt(df)*mpmath.beta(h, 0.5)))
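# --- Illustrative usage sketch (not part of the original module) ---
# A quick self-check under assumed inputs: invcdf should approximately invert
# cdf. Only functions defined above and mpmath itself are used.
if __name__ == '__main__':
    with mpmath.workdps(30):
        p = cdf(1.5, 7)
        print(p)                 # P(T <= 1.5) for df = 7
        print(invcdf(p, 7))      # expected to be approximately 1.5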
| 2.796875
| 3
|
scripts/UtilitiesConvertCharacter.py
|
CrackerCat/pwndra
| 524
|
12784255
|
# Convert an operand to characters
#@author b0bb
#@category Pwn
#@keybinding shift r
#@menupath Analysis.Pwn.Utilities.Convert to Char
#@toolbar
import ghidra.app.cmd.equate.SetEquateCmd as SetEquateCmd
import ghidra.program.util.OperandFieldLocation as OperandFieldLocation
import ghidra.program.model.lang.OperandType as OperandType
def run():
if type(currentLocation) is not OperandFieldLocation:
return
addr = currentLocation.getAddress()
inst = currentProgram.getListing().getInstructionAt(addr)
opin = currentLocation.getOperandIndex()
if inst.getOperandType(opin) == OperandType.SCALAR:
string = ''
scalar = inst.getScalar(opin)
bvalue = scalar.byteArrayValue()
if not currentProgram.getLanguage().isBigEndian():
bvalue.reverse()
for value in bvalue:
if value < 0x20 or value > 0x7e:
string += '\\x%02x' % value
else:
string += chr(value)
cmd = SetEquateCmd('"%s"' % string, addr, opin, scalar.getValue())
state.getTool().execute(cmd, currentProgram)
run()
| 2.375
| 2
|
invenio_assets/filters.py
|
pazembrz/invenio-assets
| 1
|
12784256
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Filters for webassets."""
from __future__ import absolute_import, print_function
import json
import os
import re
from subprocess import PIPE, Popen
from babel.messages.pofile import read_po
from flask import current_app
from webassets.filter import Filter, register_filter
from webassets.filter.cleancss import CleanCSS
from webassets.filter.requirejs import RequireJSFilter as RequireJSFilterBase
__all__ = ('AngularGettextFilter', 'RequireJSFilter', 'CleanCSSFilter', )
class RequireJSFilter(RequireJSFilterBase):
"""Optimize AMD-style modularized JavaScript into a single asset.
Adds support for excluding files that are already defined in other bundles.
"""
name = 'requirejsexclude'
def __init__(self, *args, **kwargs):
r"""Initialize filter.
:param \*args: Arguments are forwarded to parent class.
:param \**kwargs: Keyword arguments are forwarded to parent class
except the *exclude* keyword.
"""
self.excluded_bundles = kwargs.pop('exclude', [])
super(RequireJSFilter, self).__init__(*args, **kwargs)
def setup(self):
"""Setup filter (only called when filter is actually used)."""
super(RequireJSFilter, self).setup()
excluded_files = []
for bundle in self.excluded_bundles:
excluded_files.extend(
map(lambda f: os.path.splitext(f)[0],
bundle.contents)
)
if excluded_files:
self.argv.append(
'exclude={0}'.format(','.join(excluded_files))
)
class CleanCSSFilter(CleanCSS):
"""Minify CSS using cleancss.
Implements opener capable of rebasing relative CSS URLs against
``COLLECT_STATIC_ROOT`` using both cleancss v3 or v4.
"""
name = 'cleancssurl'
def setup(self):
"""Initialize filter just before it will be used."""
super(CleanCSSFilter, self).setup()
self.root = current_app.config.get('COLLECT_STATIC_ROOT')
@property
def rebase_opt(self):
"""Determine which option name to use."""
if not hasattr(self, '_rebase_opt'):
# out = b"MAJOR.MINOR.REVISION" // b"3.4.19" or b"4.0.0"
out, err = Popen(
['cleancss', '--version'], stdout=PIPE).communicate()
ver = int(out[:out.index(b'.')])
self._rebase_opt = ['--root', self.root] if ver == 3 else []
return self._rebase_opt
def input(self, _in, out, **kw):
"""Input filtering."""
args = [self.binary or 'cleancss'] + self.rebase_opt
if self.extra_args:
args.extend(self.extra_args)
self.subprocess(args, out, _in)
_re_language_code = re.compile(
r'"Language: (?P<language_code>[A-Za-z_]{2,}(_[A-Za-z]{2,})?)\\n"'
)
"""Match language code group in PO file."""
class AngularGettextFilter(Filter):
"""Compile GNU gettext messages to angular-gettext module."""
name = 'angular-gettext'
options = {
'catalog_name': None,
}
def output(self, _in, out, **kwargs):
"""Wrap translation in Angular module."""
out.write(
'angular.module("{0}", ["gettext"]).run('
'["gettextCatalog", function (gettextCatalog) {{'.format(
self.catalog_name
)
)
out.write(_in.read())
out.write('}]);')
def input(self, _in, out, **kwargs):
"""Process individual translation file."""
language_code = _re_language_code.search(_in.read()).group(
'language_code'
)
_in.seek(0)  # move back to the beginning after matching the language code
catalog = read_po(_in)
out.write('gettextCatalog.setStrings("{0}", '.format(language_code))
out.write(json.dumps({
key: value.string for key, value in catalog._messages.items()
if key and value.string
}))
out.write(');')
# Register filters on webassets.
register_filter(AngularGettextFilter)
register_filter(CleanCSSFilter)
register_filter(RequireJSFilter)
| 2.03125
| 2
|
aha/controller/translation.py
|
Letractively/aha-gae
| 0
|
12784257
|
# -*- coding: utf-8 -*-
##############################################################################
#
# translation.py
# Module defining a bunch of functions to be used for i18n translation
#
# Copyright (c) 2010 Webcore Corp. All Rights Reserved.
#
##############################################################################
""" translation.py - Module defining bunch of function to be used for i18n
transration.
$Id: translation.py 629 2010-06-28 07:57:53Z ats $
"""
__author__ = '<NAME> <<EMAIL>>'
__docformat__ = 'plaintext'
__licence__ = 'BSD'
import os
import gettext
__all__ = ['get_i18ndir', 'get_gettextobject', 'get_languages']
def get_i18ndir():
"""
A function to obtain the i18n directory
"""
udir = os.path.dirname(os.path.split(__file__)[0])
dir = os.path.join(udir, 'i18n')
return dir
def get_gettextobject(domain = 'aha', languages = None):
"""
A function to obtain a gettext translation object
"""
dir = get_i18ndir()
t = gettext.translation(domain = domain,
languages = languages,
localedir = dir, fallback = True)
return t
def get_languages(s):
"""
A function to obtain language settings via Accept-Language header.
"""
langs = [''.join(x.split(';')[:1]) for x in s]
return langs
def main(): pass
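# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of wiring the helpers together; the Accept-Language values
# below are made up for illustration, and the fallback translation simply
# echoes the original string when no catalog is found.
if __name__ == '__main__':
    langs = get_languages(['en-US;q=0.9', 'ja;q=0.8'])
    t = get_gettextobject(languages=langs)
    _ = t.gettext
    print(_('hello'))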
| 2.203125
| 2
|
carpark_agent/detect_cars.py
|
fetchai/carpark_agent
| 10
|
12784258
|
<filename>carpark_agent/detect_cars.py<gh_stars>1-10
import os
ROOT_DIR = os.path.abspath("../src/Mask_RCNN/")
MODEL_DIR = os.path.abspath("../src/coco/")
COCO_MODEL_PATH = os.path.join(MODEL_DIR, "mask_rcnn_coco.h5")
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
sys.path.append(ROOT_DIR)
import mrcnn.utils
import mrcnn.model
import mrcnn.visualize
import time
"""Load the configuration file for the MS-COCO model. """
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))
import coco
class InferenceConfig(coco.CocoConfig):
"""Overwrite the batch size.
Batch size = GPU_COUNT * IMAGES_PER_GPU
For our needs, we need batch size = 1
"""
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.2
class Detect(object):
def __init__(self, name):
self.dir = name
self.config = InferenceConfig()
def filter_vehicles(self, detect_objects):
"""Skip boxes that do not indicate carsm busses or trucks.
Coco model, IDs are:
- 3 car,
- 4 motorbike
- 6 bus
- 7 train
- 8 truck
- 9 boat
"""
mask = np.array([i in (3, 8, 6) for i in detect_objects['class_ids']], dtype=bool)
vehicles = {
'rois': detect_objects['rois'][mask],
'class_ids': detect_objects['class_ids'][mask],
'scores': detect_objects['scores'][mask],
'masks': detect_objects['masks'][:,:,mask]
}
return vehicles
def main(self):
print("detecting cars...")
"""Create a model object and load the weights."""
model = mrcnn.model.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=self.config)
model.load_weights(COCO_MODEL_PATH, by_name=True)
"""Check class numbers"""
dataset = coco.CocoDataset()
dataset.load_coco(MODEL_DIR, "train")
dataset.prepare()
"""Load an image"""
IMAGE = self.dir #os.path.abspath("../../resources/images/stjohns.jpg")
image = skimage.io.imread(IMAGE)
results = model.detect([image], verbose=1)
"""Visualize results"""
r = results[0]
r = self.filter_vehicles(r)
"""Save image"""
t = int(time.time())
name = str(t) + ".png"
path = os.path.abspath("../output")
mrcnn.visualize.display_instances(
path,
name,
image,
r['rois'],
r['masks'],
r['class_ids'],
dataset.class_names,
r['scores'],
title='# of detected cars: {}'.format(len(r['class_ids'])))
return len(r['class_ids'])
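# --- Illustrative usage sketch (not part of the original file) ---
# A minimal sketch of running the detector on a single image; the image path
# is taken from the comment above, and the COCO weights are assumed to be
# present at COCO_MODEL_PATH.
if __name__ == "__main__":
    detector = Detect("../../resources/images/stjohns.jpg")
    num_vehicles = detector.main()
    print("Vehicles detected: {}".format(num_vehicles))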
| 2.265625
| 2
|
tests/test_websocket.py
|
schnitzelbub/bocadillo
| 0
|
12784259
|
from contextlib import suppress
import pytest
from bocadillo import WebSocket, API, WebSocketDisconnect
from bocadillo.constants import WEBSOCKET_CLOSE_CODES
# Basic usage
def test_websocket_route(api: API):
@api.websocket_route("/chat")
async def chat(ws: WebSocket):
async with ws:
assert await ws.receive_text() == "ping"
await ws.send_text("pong")
with api.client.websocket_connect("/chat") as client:
client.send_text("ping")
assert client.receive_text() == "pong"
def test_websocket_route_parameters(api: API):
@api.websocket_route("/chat/{room}")
async def chat_room(ws: WebSocket, room: str):
async with ws:
await ws.send(room)
with api.client.websocket_connect("/chat/foo") as client:
assert client.receive_text() == "foo"
def test_if_route_parameter_fails_validation_then_403(api: API):
@api.websocket_route("/chat/{id:d}")
async def chat_room(ws: WebSocket, id: int):
pass
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/chat/foo"):
pass
assert ctx.value.code == 403
def test_non_existing_endpoint_returns_403_as_per_the_asgi_spec(api: API):
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/foo"):
pass
assert ctx.value.code == 403
def test_reject_closes_with_403(api: API):
@api.websocket_route("/foo")
async def foo(ws: WebSocket):
await ws.reject()
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/foo"):
pass
assert ctx.value.code == 403
def test_iter_websocket(api: API):
@api.websocket_route("/chat")
async def chat(ws: WebSocket):
async with ws:
async for message in ws:
await ws.send_text(f"You said: {message}")
with api.client.websocket_connect("/chat") as ws_client:
ws_client.send_text("ping")
assert ws_client.receive_text() == "You said: ping"
ws_client.send_text("pong")
assert ws_client.receive_text() == "You said: pong"
def test_can_close_within_context(api: API):
@api.websocket_route("/test")
async def test(ws: WebSocket):
async with ws:
await ws.close(4242)
with api.client.websocket_connect("/test") as client:
message = client.receive()
assert message == {"type": "websocket.close", "code": 4242}
def test_websocket_url(api: API):
@api.websocket_route("/test")
async def test(ws: WebSocket):
async with ws:
assert ws.url == "ws://testserver/test"
assert ws.url.path == "/test"
assert ws.url.port is None
assert ws.url.scheme == "ws"
assert ws.url.hostname == "testserver"
assert ws.url.query == ""
assert ws.url.is_secure is False
with api.client.websocket_connect("/test"):
pass
# Encoding / decoding of messages
@pytest.mark.parametrize(
"receive_type, example_message, expected_type",
[
("bytes", b"Hello", bytes),
("text", "Hello", str),
("json", {"message": "Hello"}, dict),
],
)
def test_receive_type(api: API, receive_type, example_message, expected_type):
@api.websocket_route("/chat", receive_type=receive_type)
async def chat(ws: WebSocket):
async with ws:
message = await ws.receive()
assert type(message) == expected_type
with api.client.websocket_connect("/chat") as client:
getattr(client, f"send_{receive_type}")(example_message)
@pytest.mark.parametrize(
"send_type, example_message, expected_type",
[
("bytes", b"Hello", bytes),
("text", "Hello", str),
("json", {"message": "Hello"}, dict),
],
)
def test_send_type(api: API, send_type, example_message, expected_type):
@api.websocket_route("/chat", send_type=send_type)
async def chat(ws: WebSocket):
async with ws:
await ws.send(example_message)
with api.client.websocket_connect("/chat") as client:
message = getattr(client, f"receive_{send_type}")()
assert type(message) == expected_type
assert message == example_message
@pytest.mark.parametrize(
"value_type, example_message, expected_type",
[
("bytes", b"Hello", bytes),
("text", "Hello", str),
("json", {"message": "Hello"}, dict),
],
)
def test_value_type(api: API, value_type, example_message, expected_type):
@api.websocket_route("/chat", value_type=value_type)
async def chat(ws: WebSocket):
async with ws:
message = await ws.receive()
assert type(message) == expected_type
await ws.send(example_message)
with api.client.websocket_connect("/chat") as client:
getattr(client, f"send_{value_type}")(example_message)
assert type(getattr(client, f"receive_{value_type}")()) == expected_type
def test_receive_and_send_event(api: API):
@api.websocket_route("/chat", value_type="event")
async def chat(ws: WebSocket):
async with ws:
message = await ws.receive()
assert message == {"type": "websocket.receive", "text": "ping"}
await ws.send({"type": "websocket.send", "text": "pong"})
with api.client.websocket_connect("/chat") as client:
client.send_text("ping")
assert client.receive_text() == "pong"
# Disconnect errors
@pytest.mark.parametrize(
"close_codes, code, expected_caught",
[
*((None, code, True) for code in (1000, 1001)),
*(
(None, code, False)
for code in WEBSOCKET_CLOSE_CODES
if code not in (1000, 1001)
),
((1000,), 1001, False),
((1000,), 1000, True),
*(((), code, False) for code in WEBSOCKET_CLOSE_CODES),
*((all, code, True) for code in WEBSOCKET_CLOSE_CODES),
],
)
def test_catch_disconnect(api: API, close_codes, code, expected_caught):
caught = False
@api.websocket_route("/chat", caught_close_codes=close_codes)
async def chat(ws: WebSocket):
nonlocal caught
try:
async with ws:
await ws.receive() # will never receive
caught = True
except WebSocketDisconnect as exc:
# The exception should have been raised only if we told the
# WebSocket route not to catch it.
assert exc.code not in ws.caught_close_codes
with api.client.websocket_connect("/chat") as client:
# Close immediately.
client.close(code)
assert caught is expected_caught
# Server error handling
class Oops(Exception):
pass
def test_if_exception_raised_in_context_then_closed_with_1011(api: API):
@api.websocket_route("/fail")
async def fail(ws: WebSocket):
async with ws:
raise Oops
with suppress(Oops):
with api.client.websocket_connect("/fail") as client:
message = client.receive()
assert message == {"type": "websocket.close", "code": 1011}
def test_accepted_and_exception_raised_then_closed_with_1011(api: API):
@api.websocket_route("/fail")
async def fail(ws: WebSocket):
await ws.accept()
raise Oops
with suppress(Oops):
with api.client.websocket_connect("/fail") as client:
message = client.receive()
assert message == {"type": "websocket.close", "code": 1011}
def test_if_not_accepted_and_exception_raised_then_closed_with_1011(api: API):
@api.websocket_route("/fail")
async def fail(_):
raise Oops
with pytest.raises(WebSocketDisconnect) as ctx:
with api.client.websocket_connect("/fail"):
pass
assert ctx.value.code == 1011
def test_context_does_not_silence_exceptions(api: API):
cleaned_up = False
@api.websocket_route("/fail")
async def fail(ws):
nonlocal cleaned_up
async with ws:
raise Oops
cleaned_up = True
with suppress(Oops):
with api.client.websocket_connect("/fail"):
pass
assert not cleaned_up
| 2.140625
| 2
|
anemoi/__init__.py
|
davidkyle210/anemoi
| 18
|
12784260
|
from ._version import __version__
from anemoi.mast import MetMast
import anemoi
import anemoi.io.database
import anemoi.io.read_data
import anemoi.io.write_data
import anemoi.io.references
import anemoi.utils.mast_data
import anemoi.utils.gis
import anemoi.analysis.weibull
import anemoi.analysis.wind_rose
import anemoi.analysis.correlate
import anemoi.analysis.shear
import anemoi.plotting.plotting
import anemoi.plotting.correlate
import anemoi.plotting.shear
import anemoi.plotting.references
| 0.976563
| 1
|
custom_components/arpansa_uv/pyarpansa.py
|
joshuar/ha_arpansa_uv
| 0
|
12784261
|
"""ARPANSA """
from cProfile import run
from multiprocessing.connection import Client
from bs4 import BeautifulSoup
import lxml
import aiohttp
import asyncio
from .const import ARPANSA_URL
class Arpansa:
"""Arpansa class fetches the latest measurements from the ARPANSA site"""
def __init__(
self,session: aiohttp.ClientSession
) -> None:
self._session = session
self.measurements = None
async def fetchLatestMeasurements(self):
"""Retrieve the latest data from the ARPANSA site."""
try:
async with self._session.get(ARPANSA_URL) as response:
t = await response.text()
self.measurements = BeautifulSoup(t,'xml')
if response.status != 200:
raise ApiError(f"Unexpected response from ARPANSA server: {response.status}")
except Exception as err:
raise ApiError from err
def getAllLocations(self) -> list:
"""Get the names of all locations."""
rs = self.measurements.find_all("location")
allLocations = []
for l in rs:
allLocations.append(l.get("id"))
return allLocations
def getAllLatest(self) -> list:
"""Get the latest measurements and details for all locations."""
rs = self.measurements.find_all("location")
allLocations = []
for l in rs:
thisLocation = extractInfo(l)
thisLocation["friendlyname"] = l.get("id")
allLocations.append(thisLocation)
return allLocations
def getLatest(self,name) -> dict:
"""Get the latest measurements and details for a specified location."""
rs = self.measurements.find("location", {"id": name})
info = extractInfo(rs)
info["friendlyname"] = name
return info
def extractInfo(rs) -> dict:
"""Convert a BeautifulSoup ResultSet into a dictionary."""
extracted = {}
for state in rs:
if state.name is not None:
extracted[state.name] = state.text
return extracted
class ApiError(Exception):
"""Raised when there is a problem accessing the ARPANSA data."""
pass
async def main():
"""Example usage of the class"""
async with aiohttp.ClientSession() as session:
arpansa = Arpansa(session)
await arpansa.fetchLatestMeasurements()
for measurement in arpansa.getAllLatest():
print(measurement)
location = arpansa.getLatest("Brisbane")
print(location)
if __name__ == "__main__":
import time
s = time.perf_counter()
asyncio.run(main())
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
| 2.984375
| 3
|
API/src/main.py
|
DeVinci-Innovation-Center/SMART-INVENTORY-DB-API
| 0
|
12784262
|
import os
import crud, models, schemas
from database import SessionLocal
from fastapi import FastAPI, Depends, HTTPException, Request, Form
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from typing import List
app = FastAPI(root_path=os.environ['ROOT_PATH'])
origins = ['*']
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*']
)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
# MAIN
@app.get("/")
def root():
return {"message": "Welcome to Smart Inventory"}
# USERS
@app.get("/users/", response_model=List[schemas.User])
def read_all_users(db: Session = Depends(get_db)):
return crud.get_all_users(db)
@app.get("/user/{uid}/", response_model=schemas.User)
def read_user_by_uid(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/user/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, user.uid)
if db_user:
raise HTTPException(status_code=400, detail="User already exists")
return crud.create_user(db=db, user=user)
@app.delete("/user/{uid}/")
def delete_user_by_uid(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
db.delete(db_user)
db.commit()
return {'Deleted user with uid': uid}
# CABINETS
@app.get("/cabinets/", response_model=List[schemas.Cabinet])
def read_all_cabinets(db: Session = Depends(get_db)):
return crud.get_all_cabinets(db)
@app.get("/cabinet/{id}/", response_model=schemas.Cabinet)
def read_cabinet_by_id(id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
return db_cabinet
@app.post("/cabinet/", response_model=schemas.Cabinet)
def create_cabinet(cabinet: schemas.CabinetCreate, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, cabinet.id)
if db_cabinet:
raise HTTPException(status_code=400, detail="Cabinet already exists")
return crud.create_cabinet(db, cabinet)
@app.delete("/cabinet/{id}/")
def delete_cabinet_by_id(id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
db.delete(db_cabinet)
db.commit()
return {'Deleted cabinet with id': id}
# CATEGORIES
@app.get("/categories/", response_model=List[schemas.Category]) # reads all categories
def read_all_categories(db: Session = Depends(get_db)):
return crud.get_all_categories(db)
@app.get("/categories/root/", response_model=List[schemas.Category]) # reads all root categories
def read_root_categories(db: Session = Depends(get_db)):
return crud.get_root_categories(db)
@app.get("/category/{id}/", response_model=schemas.Category)
def read_category_by_id(id: str, db: Session = Depends(get_db)):
db_category = crud.get_category_by_id(db, id)
if db_category is None:
raise HTTPException(status_code=404, detail="Category not found")
return db_category
@app.get("/categories/subcategories/{parent_id}/", response_model=List[schemas.Category]) # reads all sub-categories of a category
def read_sub_categories(parent_id: int, db: Session = Depends(get_db)):
parent_category = crud.get_category_by_id(db, parent_id)
if not parent_category:
raise HTTPException(status_code=404, detail="Parent category not found")
return crud.get_sub_categories(db, parent_id)
@app.post("/category/", response_model=schemas.Category)
def create_category(category: schemas.CategoryCreate, db: Session = Depends(get_db)):
db_category = crud.get_category_by_title(db, category.title)
if db_category:
raise HTTPException(status_code=400, detail="Category already exists")
if category.parent_id is not None:
db_parent_category = crud.get_category_by_id(db, category.parent_id)
if db_parent_category is None:
raise HTTPException(status_code=404, detail="Parent category not found")
return crud.create_category(db, category)
@app.delete("/category/{id}/")
def delete_category_by_id(id: int, db: Session = Depends(get_db)):
db_category = crud.get_category_by_id(db, id)
if db_category is None:
raise HTTPException(status_code=404, detail="Category not found")
db.delete(db_category)
db.commit()
return {'Deleted category with id': id}
# ITEMS
@app.get("/items/", response_model=List[schemas.Item])
def read_all_items(db: Session = Depends(get_db)):
return crud.get_all_items(db)
@app.get("/item/{id}/", response_model=schemas.Item)
def read_item_by_id(id: int, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
return db_item
@app.get("/categories/{category_id}/items/", response_model=List[schemas.Item]) # reads all items under a category
def read_items_by_category(category_id: int, db: Session = Depends(get_db)):
category = crud.get_category_by_id(db, category_id)
if not category:
raise HTTPException(status_code=404, detail="Category not found")
return crud.get_items_by_category_id(db, category_id)
@app.post("/item/", response_model=schemas.Item)
def create_item(item: schemas.ItemCreate, db: Session = Depends(get_db)):
if item.category_id is not None:
db_category = crud.get_category_by_id(db, item.category_id)
if not db_category:
raise HTTPException(status_code=404, detail="Category not found")
db_item = crud.get_item_by_title(db, item.title)
if db_item:
raise HTTPException(status_code=400, detail="Item already exists")
return crud.create_item(db, item)
@app.delete("/item/{id}/")
def delete_item_by_id(id: int, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
db.delete(db_item)
db.commit()
return {'Deleted item with id': id}
# ORDER REQUESTS
@app.get("/order-requests/", response_model=List[schemas.OrderRequest])
def read_all_order_requests(db: Session = Depends(get_db)):
return crud.get_all_order_requests(db)
@app.get("/order-requests/item/{id}/", response_model=List[schemas.OrderRequest])
def read_order_requests_by_item_id(id: int, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
return crud.get_order_requests_by_item_id(db, id)
@app.get("/order-requests/user/{uid}/", response_model=List[schemas.OrderRequest])
def read_order_requests_by_user_id(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return crud.get_order_requests_by_user_id(db, uid)
@app.get("/order-requests/state/{state}/", response_model=List[schemas.OrderRequest])
def read_order_requests_by_state(state: int, db: Session = Depends(get_db)):
return crud.get_order_requests_by_state(db, state)
@app.post("/order-request/", response_model=schemas.OrderRequest)
def create_order_request(order_request: schemas.OrderRequestCreate, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, order_request.item_id)
db_user = crud.get_user_by_uid(db, order_request.user_id)
if db_item is None or db_user is None:
raise HTTPException(status_code=404, detail="Item or user not found")
db_order_request = crud.get_order_requests_by_item_and_user_id(db, order_request.item_id, order_request.user_id)
if db_order_request:
raise HTTPException(status_code=400, detail="Order already requested by this user")
return crud.create_order_request(db, order_request)
@app.delete("/order-request/{id}/")
def delete_order_request_by_id(id: int, db: Session = Depends(get_db)):
db_order_request = crud.get_order_request_by_id(db, id)
if db_order_request is None:
raise HTTPException(status_code=404, detail="Order request not found")
db.delete(db_order_request)
db.commit()
return {'Deleted order request with id': id}
# STORAGE UNITS
@app.get("/storage-units/", response_model=List[schemas.StorageUnit])
def read_all_storage_units(db: Session = Depends(get_db)):
return crud.get_all_storage_units(db)
@app.get("/storage-unit/{id}/", response_model=schemas.StorageUnit)
def read_storage_unit_by_id(id: int, db: Session = Depends(get_db)):
db_storage_unit = crud.get_storage_unit_by_id(db, id)
if db_storage_unit is None:
raise HTTPException(status_code=404, detail="Storage unit not found")
return db_storage_unit
@app.get("/storage-units/cabinet/{cabinet_id}/", response_model=List[schemas.StorageUnit])
def read_storage_units_by_cabinet_id(cabinet_id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
return crud.get_storage_units_by_cabinet_id(db, cabinet_id)
@app.post("/storage-unit/", response_model=schemas.StorageUnit)
def create_storage_unit(storage_unit: schemas.StorageUnitCreate, db: Session = Depends(get_db)):
db_item = crud.get_item_by_id(db, storage_unit.item_id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found")
if storage_unit.cabinet_id is not None:
db_cabinet = crud.get_cabinet_by_id(db, storage_unit.cabinet_id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
db_storage_unit = crud.get_storage_unit_by_id(db, storage_unit.id)
if db_storage_unit:
raise HTTPException(status_code=400, detail="Storage unit ID already assigned")
return crud.create_storage_unit(db, storage_unit)
@app.delete("/storage-unit/{id}/")
def delete_storage_unit_by_id(id: int, db: Session = Depends(get_db)):
db_storage_unit = crud.get_storage_unit_by_id(db, id)
if db_storage_unit is None:
raise HTTPException(status_code=404, detail="Storage unit not found")
db.delete(db_storage_unit)
db.commit()
return {'Deleted storage unit with id': id}
# CABINETS UNLOCK ATTEMPTS
@app.get("/unlock-attempts/", response_model=List[schemas.CabinetUnlockAttempt])
def read_all_unlock_attempts(db: Session = Depends(get_db)):
return crud.get_all_unlock_attempts(db)
@app.get("/unlock-attempts/cabinet/{cabinet_id}/", response_model=List[schemas.CabinetUnlockAttempt])
def read_unlock_attempts_by_cabinet_id(cabinet_id: str, db: Session = Depends(get_db)):
db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)
if db_cabinet is None:
raise HTTPException(status_code=404, detail="Cabinet not found")
return crud.get_unlock_attempts_by_cabinet_id(db, cabinet_id)
@app.get("/unlock-attempts/user/{uid}/", response_model=List[schemas.CabinetUnlockAttempt])
def read_unlock_attempts_by_user_id(uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return crud.get_unlock_attempts_by_user_id(db, uid)
@app.get("/unlock-attempts/cabinet/{cabinet_id}/user/{uid}/", response_model=List[schemas.CabinetUnlockAttempt])
def read_unlock_attempts_by_cabinet_and_user_id(cabinet_id, uid: str, db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, uid)
db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)
if db_user is None or db_cabinet is None:
raise HTTPException(status_code=404, detail="User or cabinet not found")
return crud.get_unlock_attempts_by_cabinet_and_user_id(db, cabinet_id, uid)
@app.post("/unlock-attempt/", response_model=schemas.CabinetUnlockAttempt)
def create_unlock_attempt(unlock_attempt: schemas.CabinetUnlockAttemptCreate , db: Session = Depends(get_db)):
db_user = crud.get_user_by_uid(db, unlock_attempt.user_id)
db_cabinet = crud.get_cabinet_by_id(db, unlock_attempt.cabinet_id)
if db_user is None or db_cabinet is None:
raise HTTPException(status_code=404, detail="User or cabinet not found")
return crud.create_unlock_attempt(db, unlock_attempt)
@app.delete("/unlock-attempts/days/{n}/")
def delete_unlock_attempts_older_than(n: int, db: Session = Depends(get_db)):
db.execute(f"delete from cabinets_unlock_attempts where date < now() - interval '{n} days';")
db.commit()
return {'Deleted all cabinets unlock attempts older than number of days': n}
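# --- Illustrative usage sketch (not part of the original file) ---
# A minimal sketch of exercising the root endpoint with FastAPI's TestClient;
# it assumes ROOT_PATH is set in the environment before this module is
# imported and that the database configured in database.py is reachable.
if __name__ == "__main__":
    from fastapi.testclient import TestClient
    client = TestClient(app)
    response = client.get("/")
    print(response.json())  # {"message": "Welcome to Smart Inventory"}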
| 2.34375
| 2
|
tests/tests.py
|
penzance/student_locations
| 1
|
12784263
|
from unittest import TestCase
from mock import patch, ANY, DEFAULT, Mock, MagicMock
from django.test import RequestFactory
from django_auth_lti import const
from student_locations.views import index, lti_launch, main
@patch.multiple('student_locations.views', render=DEFAULT)
class TestMapView(TestCase):
longMessage = True
def setUp(self):
self.resource_link_id = '1234abcd'
self.section_id = 5678
self.sis_section_id = 8989
self.request = RequestFactory().get('/fake-path')
self.request.user = Mock(name='user_mock')
self.request.user.is_authenticated.return_value = True
self.request.session = {
'LTI_LAUNCH': {
'resource_link_id': self.resource_link_id,
'roles': [const.INSTRUCTOR],
'user_id' : 'user123'
}
}
def getpostrequest(self):
request = RequestFactory().post('/fake-path')
request.user = Mock(name='user_mock')
request.user.is_authenticated.return_value = True
request.session = {
'LTI_LAUNCH': {
'resource_link_id': self.resource_link_id,
'roles': [const.INSTRUCTOR],
'user_id' : 'user123'
}
}
return request
# def get_render_context_value(self, render_mock):
# """ Returns the value of the context dictionary key associated with the render mock object """
# context = render_mock.call_args[0][2]
# return context.get(context_key)
def test_view_index(self, render):
""" test that the index view renders the index page """
request = self.request
index(request)
render.assert_called_with(request, 'student_locations/index.html')
@patch('student_locations.views.redirect')
def test_view_lti_launch_success(self, redirect_mock, render):
""" test that the lti_launch view renders the main view on success """
request = self.getpostrequest()
lti_launch(request)
redirect_mock.assert_called_with('sl:main')
@patch('student_locations.views.validaterequiredltiparams')
def test_view_lti_launch_user_not_authenticated(self, valid_lti_params_mock, render):
""" test that the lti_launch view renders the error page if the required
LTI params are not present in the session """
request = self.getpostrequest()
valid_lti_params_mock.return_value = False
lti_launch(request)
render.assert_called_with(request, 'student_locations/error.html', ANY)
def test_view_main(self, render):
""" test that the main view renders the map_view page """
request = self.request
main(request)
render.assert_called_with(request, 'student_locations/map_view.html', ANY)
| 2.296875
| 2
|
exhaust/tests/posts/test_sitemaps.py
|
lewiscollard/exhaust
| 0
|
12784264
|
from datetime import timedelta
from xml.etree import ElementTree
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from exhaust.tests.factories import CategoryFactory, PostFactory
class SitemapsTestCase(TestCase):
def test_posts_sitemap(self):
PostFactory.create_batch(5, online=True)
# Check that offline ones aren't being shown.
PostFactory.create(online=False, date=now() - timedelta(minutes=1))
# ...and ones with a future publication date.
PostFactory.create(online=True, date=now() + timedelta(days=1))
response = self.client.get(reverse('django.contrib.sitemaps.views.sitemap'))
self.assertEqual(response.status_code, 200)
tree = ElementTree.fromstring(response.content.decode('utf-8'))
self.assertEqual(len(list(tree)), 5)
def test_categories_sitemap(self):
# Ensure unused categories are not shown in the sitemap, which
# includes those that are only assigned to a post that is offline.
used_category, unused_category, unused_category_2 = CategoryFactory.create_batch(3)
post = PostFactory.create(online=True)
post.categories.set([used_category])
offline_post = PostFactory.create(online=False)
offline_post.categories.set([unused_category_2])
response = self.client.get(reverse('django.contrib.sitemaps.views.sitemap'))
self.assertEqual(response.status_code, 200)
tree = ElementTree.fromstring(response.content.decode('utf-8'))
child_items = list(tree)
self.assertEqual(len(child_items), 2)
nsinfo = {'sitemaps': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
for obj in [post, used_category]:
self.assertEqual(len([
True for child in child_items
if child.find('sitemaps:loc', nsinfo).text == f'http://testserver{obj.get_absolute_url()}'
]), 1)
for obj in [unused_category, unused_category_2]:
self.assertEqual(len([
True for child in child_items
if child.find('sitemaps:loc', nsinfo).text == f'http://testserver{obj.get_absolute_url()}'
]), 0)
| 2.3125
| 2
|
exonum_client/proofs/list_proof/list_proof.py
|
aleksuss/exonum-python-client
| 5
|
12784265
|
"""Proof Verification Module for Exonum `ProofListIndex`."""
from typing import Dict, List, Tuple, Any, Callable
import itertools
from logging import getLogger
from exonum_client.crypto import Hash
from ..utils import is_field_hash, is_field_int, calculate_height
from ..hasher import Hasher
from .key import ProofListKey
from .errors import MalformedListProofError, ListProofVerificationError
# pylint: disable=C0103
logger = getLogger(__name__)
class HashedEntry:
""" Element of a proof with a key and a hash. """
def __init__(self, key: ProofListKey, entry_hash: Hash):
self.key = key
self.entry_hash = entry_hash
@classmethod
def parse(cls, data: Dict[Any, Any]) -> "HashedEntry":
""" Creates a HashedEntry object from the provided dict. """
if not isinstance(data, dict) or not is_field_hash(data, "hash"):
err = MalformedListProofError.parse_error(str(data))
logger.warning(
"Could not parse `hash` from dict, which is required for HashedEntry object creation. %s", str(err)
)
raise err
key = ProofListKey.parse(data)
return HashedEntry(key, Hash(bytes.fromhex(data["hash"])))
def __eq__(self, other: object) -> bool:
if not isinstance(other, HashedEntry):
raise TypeError("Attempt to compare HashedEntry with an object of a different type.")
return self.key == other.key and self.entry_hash == other.entry_hash
def _hash_layer(layer: List[HashedEntry], last_index: int) -> List[HashedEntry]:
""" Takes a layer as a list of hashed entries and the last index as an int and returns a new layer. """
new_len = (len(layer) + 1) // 2
new_layer: List[HashedEntry] = []
for i in range(new_len):
left_idx = 2 * i
right_idx = 2 * i + 1
# Check if there are both right and left indices in the layer:
if len(layer) > right_idx:
# Verify that entries are in the correct order:
if not layer[left_idx].key.is_left() or layer[right_idx].key.index != layer[left_idx].key.index + 1:
err = MalformedListProofError.missing_hash()
logger.warning(str(err))
raise err
left_hash = layer[left_idx].entry_hash
right_hash = layer[right_idx].entry_hash
new_entry = HashedEntry(layer[left_idx].key.parent(), Hasher.hash_node(left_hash, right_hash))
else:
# If there is an odd number of entries, the index of the last one should be equal to provided last_index:
full_layer_length = last_index + 1
if full_layer_length % 2 == 0 or layer[left_idx].key.index != last_index:
err = MalformedListProofError.missing_hash()
logger.warning(str(err))
raise err
left_hash = layer[left_idx].entry_hash
new_entry = HashedEntry(layer[left_idx].key.parent(), Hasher.hash_single_node(left_hash))
new_layer.append(new_entry)
return new_layer
class ListProof:
"""ListProof class provides an interface to parse and verify proofs for ProofListIndex retrieved
from the Exonum blockchain.
Example workflow:
>>> proof_json = {
>>> "proof": [
>>> {"index": 1, "height": 1, "hash": "eae60adeb5c681110eb5226a4ef95faa4f993c4a838d368b66f7c98501f2c8f9"}
>>> ],
>>> "entries": [[0, "6b70d869aeed2fe090e708485d9f4b4676ae6984206cf05efc136d663610e5c9"]],
>>> "length": 2,
>>> }
>>> expected_hash = "07df67b1a853551eb05470a03c9245483e5a3731b4b558e634908ff356b69857"
>>> proof = ListProof.parse(proof_json)
>>> result = proof.validate(bytes.fromhex(expected_hash))
>>> assert result == [(0, stored_val)]
"""
def __init__(
self,
proof: List[HashedEntry],
entries: List[Tuple[int, Any]],
length: int,
value_to_bytes: Callable[[Any], bytes],
):
"""
Constructor of the ListProof.
It is not intended to be used directly, use ListProof.Parse instead.
Parameters
----------
proof : List[HashedEntry]
Proof entries.
entries: List[Tuple[int, Any]]
Unhashed entries (leaves).
length: int
Length of the proof list.
value_to_bytes: Callable[[str], bytes]
A function that converts the stored value to bytes for hashing.
"""
self._proof = proof
self._entries = entries
self._length = length
self._value_to_bytes = value_to_bytes
@classmethod
def parse(cls, proof_dict: Dict[str, Any], value_to_bytes: Callable[[Any], bytes] = bytes.fromhex) -> "ListProof":
"""
Method to parse ListProof from the dict.
Expected dict format:
>>>
{
'proof': [
{'index': 1, 'height': 1, 'hash': 'eae60adeb5c681110eb5226a4ef95faa4f993c4a838d368b66f7c98501f2c8f9'}
],
'entries': [
[0, '6b70d869aeed2fe090e708485d9f4b4676ae6984206cf05efc136d663610e5c9']
],
'length': 2
}
If no errors occurred during parsing, a ListProof object will be returned.
However, successful parsing does not mean that the proof is not malformed (it only means that the provided
dict structure matches the expected one).
Actual checks of the proof contents' correctness will be performed in the `validate` method.
To convert a value to bytes, ListProof attempts to use bytes.fromhex by default.
If your type should be converted to bytes using Protobuf, you can generate a converter function with the use of
`build_encoder_function` from encoder.py.
Otherwise, you have to implement the converter function by yourself.
Parameters
----------
proof_dict : Dict[str, Any]
Proof as a dict.
value_to_bytes: Callable[[str], bytes]
A function that converts the stored value to bytes for hashing.
By default, `bytes.fromhex` is used.
Raises
------
MalformedListProofError
If the structure of the provided dict does not match the expected one,
an exception `MalformedListProofError` is raised.
"""
if (
not isinstance(proof_dict.get("proof"), list)
or not isinstance(proof_dict.get("entries"), list)
or not is_field_int(proof_dict, "length")
):
err = MalformedListProofError.parse_error(str(proof_dict))
logger.warning("The structure of the provided dict does not match the expected one. %s", str(err))
raise err
proof = [HashedEntry.parse(entry) for entry in proof_dict["proof"]]
entries = [cls._parse_entry(entry) for entry in proof_dict["entries"]]
length = proof_dict["length"]
logger.debug("Successfully parsed ListProof from the dict.")
return ListProof(proof, entries, length, value_to_bytes)
def validate(self, expected_hash: Hash) -> List[Tuple[int, Any]]:
"""
This method validates the provided proof against the given expected hash.
Parameters
----------
expected_hash: Hash
Expected root hash.
Returns
-------
result: List[Tuple[int, Any]]
If the hash is correct, a list of the collected values with indices is returned.
Raises
------
ListProofVerificationError
If verification fails, an exception `ListProofVerificationError` is raised.
MalformedListProofError
If the proof is malformed, an exception `MalformedListProofError` is raised.
"""
if not isinstance(expected_hash, Hash):
raise TypeError("`expected_hash` should be of type Hash.")
tree_root = self._collect()
calculated_hash = Hasher.hash_list_node(self._length, tree_root)
if calculated_hash != expected_hash:
logger.warning("Provided root hash does not match the calculated one.")
raise ListProofVerificationError(expected_hash.value, calculated_hash.value)
logger.debug("Successfully validated the provided proof against the given expected hash.")
return self._entries
@staticmethod
def _parse_entry(data: List[Any]) -> Tuple[int, Any]:
if not isinstance(data, list) or not len(data) == 2:
err = MalformedListProofError.parse_error(str(data))
logger.warning("Could not parse a list. %s", err)
raise err
return data[0], data[1]
@staticmethod
def _tree_height_by_length(length: int) -> int:
if length == 0:
return 0
return calculate_height(length)
@staticmethod
def _check_duplicates(entries: List[Any]) -> None:
for idx in range(1, len(entries)):
if entries[idx][0] == entries[idx - 1][0]:
err = MalformedListProofError.duplicate_key()
logger.warning(str(err))
raise err
def _collect(self) -> Hash:
def _hash_entry(entry: Tuple[int, Any]) -> HashedEntry:
""" Creates a hash entry from the value. """
key = ProofListKey(1, entry[0])
entry_hash = Hasher.hash_leaf(self._value_to_bytes(entry[1]))
return HashedEntry(key, entry_hash)
def _split_hashes_by_height(
hashes: List[HashedEntry], height: int
) -> Tuple[List[HashedEntry], List[HashedEntry]]:
""" Splits a list of the hashed entries into two lists by the given height. """
current = list(itertools.takewhile(lambda x: x.key.height == height, hashes))
remaining = hashes[len(current) :]
return current, remaining
tree_height = self._tree_height_by_length(self._length)
# Check an edge case when the list contains no elements:
if tree_height == 0 and (not self._proof or not self._entries):
err = MalformedListProofError.non_empty_proof()
logger.warning(str(err))
raise err
# If there are no entries, the proof should contain only a single root hash:
if not self._entries:
if len(self._proof) != 1:
if self._proof:
err = MalformedListProofError.missing_hash()
logger.warning(str(err))
raise err
err = MalformedListProofError.unexpected_branch()
logger.warning(str(err))
raise err
if self._proof[0].key == ProofListKey(tree_height, 0):
return self._proof[0].entry_hash
err = MalformedListProofError.unexpected_branch()
logger.warning(str(err))
raise err
# Sort the entries and the proof:
self._entries.sort(key=lambda el: el[0])
self._proof.sort(key=lambda el: el.key)
# Check that there are no duplicates:
self._check_duplicates(self._entries)
self._check_duplicates(self._proof)
# Check that the hashes at each height have indices in the allowed range:
for entry in self._proof:
height = entry.key.height
if height == 0:
err = MalformedListProofError.unexpected_leaf()
logger.warning(str(err))
raise err
# self._length -1 is the index of the last element at `height = 1`.
# This index is divided by 2 with each new height:
if height >= tree_height or entry.key.index > (self._length - 1) >> (height - 1):
err = MalformedListProofError.unexpected_branch()
logger.warning(str(err))
raise err
# Create the first layer:
layer = list(map(_hash_entry, self._entries))
hashes = list(self._proof)
last_index = self._length - 1
for height in range(1, tree_height):
# Filter the hashes of the current height and the rest heights (to be processed later):
hashes, remaining_hashes = _split_hashes_by_height(hashes, height)
# Merge the current layer with the hashes that belong to this layer:
layer = sorted(layer + hashes, key=lambda x: x.key)
# Calculate a new layer:
layer = _hash_layer(layer, last_index)
# Size of the next layer is two times smaller:
last_index //= 2
# Make remaining_hashes hashes to be processed:
hashes = remaining_hashes
assert len(layer) == 1, "Result layer length is not 1"
return layer[0].entry_hash
| 2.734375
| 3
|
rllab/envs/mujoco/hill/terrain.py
|
RussellM2020/maml_gps
| 1,838
|
12784266
|
from scipy.stats import multivariate_normal
from scipy.signal import convolve2d
import matplotlib
try:
matplotlib.pyplot.figure()
matplotlib.pyplot.close()
except Exception:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
# the colormap should assign light colors to low values
TERRAIN_CMAP = 'Greens'
DEFAULT_PATH = '/tmp/mujoco_terrains'
STEP = 0.1
def generate_hills(width, height, nhills):
'''
@param width float, terrain width
@param height float, terrain height
@param nhills int, number of hills to generate; the number actually generated is sqrt(nhills)^2
'''
# setup coordinate grid
xmin, xmax = -width/2.0, width/2.0
ymin, ymax = -height/2.0, height/2.0
x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
# generate hilltops
xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
mu = np.c_[xm.flat, ym.flat]
sigma = float(width*height)/(nhills*8)
for i in range(mu.shape[0]):
mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)
# generate hills
sigma = sigma + sigma*np.random.rand(mu.shape[0])
rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)
return x, y, hfield
def clear_patch(hfield, box):
''' Clears a patch shaped like box, assuming robot is placed in center of hfield
@param box: rllab.spaces.Box-like
'''
if box.flat_dim > 2:
raise ValueError("Provide 2dim box")
# clear patch
h_center = int(0.5 * hfield.shape[0])
w_center = int(0.5 * hfield.shape[1])
fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
hfield[fromrow:torow, fromcol:tocol] = 0.0
# convolve to smooth the edges somewhat, in case hills were cut off
K = np.ones((10,10)) / 100.0
s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s
return hfield
def _checkpath(path_):
if path_ is None:
path_ = DEFAULT_PATH
if not os.path.exists(path_):
os.makedirs(path_)
return path_
def save_heightfield(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure the path + fname match the <file> attribute
of the <asset> element in the env XML where the height field is defined
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP) # terrain_cmap is necessary to make sure tops get light color
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
def save_texture(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure this matches the <texturedir> of the
<compiler> element in the env XML
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
# for some reason plt.grid does not work here, so generate gridlines manually
for i in np.arange(xmin,xmax,0.5):
plt.plot([i,i], [ymin,ymax], 'k', linewidth=0.1)
for i in np.arange(ymin,ymax,0.5):
plt.plot([xmin,xmax],[i,i], 'k', linewidth=0.1)
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
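# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of generating a hill field and saving both the height field
# and the texture; the dimensions, hill count and file names are assumptions
# for illustration only.
if __name__ == '__main__':
    x, y, hfield = generate_hills(width=20.0, height=20.0, nhills=16)
    save_heightfield(x, y, hfield, 'hills_height.png')
    save_texture(x, y, hfield, 'hills_texture.png')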
| 2.59375
| 3
|
test/parOimparTestCase.py
|
uip-pc3/numero-par-impar-ArielLK
| 0
|
12784267
|
<filename>test/parOimparTestCase.py
import unittest
from app.parOimpar import verificar
class PoIC(object):
pass
class parOimparTestCase(unittest.TestCase):
# ODD (impar) = False, EVEN (par) = True
def test_impar(self):
retorno = verificar(15)
self.assertEqual(retorno, False)
def test_par(self):
retorno = verificar(20)
self.assertEqual(retorno, True)
if __name__ == '__main__':
unittest.main()
| 2.6875
| 3
|
gui/fonts/font14.py
|
seelpro/micropython-micro-gui
| 37
|
12784268
|
<gh_stars>10-100
# Code generated by font_to_py.py.
# Font: FreeSans.ttf Char set: !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~£¬°Ωαβγδθλμπωϕ
# Cmd: ./font_to_py.py -x -k extended FreeSans.ttf 23 font14.py
version = '0.33'
def height():
return 23
def baseline():
return 18
def max_width():
return 23
def hmap():
return True
def reverse():
return False
def monospaced():
return False
def min_ch():
return 32
def max_ch():
return 981
_font =\
b'\x0d\x00\x00\x00\x0f\xc0\x1f\xe0\x38\x70\x30\x30\x30\x30\x00\x30'\
b'\x00\x70\x00\x60\x01\xc0\x01\x80\x03\x00\x03\x00\x03\x00\x00\x00'\
b'\x00\x00\x03\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\x18\x18\x18\x18\x00\x00\x18\x18\x00\x00\x00'\
b'\x00\x00\x08\x00\x00\x00\x66\x66\x66\x66\x44\x44\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00'\
b'\x00\x04\x60\x04\x60\x0c\x60\x0c\x40\x7f\xf8\x7f\xf8\x08\xc0\x18'\
b'\xc0\x18\xc0\x18\x80\xff\xf0\xff\xf0\x31\x80\x31\x80\x31\x00\x33'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x02\x00\x0f'\
b'\x80\x3f\xe0\x32\x60\x62\x30\x62\x30\x62\x00\x62\x00\x3a\x00\x3f'\
b'\x00\x0f\xe0\x02\xf0\x02\x30\x62\x30\x62\x30\x72\x70\x3f\xe0\x0f'\
b'\x80\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x00\x1e\x02\x00\x3f\x04\x00\x73\x8c\x00\x61'\
b'\x88\x00\x73\x98\x00\x3f\x10\x00\x1e\x20\x00\x00\x23\xc0\x00\x47'\
b'\xe0\x00\xce\x70\x00\x8c\x30\x01\x8c\x30\x01\x0e\x70\x02\x07\xe0'\
b'\x02\x03\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\x00\x00\x00\x00\x00\x07\x80\x0f\xc0\x1c\xe0\x18\x60'\
b'\x18\x60\x0c\xc0\x0f\x80\x0f\x00\x1b\x98\x31\xd8\x60\xf8\x60\x70'\
b'\x60\x30\x70\xf8\x3f\xd8\x1f\x0c\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x05\x00\x00\x00\x60\x60\x60\x60\x40\x40\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x02\x04'\
b'\x0c\x08\x18\x18\x10\x30\x30\x30\x30\x30\x30\x30\x30\x18\x18\x18'\
b'\x0c\x0c\x04\x02\x08\x00\x00\x40\x20\x30\x10\x18\x18\x08\x0c\x0c'\
b'\x0c\x0c\x0c\x0c\x0c\x0c\x18\x18\x18\x30\x30\x20\x40\x09\x00\x00'\
b'\x00\x08\x00\x08\x00\x6b\x00\x3e\x00\x1c\x00\x36\x00\x24\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x03'\
b'\x00\x03\x00\x03\x00\x3f\xf0\x3f\xf0\x03\x00\x03\x00\x03\x00\x03'\
b'\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30'\
b'\x30\x10\x10\x20\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x7c\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x30\x30\x00\x00\x00\x00\x00\x06\x00\x00\x04\x0c\x08\x08\x18'\
b'\x10\x10\x10\x30\x20\x20\x20\x60\x40\x40\xc0\x80\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x0f\x80\x1f\xc0\x38\xe0\x30\x60\x60'\
b'\x30\x60\x30\x60\x30\x60\x30\x60\x30\x60\x30\x60\x30\x60\x30\x30'\
b'\x60\x38\xe0\x1f\xc0\x0f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x01\x00\x03\x00\x07\x00\x1f\x00\x1f'\
b'\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03'\
b'\x00\x03\x00\x03\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x0f\x80\x3f\xe0\x30\x70\x60\x30\x60'\
b'\x30\x00\x30\x00\x30\x00\xe0\x01\xc0\x07\x80\x0e\x00\x18\x00\x10'\
b'\x00\x20\x00\x3f\xf0\x3f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x0f\xc0\x3f\xe0\x30\x70\x60\x30\x60'\
b'\x30\x00\x70\x07\xe0\x07\xe0\x00\x70\x00\x30\x00\x30\x60\x30\x60'\
b'\x30\x30\x60\x3f\xe0\x0f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x00\xc0\x00\xc0\x01\xc0\x03\xc0\x06'\
b'\xc0\x04\xc0\x0c\xc0\x18\xc0\x10\xc0\x20\xc0\x3f\xf0\x3f\xf0\x00'\
b'\xc0\x00\xc0\x00\xc0\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x3f\xe0\x3f\xe0\x30\x00\x30\x00\x30'\
b'\x00\x37\x80\x3f\xe0\x70\xe0\x00\x70\x00\x30\x00\x30\x00\x30\x60'\
b'\x30\x70\xe0\x3f\xc0\x0f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x07\x80\x1f\xe0\x38\x60\x30\x70\x20'\
b'\x00\x60\x00\x67\x80\x7f\xe0\x70\x60\x60\x30\x60\x30\x60\x30\x20'\
b'\x30\x38\x60\x1f\xe0\x0f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x7f\xe0\x7f\xe0\x00\x40\x00\xc0\x00'\
b'\x80\x01\x80\x03\x00\x03\x00\x06\x00\x06\x00\x04\x00\x0c\x00\x0c'\
b'\x00\x08\x00\x18\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x0f\x80\x1f\xc0\x38\xe0\x30\x60\x30'\
b'\x60\x38\xe0\x1f\xc0\x1f\xc0\x38\xe0\x60\x30\x60\x30\x60\x30\x60'\
b'\x30\x30\x60\x1f\xc0\x0f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x00\x00\x0f\x00\x3f\xc0\x30\xc0\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x30\xe0\x3f\xe0\x1f\x60\x00\x60\x00\x60\x60'\
b'\xc0\x31\xc0\x3f\x80\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x06\x00\x00\x00\x00\x00\x00\x00\x30\x30\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x30\x30\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x00\x00\x00\x00\x00\x00\x00\x00\x30\x30\x10\x10'\
b'\x20\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x00\xf0\x03\xc0\x0f\x00\x78\x00\x60\x00\x3c'\
b'\x00\x0f\x00\x03\xe0\x00\x70\x00\x10\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x7f\xf0\x7f\xf0\x00\x00\x00\x00\x7f'\
b'\xf0\x7f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x00\x78\x00\x1e\x00\x07\xc0\x00\xf0\x00\x30\x00'\
b'\xf0\x07\x80\x1e\x00\x78\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0d\x00\x00\x00\x0f\xc0\x1f\xe0\x38\x70\x30\x30\x30'\
b'\x30\x00\x30\x00\x70\x00\x60\x01\xc0\x01\x80\x03\x00\x03\x00\x03'\
b'\x00\x00\x00\x00\x00\x03\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x17\x00\x00\x00\x00\x00\x7f\x00\x03\xff\xc0\x07\x81'\
b'\xe0\x0e\x00\x70\x1c\x38\x38\x38\xfd\x98\x30\xc7\x8c\x31\x83\x0c'\
b'\x63\x83\x0c\x63\x03\x0c\x63\x02\x0c\x63\x06\x18\x63\x06\x18\x73'\
b'\x8e\x30\x31\xff\xf0\x38\xe3\xc0\x1c\x00\x00\x0f\x00\x00\x03\xff'\
b'\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x03\x80'\
b'\x03\xc0\x07\xc0\x06\xc0\x06\xe0\x0e\x60\x0c\x60\x0c\x70\x18\x30'\
b'\x18\x30\x1f\xf8\x3f\xf8\x30\x18\x30\x1c\x70\x0c\x60\x0c\x60\x0e'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x3f\xe0'\
b'\x3f\xf0\x30\x38\x30\x18\x30\x18\x30\x18\x30\x30\x3f\xe0\x3f\xf0'\
b'\x30\x18\x30\x0c\x30\x0c\x30\x0c\x30\x0c\x30\x18\x3f\xf8\x3f\xf0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x03\xf0'\
b'\x0f\xf8\x1c\x1c\x38\x0e\x30\x06\x60\x00\x60\x00\x60\x00\x60\x00'\
b'\x60\x00\x60\x00\x60\x06\x30\x06\x38\x0c\x1c\x1c\x0f\xf8\x03\xe0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x3f\xe0'\
b'\x3f\xf8\x30\x38\x30\x0c\x30\x0c\x30\x06\x30\x06\x30\x06\x30\x06'\
b'\x30\x06\x30\x06\x30\x06\x30\x0c\x30\x0c\x30\x38\x3f\xf0\x3f\xe0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x3f\xf8'\
b'\x3f\xf8\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x3f\xf8\x3f\xf8'\
b'\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x3f\xfc\x3f\xfc'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x3f\xf8'\
b'\x3f\xf8\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x3f\xf0\x3f\xf0'\
b'\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x00\x03'\
b'\xf0\x00\x0f\xfc\x00\x1c\x0e\x00\x38\x07\x00\x30\x03\x00\x70\x00'\
b'\x00\x60\x00\x00\x60\x00\x00\x60\x3f\x00\x60\x3f\x00\x60\x03\x00'\
b'\x70\x03\x00\x30\x03\x00\x38\x07\x00\x1c\x1f\x00\x0f\xfb\x00\x03'\
b'\xe1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x11\x00\x00\x00\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x30'\
b'\x06\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x3f\xfe\x00\x3f\xfe'\
b'\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00'\
b'\x30\x06\x00\x30\x06\x00\x30\x06\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x30\x30\x30\x30\x30'\
b'\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x0c\x00\x00\x00\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00'\
b'\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x60\xc0\x60'\
b'\xc0\x71\xc0\x3f\x80\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\x00\x00\x00\x30\x0e\x30\x1c\x30\x38\x30\x70\x30\xe0\x31'\
b'\xc0\x33\x80\x37\x80\x3f\x80\x3c\xc0\x38\xe0\x30\x60\x30\x30\x30'\
b'\x38\x30\x18\x30\x0c\x30\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0d\x00\x00\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30'\
b'\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30'\
b'\x00\x30\x00\x3f\xf0\x3f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x13\x00\x00\x00\x00\x38\x01\xc0\x38\x01\xc0\x3c\x03\xc0\x3c'\
b'\x03\xc0\x36\x02\xc0\x36\x06\xc0\x36\x06\xc0\x33\x04\xc0\x33\x0c'\
b'\xc0\x33\x0c\xc0\x31\x98\xc0\x31\x98\xc0\x31\x98\xc0\x30\xf0\xc0'\
b'\x30\xf0\xc0\x30\xf0\xc0\x30\x60\xc0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x38\x06\x00'\
b'\x38\x06\x00\x3c\x06\x00\x3c\x06\x00\x36\x06\x00\x37\x06\x00\x33'\
b'\x06\x00\x31\x86\x00\x31\xc6\x00\x30\xc6\x00\x30\x66\x00\x30\x66'\
b'\x00\x30\x36\x00\x30\x3e\x00\x30\x1e\x00\x30\x0e\x00\x30\x0e\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12'\
b'\x00\x00\x00\x00\x03\xf0\x00\x0f\xfc\x00\x1c\x0e\x00\x38\x07\x00'\
b'\x30\x03\x00\x70\x03\x80\x60\x01\x80\x60\x01\x80\x60\x01\x80\x60'\
b'\x01\x80\x60\x01\x80\x70\x03\x80\x30\x03\x00\x38\x07\x00\x1c\x0e'\
b'\x00\x0f\xfc\x00\x03\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x3f\xe0\x3f\xf8\x30\x18'\
b'\x30\x0c\x30\x0c\x30\x0c\x30\x0c\x30\x1c\x3f\xf8\x3f\xf0\x30\x00'\
b'\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x00\x03\xf0\x00\x0f\xfc'\
b'\x00\x1c\x0e\x00\x38\x07\x00\x30\x03\x00\x70\x03\x80\x60\x01\x80'\
b'\x60\x01\x80\x60\x01\x80\x60\x01\x80\x60\x01\x80\x70\x03\x80\x30'\
b'\x13\x00\x38\x1f\x00\x1c\x1e\x00\x0f\xff\x00\x03\xf3\x00\x00\x01'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00'\
b'\x00\x3f\xf0\x3f\xf8\x30\x1c\x30\x0c\x30\x0c\x30\x0c\x30\x0c\x30'\
b'\x18\x3f\xf0\x3f\xf8\x30\x1c\x30\x0c\x30\x0c\x30\x0c\x30\x0c\x30'\
b'\x0c\x30\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00'\
b'\x00\x07\xe0\x1f\xf8\x18\x38\x30\x0c\x30\x0c\x30\x00\x38\x00\x1f'\
b'\x00\x0f\xf0\x00\xf8\x00\x1c\x00\x0c\x60\x0c\x60\x0c\x38\x18\x1f'\
b'\xf8\x07\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00'\
b'\x00\x7f\xf8\x7f\xf8\x03\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03'\
b'\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03\x00\x03'\
b'\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00'\
b'\x00\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x30\x06'\
b'\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00\x30\x06\x00'\
b'\x30\x06\x00\x30\x06\x00\x30\x06\x00\x38\x0e\x00\x1c\x1c\x00\x0f'\
b'\xf8\x00\x07\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0f\x00\x00\x00\x60\x0c\x60\x0c\x60\x1c\x30\x18'\
b'\x30\x18\x38\x38\x18\x30\x18\x30\x1c\x70\x0c\x60\x0c\x60\x0e\xe0'\
b'\x06\xc0\x06\xc0\x07\xc0\x03\x80\x03\x80\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x16\x00\x00\x00\x00\xe0\x70\x18\x60\x70\x38\x60'\
b'\x78\x38\x60\x78\x30\x70\xd8\x30\x30\xd8\x30\x30\xcc\x70\x30\xcc'\
b'\x60\x39\x8c\x60\x19\x86\x60\x19\x86\xe0\x1b\x86\xc0\x1f\x06\xc0'\
b'\x0f\x03\xc0\x0f\x03\xc0\x0e\x03\x80\x0e\x03\x80\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x70'\
b'\x0c\x30\x18\x38\x38\x1c\x30\x0c\x60\x0e\xe0\x06\xc0\x03\x80\x03'\
b'\x80\x07\xc0\x06\xc0\x0e\x60\x1c\x70\x18\x30\x38\x18\x70\x1c\x60'\
b'\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x60'\
b'\x0e\x70\x0c\x38\x1c\x18\x18\x1c\x38\x0c\x30\x06\x60\x07\xe0\x03'\
b'\xc0\x03\xc0\x01\x80\x01\x80\x01\x80\x01\x80\x01\x80\x01\x80\x01'\
b'\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x7f'\
b'\xf8\x7f\xf8\x00\x38\x00\x30\x00\x70\x00\xe0\x01\xc0\x01\x80\x03'\
b'\x00\x07\x00\x0e\x00\x1c\x00\x18\x00\x38\x00\x70\x00\x7f\xf8\x7f'\
b'\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x78\x78'\
b'\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60'\
b'\x60\x60\x78\x78\x06\x00\x00\x80\xc0\x40\x40\x60\x20\x20\x20\x30'\
b'\x10\x10\x10\x18\x08\x08\x0c\x04\x00\x00\x00\x00\x00\x06\x00\x00'\
b'\x78\x78\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x78\x78\x0b\x00\x00\x00\x00\x00\x0c\x00\x0e\x00'\
b'\x0a\x00\x1b\x00\x13\x00\x31\x00\x31\x80\x20\x80\x60\xc0\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\xff\xf8\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x06\x00\x00\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x80\x3f'\
b'\xc0\x60\xe0\x00\x60\x00\x60\x0f\xe0\x3e\x60\x60\x60\x60\x60\x61'\
b'\xe0\x3f\x70\x1e\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d'\
b'\x00\x00\x00\x60\x00\x60\x00\x60\x00\x60\x00\x60\x00\x67\x80\x7f'\
b'\xe0\x78\xe0\x70\x70\x60\x30\x60\x30\x60\x30\x60\x30\x70\x70\x78'\
b'\xe0\x6f\xc0\x67\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x80\x1f'\
b'\xc0\x30\xe0\x70\x60\x60\x00\x60\x00\x60\x00\x60\x00\x70\x60\x30'\
b'\xe0\x3f\xc0\x0f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d'\
b'\x00\x00\x00\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x0f\x30\x1f'\
b'\xf0\x38\xf0\x70\x70\x60\x30\x60\x30\x60\x30\x60\x30\x70\x70\x38'\
b'\xf0\x1f\xb0\x0f\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x80\x1f'\
b'\xc0\x38\x60\x70\x30\x60\x30\x7f\xf0\x7f\xf0\x60\x00\x60\x30\x30'\
b'\x60\x1f\xe0\x0f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06'\
b'\x00\x00\x18\x38\x30\x30\x30\x78\x78\x30\x30\x30\x30\x30\x30\x30'\
b'\x30\x30\x30\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0f\x60\x3f\xe0\x30\xe0\x70\xe0\x60\x60'\
b'\x60\x60\x60\x60\x60\x60\x70\xe0\x30\xe0\x3f\x60\x0e\x60\x00\x60'\
b'\x00\xe0\x60\xc0\x3f\xc0\x1f\x00\x0c\x00\x00\x00\x60\x00\x60\x00'\
b'\x60\x00\x60\x00\x60\x00\x67\x80\x6f\xe0\x78\xe0\x70\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x30\x30\x00\x00\x00'\
b'\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x06\x00\x00\x30\x30\x00\x00\x00\x30\x30\x30\x30\x30\x30\x30'\
b'\x30\x30\x30\x30\x30\x30\x30\x30\xf0\xe0\x0c\x00\x00\x00\x60\x00'\
b'\x60\x00\x60\x00\x60\x00\x60\x00\x60\xc0\x61\x80\x63\x00\x66\x00'\
b'\x6e\x00\x7e\x00\x77\x00\x63\x00\x61\x80\x61\xc0\x60\xc0\x60\x60'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x00\x00'\
b'\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x67\x1f\x00\x6f\xbf\x80\x71\xe3\x80'\
b'\x60\xc1\x80\x60\xc1\x80\x60\xc1\x80\x60\xc1\x80\x60\xc1\x80\x60'\
b'\xc1\x80\x60\xc1\x80\x60\xc1\x80\x60\xc1\x80\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x67\x80\x6f\xe0\x78\xe0\x70\x60'\
b'\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x80\x1f\xc0\x38\xe0\x70\x70'\
b'\x60\x30\x60\x30\x60\x30\x60\x30\x70\x70\x38\xe0\x1f\xc0\x0f\x80'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x67\x80\x6f\xc0\x78\xe0\x70\x70'\
b'\x60\x30\x60\x30\x60\x30\x60\x30\x70\x70\x78\xe0\x7f\xc0\x67\x80'\
b'\x60\x00\x60\x00\x60\x00\x60\x00\x00\x00\x0d\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x30\x1f\xf0\x38\xf0\x70\x70'\
b'\x60\x30\x60\x30\x60\x30\x60\x30\x70\x70\x38\xf0\x3f\xf0\x0f\x30'\
b'\x00\x30\x00\x30\x00\x30\x00\x30\x00\x00\x08\x00\x00\x00\x00\x00'\
b'\x00\x00\x66\x6e\x78\x60\x60\x60\x60\x60\x60\x60\x60\x60\x00\x00'\
b'\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x1f\x80\x3f\xe0\x70\x60\x60\x00\x70\x00\x3f\x00\x0f\xc0\x00'\
b'\xe0\x60\x60\x70\xe0\x3f\xc0\x1f\x80\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x06\x00\x00\x00\x00\x00\x30\x30\x78\x78\x30\x30\x30'\
b'\x30\x30\x30\x30\x30\x38\x38\x00\x00\x00\x00\x00\x0c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x60\x60\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\xe0\x71\xe0\x7f\x60'\
b'\x1e\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x60\x60\xe0\x60\xc0'\
b'\x70\xc0\x31\x80\x31\x80\x19\x80\x1b\x00\x1b\x00\x0f\x00\x0e\x00'\
b'\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xc1\xc3\x00\x61\xc3\x00\x63\xc3\x00\x63\xc6\x00\x33\x66\x00\x33'\
b'\x66\x00\x32\x66\x00\x36\x6c\x00\x1e\x3c\x00\x1e\x3c\x00\x1c\x38'\
b'\x00\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x60\xc0\x71\xc0\x31\x80\x1b\x00\x0e\x00\x0e\x00\x0e\x00\x1b'\
b'\x00\x1b\x00\x31\x80\x60\xc0\x60\xc0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\xc0\xe0\x60\xc0\x60\xc0\x61\x80\x31\x80\x31\x80\x33\x00\x1b'\
b'\x00\x1b\x00\x1e\x00\x0e\x00\x0e\x00\x0c\x00\x0c\x00\x18\x00\x78'\
b'\x00\x70\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x7f\xc0\x7f\xc0\x01\xc0\x01\x80\x03\x00\x06\x00\x0c\x00\x18'\
b'\x00\x30\x00\x60\x00\x7f\xc0\x7f\xc0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x08\x00\x00\x0c\x1c\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x70\x70\x18\x18\x18\x18\x18\x18\x18\x18\x1c\x0c\x06\x00\x00\x30'\
b'\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30'\
b'\x30\x30\x30\x30\x30\x08\x00\x00\x30\x38\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x0e\x0e\x18\x18\x18\x18\x18\x18\x18\x18\x38\x30\x0c\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x00'\
b'\x7c\x00\x4e\x40\x07\xc0\x01\x80\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00'\
b'\x00\x00\x0f\xc0\x1f\xe0\x38\x60\x60\x30\x60\x30\x60\x00\x70\x00'\
b'\x38\x00\xff\x00\xff\x00\x1c\x00\x0c\x00\x0c\x00\x18\x00\x2f\x10'\
b'\x7f\xf0\x61\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x3f\xf8\x3f\xf8\x00\x18\x00\x18\x00\x18\x00\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00'\
b'\x00\x00\x00\x00\x07\x00\x0f\x80\x18\xc0\x10\x40\x18\xc0\x0f\x80'\
b'\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\
b'\x00\x00\x00\x03\xe0\x00\x0f\xf8\x00\x1c\x1c\x00\x38\x0e\x00\x30'\
b'\x06\x00\x60\x03\x00\x60\x03\x00\x60\x03\x00\x60\x03\x00\x60\x03'\
b'\x00\x60\x03\x00\x30\x06\x00\x30\x0e\x00\x18\x0c\x00\x0c\x38\x00'\
b'\x7e\x3f\x00\x7e\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0f\x60\x3f\xe0\x30\xe0\x60\xe0\x60\x60\x60\x60\x60'\
b'\x60\x60\x60\x60\xe0\x30\xe0\x3f\xf0\x0f\x70\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x1f\x80\x3f\xc0\x70\xe0\x60'\
b'\x60\x60\x60\x60\xc0\x63\x80\x63\xc0\x60\x60\x60\x30\x60\x30\x60'\
b'\x30\x60\x30\x60\x30\x70\x60\x7f\xe0\x6f\x80\x60\x00\x60\x00\x60'\
b'\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\xe0\x60\xe0\x60\x70\xc0\x30\xc0\x30\xc0\x39\x80\x19'\
b'\x80\x1b\x80\x0f\x00\x0f\x00\x06\x00\x06\x00\x06\x00\x06\x00\x06'\
b'\x00\x06\x00\x06\x00\x0d\x00\x00\x00\x3f\xe0\x3f\xe0\x18\x00\x0e'\
b'\x00\x03\x00\x0f\x80\x3f\xc0\x38\xe0\x70\x70\x60\x30\x60\x30\x60'\
b'\x30\x60\x30\x70\x70\x30\xe0\x3f\xc0\x0f\x80\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x0f\x00\x1f\x80\x30\xc0\x30'\
b'\xc0\x60\x60\x60\x60\x60\x60\x7f\xe0\x7f\xe0\x60\x60\x60\x60\x60'\
b'\x60\x60\x60\x30\xc0\x30\xc0\x1f\x80\x0f\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x70\x00\x78\x00\x0c\x00\x0c'\
b'\x00\x06\x00\x06\x00\x0e\x00\x0f\x00\x1b\x00\x19\x00\x19\x80\x31'\
b'\x80\x30\xc0\x30\xc0\x60\xc0\x60\x70\xe0\x70\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x70\xe0\x7f\xe0\x6f\x60\x60\x00\x60\x00\x60'\
b'\x00\x60\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x7f\xf8\x7f\xf8\x30\x30\x30\x30\x30\x30\x30\x30\x30'\
b'\x30\x30\x30\x30\x30\x30\x30\x30\x3c\x30\x1c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x0c\x00\x30\x0e\x00\x30'\
b'\x06\x00\x61\x87\x00\x61\x83\x00\x61\x83\x00\x61\x83\x00\x61\x83'\
b'\x00\x61\x83\x00\x33\xc6\x00\x3f\x7e\x00\x1e\x38\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00'\
b'\x00\x00\x01\x80\x01\x80\x01\x80\x01\x80\x0f\xf0\x1f\xf8\x39\x9c'\
b'\x71\x8e\x61\x86\x61\x86\x61\x86\x61\x86\x71\x8e\x39\x9c\x1f\xf8'\
b'\x0f\xf0\x01\x80\x01\x80\x01\x80\x01\x80\x00\x00'
_sparse =\
b'\x20\x00\x30\x00\x21\x00\x49\x00\x22\x00\x62\x00\x23\x00\x7b\x00'\
b'\x24\x00\xab\x00\x25\x00\xdb\x00\x26\x00\x22\x01\x27\x00\x52\x01'\
b'\x28\x00\x6b\x01\x29\x00\x84\x01\x2a\x00\x9d\x01\x2b\x00\xcd\x01'\
b'\x2c\x00\xfd\x01\x2d\x00\x16\x02\x2e\x00\x2f\x02\x2f\x00\x48\x02'\
b'\x30\x00\x61\x02\x31\x00\x91\x02\x32\x00\xc1\x02\x33\x00\xf1\x02'\
b'\x34\x00\x21\x03\x35\x00\x51\x03\x36\x00\x81\x03\x37\x00\xb1\x03'\
b'\x38\x00\xe1\x03\x39\x00\x11\x04\x3a\x00\x41\x04\x3b\x00\x5a\x04'\
b'\x3c\x00\x73\x04\x3d\x00\xa3\x04\x3e\x00\xd3\x04\x3f\x00\x03\x05'\
b'\x40\x00\x33\x05\x41\x00\x7a\x05\x42\x00\xaa\x05\x43\x00\xda\x05'\
b'\x44\x00\x0a\x06\x45\x00\x3a\x06\x46\x00\x6a\x06\x47\x00\x9a\x06'\
b'\x48\x00\xe1\x06\x49\x00\x28\x07\x4a\x00\x41\x07\x4b\x00\x71\x07'\
b'\x4c\x00\xa1\x07\x4d\x00\xd1\x07\x4e\x00\x18\x08\x4f\x00\x5f\x08'\
b'\x50\x00\xa6\x08\x51\x00\xd6\x08\x52\x00\x1d\x09\x53\x00\x4d\x09'\
b'\x54\x00\x7d\x09\x55\x00\xad\x09\x56\x00\xf4\x09\x57\x00\x24\x0a'\
b'\x58\x00\x6b\x0a\x59\x00\x9b\x0a\x5a\x00\xcb\x0a\x5b\x00\xfb\x0a'\
b'\x5c\x00\x14\x0b\x5d\x00\x2d\x0b\x5e\x00\x46\x0b\x5f\x00\x76\x0b'\
b'\x60\x00\xa6\x0b\x61\x00\xbf\x0b\x62\x00\xef\x0b\x63\x00\x1f\x0c'\
b'\x64\x00\x4f\x0c\x65\x00\x7f\x0c\x66\x00\xaf\x0c\x67\x00\xc8\x0c'\
b'\x68\x00\xf8\x0c\x69\x00\x28\x0d\x6a\x00\x41\x0d\x6b\x00\x5a\x0d'\
b'\x6c\x00\x8a\x0d\x6d\x00\xa3\x0d\x6e\x00\xea\x0d\x6f\x00\x1a\x0e'\
b'\x70\x00\x4a\x0e\x71\x00\x7a\x0e\x72\x00\xaa\x0e\x73\x00\xc3\x0e'\
b'\x74\x00\xf3\x0e\x75\x00\x0c\x0f\x76\x00\x3c\x0f\x77\x00\x6c\x0f'\
b'\x78\x00\xb3\x0f\x79\x00\xe3\x0f\x7a\x00\x13\x10\x7b\x00\x43\x10'\
b'\x7c\x00\x5c\x10\x7d\x00\x75\x10\x7e\x00\x8e\x10\xa3\x00\xbe\x10'\
b'\xac\x00\xee\x10\xb0\x00\x1e\x11\xa9\x03\x4e\x11\xb1\x03\x95\x11'\
b'\xb2\x03\xc5\x11\xb3\x03\xf5\x11\xb4\x03\x25\x12\xb8\x03\x55\x12'\
b'\xbb\x03\x85\x12\xbc\x03\xb5\x12\xc0\x03\xe5\x12\xc9\x03\x15\x13'\
b'\xd5\x03\x5c\x13'
_mvfont = memoryview(_font)
_mvsp = memoryview(_sparse)
ifb = lambda l : l[0] | (l[1] << 8)
def bs(lst, val):
while True:
m = (len(lst) & ~ 7) >> 1
v = ifb(lst[m:])
if v == val:
return ifb(lst[m + 2:])
if not m:
return 0
lst = lst[m:] if v < val else lst[:m]
def get_ch(ch):
doff = bs(_mvsp, ord(ch))
width = ifb(_mvfont[doff : ])
next_offs = doff + 2 + ((width - 1)//8 + 1) * 23
return _mvfont[doff + 2:next_offs], 23, width
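# Illustrative helper (not part of the generated file): dump one glyph to the
# console. Rows are horizontally mapped (hmap() is True, reverse() is False),
# one row per ((width - 1)//8 + 1) bytes, most significant bit first.
def _dump_glyph(ch):
    glyph, rows, cols = get_ch(ch)
    bytes_per_row = (cols - 1) // 8 + 1
    for row in range(rows):
        line = ''
        for col in range(cols):
            byte = glyph[row * bytes_per_row + col // 8]
            line += '#' if byte & (0x80 >> (col % 8)) else '.'
        print(line)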
| 1.960938
| 2
|
src/c3nav/mapdata/migrations/0016_auto_20161208_2023.py
|
bate/c3nav
| 1
|
12784269
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-08 20:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0015_auto_20161208_2020'),
]
operations = [
migrations.RenameField(
model_name='lineobstacle',
old_name='altitude',
new_name='width',
),
]
| 1.476563
| 1
|
output/process.py
|
googlx/lasertagger
| 1
|
12784270
|
<filename>output/process.py
import os
import pandas as pd
from tqdm import tqdm
names = ['text', 'decoded', 'reference']
df = pd.read_csv('pred.tsv', sep='\t', header=None, names=names)
df['text'] = df['text'].apply(lambda x: ' '.join(list(x)))
df['reference'] = df['reference'].apply(lambda x: ' '.join(list(x)))
char_dict = {}
with open('vocab.txt', 'r') as f:
i = 1
for line in f.readlines():
char = line.strip()
char_dict[char] = str(i)
i += 1
for name in names:
df[name] = df[name].apply(lambda x: ' '.join([char_dict[char] if char in char_dict.keys() else '0' for char in x.split(' ')]))
if not os.path.exists(name):
os.mkdir(name)
idx = 0
for data in tqdm(df[name]):
with open(os.path.join(name, '{0:04d}.txt'.format(idx)), 'w') as f:
f.write(data)
idx += 1
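# Illustrative note (the indices below are hypothetical and depend on vocab.txt):
# a reference string "abc" is first space-joined to "a b c" and then replaced by
# 1-based vocab line numbers, e.g. "17 42 9"; characters missing from vocab.txt
# map to "0". One numbered file per row is written into the text/, decoded/ and
# reference/ directories.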
| 2.53125
| 3
|
lisa/training.py
|
mjirik/lisa
| 22
|
12784271
|
<reponame>mjirik/lisa
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © %YEAR% <>
#
# Distributed under terms of the %LICENSE% license.
"""
Training module. Default setup makes nothing. Use --all to make all
"""
import logging
logger = logging.getLogger()
import argparse
import organ_localizator
import organ_model
def main():
# logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
logger.addHandler(ch)
# create file handler which logs even debug messages
# fh = logging.FileHandler('log.txt')
# fh.setLevel(logging.DEBUG)
# formatter = logging.Formatter(
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# fh.setFormatter(formatter)
# logger.addHandler(fh)
# logger.debug('start')
# input parser
parser = argparse.ArgumentParser(
description=__doc__
)
parser.add_argument(
'-i', '--inputfile',
default=None,
# required=True,
help='input file'
)
parser.add_argument(
'-d', '--debug', action='store_true',
help='Debug mode')
parser.add_argument(
'-lm', '--liver-model', action='store_true',
help='Train liver model')
parser.add_argument(
'-ll', '--liver-localizator', action='store_true',
help='Train liver localizator')
parser.add_argument(
'--all', action='store_true',
help='Train all')
args = parser.parse_args()
if args.debug:
ch.setLevel(logging.DEBUG)
if args.liver_localizator or args.all:
organ_localizator.train_liver_localizator_from_sliver_data()
if args.liver_model or args.all:
organ_model.train_liver_model_from_sliver_data()
if __name__ == "__main__":
main()
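# Example invocations (illustrative only; they assume the sliver training data
# expected by organ_localizator and organ_model is available locally):
#
#   python training.py --all        # train both the liver localizator and the liver model
#   python training.py -ll --debug  # train only the localizator, with debug logging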
| 2.03125
| 2
|
6/solution.py
|
thesketh/advent-of-code-2021
| 0
|
12784272
|
<filename>6/solution.py
"""
Solution to the sixth challenge, calculating the proliferation of
lanternfish.
"""
from collections import Counter
from os import PathLike
from pathlib import Path
from typing import Sequence, Iterable
ROOT = Path(__file__).absolute().parent
LanternFish = int
"""A lanternfish, represented by the number of days until spawn."""
def parse_input(path: PathLike) -> Sequence[LanternFish]:
"""Read in a sequence of lanternfish from a file."""
with open(path, "r", encoding="utf-8") as file:
return list(map(int, next(file).rstrip().split(",")))
def count_lanternfish_after(lanternfish: Iterable[LanternFish], n_days: int) -> int:
"""Count the number of lanternfish after a given number of days."""
fish_counter = Counter(lanternfish)
for _ in range(n_days):
n_spawned = fish_counter[0]
for days_til_spawn in range(1, 9):
fish_counter[days_til_spawn - 1] = fish_counter[days_til_spawn]
fish_counter[6] += n_spawned
fish_counter[8] = n_spawned
return sum(fish_counter.values())
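# Hypothetical sanity check (not part of the original solution), using the widely
# quoted example population from the puzzle statement; it is never called here.
def _example_check() -> None:
    """Illustrative only: verify the counter-based simulation on a tiny input."""
    example = [3, 4, 3, 1, 2]
    assert count_lanternfish_after(example, n_days=18) == 26
    assert count_lanternfish_after(example, n_days=80) == 5934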
def main():
"""Perform the lanternfish population analysis."""
all_lanternfish = parse_input(ROOT.joinpath("data", "input_1.txt"))
n_lanternfish = count_lanternfish_after(all_lanternfish, n_days=80)
print(f"{n_lanternfish} fish after 80 days.")
n_lanternfish = count_lanternfish_after(all_lanternfish, n_days=256)
print(f"{n_lanternfish} fish after 256 days.")
if __name__ == "__main__":
main()
| 3.921875
| 4
|
FaceTime/frida/replay.py
|
googleprojectzero/Street-Party
| 226
|
12784273
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
import frida
import sys
import os
vid_index = 0
aud_index = 0
def on_message(message, data):
global vid_index
global aud_index
print(message)
session = frida.attach("avconferenced")
code = open('replay.js', 'r').read()
script = session.create_script(code)
script.on("message", on_message)
script.load()
print("Press Ctrl-C to quit")
sys.stdin.read()
| 1.828125
| 2
|
esimulation/eobjects/test/test_electricity_provider.py
|
brunoknittel/python-epower-simulation
| 1
|
12784274
|
# Copyright 2020 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ---- END OF LICENSE TEXT ----
import unittest
from datetime import datetime, timezone, timedelta
from ..electricity_provider import ElectricityProvider
class ElectricityProviderTest(unittest.TestCase):
def setUp(self):
config = {
"price_per_kWh_sold": 0.147,
"price_per_kWh_bought": 0.10
}
p = ElectricityProvider()
p.load(config)
self._p = p
self._now = datetime.now(timezone.utc)
self._dt = timedelta(1)
def test_not_loaded(self):
"""
Ensures that an error is raised when trying to use an
electricity provider that hasn't been configured.
This doesn't need the setUp method
"""
p = ElectricityProvider()
when = datetime.now(timezone.utc)
dt = timedelta(1)
with self.assertRaises(RuntimeError):
p.consume_optional(when, dt, 100)
with self.assertRaises(RuntimeError):
p.consume_required(when, dt)
with self.assertRaises(RuntimeError):
p.produce_always(when, dt)
with self.assertRaises(RuntimeError):
p.produce_on_demand(when, dt, 100)
    def test_produce_always(self):
        self.assertEqual(0, self._p.produce_always(self._now, self._dt))
    def test_produce_on_demand(self):
        res = self._p.produce_on_demand(self._now, self._dt, 100.0)
        self.assertEqual(res[0], 100.0)
        self.assertEqual(res[1], 100.0 * 0.147)
    def test_consume_required(self):
        self.assertEqual(0, self._p.consume_required(self._now, self._dt))
    def test_consume_optional(self):
        res = self._p.consume_optional(self._now, self._dt, 100.0)
        self.assertEqual(res[0], 100.0)
        self.assertEqual(res[1], 100.0 * 0.10)
| 2.171875
| 2
|
tutorials/W2D3_DecisionMaking/solutions/W2D3_Tutorial1_Solution_be044152.py
|
liuxiaomiao123/NeuroMathAcademy
| 2
|
12784275
|
sigma = 3.95
num_repeats = 500 # number of simulations to run for each error rate
alpha_list = np.power(10,list(range(-5,0)) + np.linspace(-0.9,-0.1,9).tolist()) # list of error rates
threshold_list = list(map(threshold_from_errorrate, alpha_list))
def simulate_accuracy_vs_speed(sigma, threshold_list, num_sample=100):
"""
Calculate the average decision accuracy vs. average decision speed by running repeated SPRT simulations
with thresholding stopping rule for each threshold
Args:
sigma (float): standard deviation for observation model
threshold_list (list-like object): a list of evidence thresholds to run over
num_sample (int): number of simulations to run per stopping time
Returns:
accuracy_list: a list of average accuracies corresponding to input `stop_time_list`
decision_speed_list: a list of average decision speeds
"""
decision_speed_list = [] # container for average decision speed for each alpha
accuracy_list = [] # container for decision accuracy for each alpha
for threshold in threshold_list:
decision_time_list = [] # container for decision time for each simulation
decision_list = [] # container for decision for every simulation
for i in range(num_repeats):
# run simulation and get decision of current simulation
_, decision, data = simulate_SPRT_threshold(sigma, threshold)
decision_time = len(data)
decision_list.append(decision)
decision_time_list.append(decision_time)
decision_speed = np.mean(1.0 / np.asarray(decision_time_list))
decision_accuracy = sum(decision_list) / len(decision_list)
decision_speed_list.append(decision_speed)
accuracy_list.append(decision_accuracy)
return accuracy_list, decision_speed_list
accuracy_list, decision_speed_list = simulate_accuracy_vs_speed(sigma, threshold_list, num_sample=num_sample)
with plt.xkcd():
simulate_and_plot_speed_vs_accuracy(sigma, threshold_list, num_sample)
| 3.203125
| 3
|
tests/test_module_path_finder.py
|
swinkels/skempy
| 0
|
12784276
|
<reponame>swinkels/skempy
import unittest
from skempy.module_path_finder import ModulePathFinder
from utils import get_abs_path
class TestModulePathFinder(unittest.TestCase):
def test_python_file_in_non_package_directory(self):
test_file_path = get_abs_path("source_code.py", __file__)
module_path = ModulePathFinder().find_path(test_file_path)
self.assertEqual("source_code", module_path)
def test_python_file_in_package_directory(self):
test_file_path = get_abs_path("package/source_code.py", __file__)
module_path = ModulePathFinder().find_path(test_file_path)
self.assertEqual("package.source_code", module_path)
def test_python_file_in_package_directory_tree(self):
test_file_path = get_abs_path("package/sub_package/source_code.py",
__file__)
module_path = ModulePathFinder().find_path(test_file_path)
self.assertEqual("package.sub_package.source_code", module_path)
| 2.65625
| 3
|
bin/apache-hive-3.1.2-bin/lib/py/thrift/__init__.py
|
ptrick/hdfs-hive-sql-playground
| 0
|
12784277
|
version https://git-lfs.github.com/spec/v1
oid sha256:4483dc4e2110743c7c39245cfd5626a1e46f9b4007e129bbf2b7c923d52056ea
size 817
| 0.769531
| 1
|
databricksapi/SQLEndpoints.py
|
lotnikov/databricks_api
| 0
|
12784278
|
from . import Databricks
class SQLEndpoints(Databricks.Databricks):
def __init__(self, url, token=None):
super().__init__(token)
self._url = url
self._api_type = 'sql'
def createEndpoint(self, name, cluster_size, min_num_clusters=1, max_num_clusters=1, auto_stop_mins=10, spot_instance_policy=None,
enable_photon=True, enable_serverless_compute=False, tags=None):
endpoint = 'endpoints'
url = self._set_url(self._url, self._api_type, endpoint)
        payload = {
            "name": name,
            "cluster_size": cluster_size,
            "min_num_clusters": min_num_clusters,
            "max_num_clusters": max_num_clusters,
            "auto_stop_mins": auto_stop_mins,
            "spot_instance_policy": spot_instance_policy,
            "enable_photon": enable_photon,
            "enable_serverless_compute": enable_serverless_compute,
            "tags": tags
        }
return self._post(url, payload)
def listEndpoints(self):
endpoint = 'endpoints'
url = self._set_url(self._url, self._api_type, endpoint)
return self._get(url)
def deleteEndpoint(self, endpoint_id):
endpoint = 'endpoints/'+endpoint_id
url = self._set_url(self._url, self._api_type, endpoint)
        return self._delete(url)
def getEndpoint(self, endpoint_id):
endpoint = 'endpoints/'+endpoint_id
url = self._set_url(self._url, self._api_type, endpoint)
return self._get(url)
def updateEndpoint(self, endpoint_id, name=None, cluster_size=None, min_num_clusters=None, max_num_clusters=None, auto_stop_mins=None, tags=None,
spot_instance_policy=None, enable_photon=None, enable_serverless_compute=None):
endpoint = 'endpoints/'+endpoint_id+'/edit'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {}
        if endpoint_id is not None: payload["id"] = endpoint_id
        if name is not None: payload["name"] = name
        if cluster_size is not None: payload["cluster_size"] = cluster_size
        if min_num_clusters is not None: payload["min_num_clusters"] = min_num_clusters
        if max_num_clusters is not None: payload["max_num_clusters"] = max_num_clusters
        if auto_stop_mins is not None: payload["auto_stop_mins"] = auto_stop_mins
        if tags is not None: payload["tags"] = tags
        if spot_instance_policy is not None: payload["spot_instance_policy"] = spot_instance_policy
        if enable_photon is not None: payload["enable_photon"] = enable_photon
        if enable_serverless_compute is not None: payload["enable_serverless_compute"] = enable_serverless_compute
return self._post(url, payload)
def startEndpoint(self, endpoint_id):
        endpoint = 'endpoints/'+endpoint_id+'/start'
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url)
def stopEndpoint(self, endpoint_id):
        endpoint = 'endpoints/'+endpoint_id+'/stop'
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url)
def listGlobalEndpoints(self):
endpoint = 'config/endpoints'
url = self._set_url(self._url, self._api_type, endpoint)
return self._get(url)
def updateGlobalEndpoints(self, security_policy, data_access_config, instance_profile_arn):
endpoint = 'config/endpoints'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
"security_policy": security_policy,
"data_access_config": data_access_config,
"instance_profile_arn": instance_profile_arn
}
return self._put(url, payload)
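# Illustrative usage sketch (not part of the module). The workspace URL, token and
# names below are placeholders; the class only builds the request and delegates the
# HTTP call to the parent Databricks client.
def _example_usage():
    client = SQLEndpoints("https://example.cloud.databricks.com", token="<placeholder-token>")
    client.createEndpoint(name="demo-warehouse", cluster_size="Small",
                          min_num_clusters=1, max_num_clusters=2, auto_stop_mins=15)
    print(client.listEndpoints())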
| 2.28125
| 2
|
src/node_read_item_forecast.py
|
clarify/data-science-tutorials-orchest
| 0
|
12784279
|
from datetime import datetime, timedelta
from pyclarify import APIClient
import orchest
import pandas as pd
import numpy as np
from merlion.utils import TimeSeries
from merlion.models.forecast.prophet import Prophet, ProphetConfig
from merlion.transform.base import Identity
def pipeline_data(times, values, new_id,new_name, original_id, original_name):
labels = {"source":["Orchest pipelines"], "original_id":[original_id]}
var_name = "clfy_"+new_id
data = {
"name" : new_name,
"labels" : labels,
"times" : times,
"series" : values,
"kargs" : {"sourceType" : "prediction",
"data-source": ["Orchest"],
"description" : f"Forecast for {original_name}"
}
}
return {var_name : data }
def generate_future_timestamps(n_future, timestamps, start):
deltas = [x-timestamps[0] for x in timestamps]
avg_delta=np.mean(deltas)
future = [(i+1)*avg_delta+start for i in range(n_future)]
return future
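# Illustrative example (not part of the pipeline): with epoch-second timestamps
# t = [0, 3600, 7200] and start = 7200, generate_future_timestamps(2, t, 7200)
# returns [10800.0, 14400.0], i.e. start plus multiples of the average offset
# from the first timestamp (3600 s here).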
client = APIClient("./clarify-credentials.json")
inputs = orchest.get_inputs()
invars = [x for x in inputs.keys() if x.startswith("read_config_forecast")]
print(invars)
output_dict={}
for name in invars:
item_id = inputs[name]['item_id']
days = inputs[name]['lag_days']
test_lag = inputs[name]['time_split']
future = inputs[name]['future']
data_params = {
"items": {
"include": True,
"filter": {
"id": {
"$in": [
item_id
]
}
}
},
"data": {
"include": True,
"notBefore": (datetime.now() - timedelta(days=days)).astimezone().isoformat()
}
}
response = client.select_items(data_params)
signal_name = list(response.result.items.values())[0].name
print(f"Name {signal_name} and id {item_id}")
times = response.result.data.times
series = response.result.data.series
df = pd.DataFrame(series)
df.index = [time.replace(tzinfo=None) for time in times]
if len(times) > 0:
tzinfo = times[0].tzinfo
test_data = TimeSeries.from_pd(df[-test_lag:])
train_data = TimeSeries.from_pd(df[0:-test_lag])
config = ProphetConfig(max_forecast_steps=test_lag, add_seasonality="auto", transform=Identity())
model = Prophet(config)
model.train(train_data=train_data)
test_times = test_data.time_stamps
if future > 0:
test_times=test_times+generate_future_timestamps(future, test_data.time_stamps, start=test_data.time_stamps[-1])
test_pred, test_err = model.forecast(time_stamps=test_times)
col = test_pred.names[0]
col_err = test_err.names[0]
forecast_name=col+"_pred"
forecast_name_upper=col+"_upper"
forecast_name_lower=col+"_lower"
forecast_values = test_pred.univariates[col].values
forecast_upper_values= [x+y for x,y in zip(test_pred.univariates[col].values, test_err.univariates[col_err].values)]
forecast_lower_values= [x-y for x,y in zip(test_pred.univariates[col].values, test_err.univariates[col_err].values)]
output_dict.update(pipeline_data(test_pred.time_stamps,forecast_values, forecast_name, f"Forecast {signal_name}", col, signal_name ))
output_dict.update(pipeline_data(test_err.time_stamps,forecast_upper_values, forecast_name_upper, f"Forecast {signal_name} upper bound", col, signal_name ))
output_dict.update(pipeline_data(test_err.time_stamps,forecast_lower_values, forecast_name_lower, f"Forecast {signal_name} lower bound", col, signal_name ))
orchest.output(output_dict, "clfy_dict")
| 2.796875
| 3
|
test/rql_test/drivers/driver_test.py
|
zadcha/rethinkdb
| 21,684
|
12784280
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from driver import bag, compare, err, err_regex, partial, uuid
class PythonTestDriverTest(unittest.TestCase):
def compare(self, expected, result, options=None):
self.assertTrue(compare(expected, result, options=options))
def compareFalse(self, expected, result, options=None):
self.assertFalse(compare(expected, result, options=options))
def test_string(self):
# simple
self.compare('a', 'a')
self.compare('á', 'á')
self.compare('something longer\nwith two lines', 'something longer\nwith two lines')
self.compareFalse('a', 'b')
self.compareFalse('a', 1)
self.compareFalse('a', [])
self.compareFalse('a', None)
self.compareFalse('a', ['a'])
self.compareFalse('a', {'a': 1})
def test_array(self):
# simple pass
self.compare([1, 2, 3], [1, 2, 3])
# out of order
self.compareFalse([1, 2, 3], [1, 3, 2])
        # totally mismatched lists
self.compareFalse([1, 2, 3], [3, 4, 5])
# missing items
self.compareFalse([1, 2, 3], [1, 2])
self.compareFalse([1, 2, 3], [1, 3])
# extra items
self.compareFalse([1, 2, 3], [1, 2, 3, 4])
# empty array
self.compare([], [])
self.compareFalse([1], [])
self.compareFalse([], [1])
self.compareFalse([], None)
# strings
self.compare(['a', 'b'], ['a', 'b'])
self.compareFalse(['a', 'c'], ['a', 'b'])
# multiple of a single value
self.compare([1, 2, 2, 3, 3, 3], [1, 2, 2, 3, 3, 3])
self.compareFalse([1, 2, 2, 3, 3, 3], [1, 2, 3])
self.compareFalse([1, 2, 3], [1, 2, 2, 3, 3, 3])
def test_array_partial(self):
'''note that these are all in-order'''
# simple
self.compare(partial([1]), [1, 2, 3])
self.compare(partial([2]), [1, 2, 3])
self.compare(partial([3]), [1, 2, 3])
self.compare(partial([1, 2]), [1, 2, 3])
self.compare(partial([1, 3]), [1, 2, 3])
self.compare(partial([1, 2, 3]), [1, 2, 3])
self.compareFalse(partial([4]), [1, 2, 3])
# ordered
self.compareFalse(partial([3, 2, 1], ordered=True), [1, 2, 3])
self.compareFalse(partial([1, 3, 2], ordered=True), [1, 2, 3])
# empty array
self.compare(partial([]), [1, 2, 3])
        # multiple copies of a single item
self.compare(partial([1, 2, 2]), [1, 2, 2, 3, 3, 3])
self.compareFalse(partial([1, 2, 2, 2]), [1, 2, 2, 3, 3, 3])
def test_array_unordered(self):
# simple
self.compare(bag([1, 2]), [1, 2])
self.compare(bag([2, 1]), [1, 2])
self.compareFalse(bag([1, 2]), [1, 2, 3])
self.compareFalse(bag([1, 3]), [1, 2, 3])
self.compareFalse(bag([3, 1]), [1, 2, 3])
# empty array
self.compare(bag([]), [])
def test_dict(self):
# simple
self.compare({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 2, 'c': 3})
self.compare({'a': 1, 'b': 2, 'c': 3}, {'c': 3, 'a': 1, 'b': 2})
self.compareFalse({'a': 1, 'b': 2, 'c': 3}, {'a': 1})
self.compareFalse({'a': 1}, {'a': 1, 'b': 2, 'c': 3})
# empty
self.compare({}, {})
self.compareFalse({}, {'a': 1})
self.compareFalse({'a': 1}, {})
def test_dict_partial(self):
# simple
self.compare(partial({'a': 1}), {'a': 1})
self.compare(partial({'a': 1}), {'a': 1, 'b': 2})
self.compareFalse(partial({'a': 2}), {'a': 1, 'b': 2})
self.compareFalse(partial({'c': 1}), {'a': 1, 'b': 2})
self.compareFalse(partial({'a': 1, 'b': 2}), {'b': 2})
# empty
self.compare(partial({}), {})
self.compare(partial({}), {'a': 1})
self.compareFalse(partial({'a': 1}), {})
def test_compare_dict_in_array(self):
# simple
self.compare([{'a': 1}], [{'a': 1}])
self.compare([{'a': 1, 'b': 2}], [{'a': 1, 'b': 2}])
self.compare([{'a': 1}, {'b': 2}], [{'a': 1}, {'b': 2}])
self.compareFalse([{'a': 1}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'b': 2}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'c': 3}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'c': 3}], [{'a': 1}])
self.compareFalse([{'a': 1}, {'b': 2}], [{'a': 1, 'b': 2}])
# order
self.compareFalse([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# partial
self.compare(partial([{}]), [{'a': 1, 'b': 2}])
self.compare(partial([{}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1, 'b': 2}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1}, {'b': 2}]), [{'a': 1}, {'b': 2}, {'c': 3}])
self.compareFalse(partial([{'a': 2}]), [{'a': 1, 'b': 2}])
self.compareFalse(partial([{'a': 1, 'b': 2}]), [{'a': 1}])
# partial order
self.compareFalse(partial([{'a': 1}, {'b': 2}], ordered=True), [{'b': 2}, {'a': 1}])
# partial unordered
self.compare(partial([{'a': 1}, {'b': 2}]), [{'b': 2}, {'a': 1}])
self.compare(partial([{'a': 1}, {'b': 2}], ordered=False), [{'b': 2}, {'a': 1}])
def test_compare_partial_items_in_array(self):
self.compare([{'a': 1, 'b': 1}, partial({'a': 2})], [{'a': 1, 'b': 1}, {'a': 2, 'b': 2}])
def test_compare_array_in_dict(self):
pass
def test_exception(self):
# class only
self.compare(KeyError, KeyError())
self.compare(KeyError(), KeyError())
self.compare(err('KeyError'), KeyError())
self.compare(err(KeyError), KeyError())
self.compareFalse(KeyError, NameError())
self.compareFalse(KeyError(), NameError())
self.compareFalse(err('KeyError'), NameError())
self.compareFalse(err(KeyError), NameError())
# subclass
self.compare(LookupError, KeyError())
self.compare(LookupError(), KeyError())
self.compare(err('LookupError'), KeyError())
self.compare(err(LookupError), KeyError())
self.compareFalse(KeyError, LookupError())
self.compareFalse(KeyError(), LookupError())
self.compareFalse(err('KeyError'), LookupError())
self.compareFalse(err(KeyError), LookupError())
# message
self.compare(err(KeyError), KeyError('alpha'))
self.compare(err(KeyError, 'alpha'), KeyError('alpha'))
self.compareFalse(err(KeyError, 'alpha'), KeyError('beta'))
# regex message
self.compare(err(KeyError), KeyError('alpha'))
# regex message with debug/assertion text
self.compare(err_regex(KeyError, 'alpha'), KeyError('alpha'))
self.compare(err_regex(KeyError, 'alp'), KeyError('alpha'))
self.compare(err_regex(KeyError, '.*pha'), KeyError('alpha'))
self.compareFalse(err_regex(KeyError, 'beta'), KeyError('alpha'))
# ToDo: frames (when/if we support them)
def test_compare_uuid(self):
# simple
self.compare(uuid(), '4e9e5bc2-9b11-4143-9aa1-75c10e7a193a')
self.compareFalse(uuid(), '4')
self.compareFalse(uuid(), '*')
self.compareFalse(uuid(), None)
def test_numbers(self):
# simple
self.compare(1, 1)
self.compare(1, 1.0)
self.compare(1.0, 1)
self.compare(1.0, 1.0)
self.compareFalse(1, 2)
self.compareFalse(1, 2.0)
self.compareFalse(1.0, 2)
self.compareFalse(1.0, 2.0)
# precision
precision = {'precision': 0.5}
self.compare(1, 1.4, precision)
self.compare(1.0, 1.4, precision)
self.compareFalse(1, 2, precision)
self.compareFalse(1, 1.6, precision)
self.compareFalse(1.0, 2, precision)
self.compareFalse(1.0, 1.6, precision)
if __name__ == '__main__':
unittest.main()
| 2.96875
| 3
|
xinshuo_io/google_api_io.py
|
xinshuoweng/cv_ml_tool
| 31
|
12784281
|
<reponame>xinshuoweng/cv_ml_tool
# Author: <NAME>
# email: <EMAIL>
# this file includes functions for google gpi, such as google sheet
import os, httplib2
from oauth2client import client, tools
from oauth2client.file import Storage
from googleapiclient import discovery
from xinshuo_miscellaneous import isstring, islistoflist, islist
"""
BEFORE RUNNING:
---------------
1. If not already done, enable the Google Sheets API
and check the quota for your project at
https://console.developers.google.com/apis/api/sheets
2. Install the Python client library for Google APIs by running
`pip install --upgrade `
"""
# Shared OAuth configuration. get_credentials() relies on these names, so they are
# defined at module level rather than only inside get_sheet_service().
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def get_sheet_service():
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
# Authorize using one of the following scopes:
# 'https://www.googleapis.com/auth/drive'
# 'https://www.googleapis.com/auth/drive.file'
# 'https://www.googleapis.com/auth/drive.readonly'
# 'https://www.googleapis.com/auth/spreadsheets'
# 'https://www.googleapis.com/auth/spreadsheets.readonly'
# 'https://www.googleapis.com/auth/drive'
# 'https://www.googleapis.com/auth/drive.file'
# 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python'
# TODO: Change placeholder below to generate authentication credentials. See
# https://developers.google.com/sheets/quickstart/python#step_3_set_up_the_sample
# credentials = None
credentials = get_credentials()
service = discovery.build('sheets', 'v4', credentials=credentials)
return service
def update_patchs2sheet(service, sheet_id, starting_position, data, debug=True):
'''
update a list of list data to a google sheet continuously
parameters:
service: a service request to google sheet
        sheet_id: a string to identify the sheet uniquely
        starting_position: a string existing in the sheet to represent the left-top corner of the patch to fill in
data: a list of list data to fill
'''
if debug:
        assert isstring(sheet_id), 'the sheet id is not a string'
        assert isstring(starting_position), 'the starting position is not correct'
        assert islistoflist(data), 'the input data is not a list of list'
# How the input data should be interpreted.
value_input_option = 'RAW' # TODO: Update placeholder value.
value_range_body = {'values': data}
request = service.spreadsheets().values().update(spreadsheetId=sheet_id, range=starting_position, valueInputOption=value_input_option, body=value_range_body)
response = request.execute()
def update_row2sheet(service, sheet_id, row_starting_position, data, debug=True):
'''
update a list of data to a google sheet continuously
parameters:
service: a service request to google sheet
        sheet_id: a string to identify the sheet uniquely
        row_starting_position: a string existing in the sheet to represent the left-top corner of the row to fill in
        data: a list of data to fill
'''
if debug:
        assert isstring(sheet_id), 'the sheet id is not a string'
        assert isstring(row_starting_position), 'the starting position is not correct'
        assert islist(data), 'the input data is not a list'
# How the input data should be interpreted.
value_input_option = 'RAW' # TODO: Update placeholder value.
value_range_body = {'values': [data]}
request = service.spreadsheets().values().update(spreadsheetId=sheet_id, range=row_starting_position, valueInputOption=value_input_option, body=value_range_body)
response = request.execute()
def get_data_from_sheet(service, sheet_id, search_range, debug=True):
'''
get a list of data from a google sheet continuously
parameters:
service: a service request to google sheet
        sheet_id: a string to identify the sheet uniquely
search_range: a list of position queried
'''
if debug:
        assert isstring(sheet_id), 'the sheet id is not a string'
        assert islist(search_range), 'the search range is not a list'
# print(search_range)
# How the input data should be interpreted.
# value_input_option = 'RAW' # TODO: Update placeholder value.
# value_range_body = {'values': [data]}
request = service.spreadsheets().values().batchGet(spreadsheetId=sheet_id, ranges=search_range)
while True:
try:
response = request.execute()
break
        except Exception:
            continue
data = list()
# print(response['valueRanges'])
for raw_data in response['valueRanges']:
if 'values' in raw_data:
data.append(raw_data['values'][0][0])
else:
data.append('')
return data
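# Illustrative usage sketch (not part of the original module). The spreadsheet id,
# ranges and values are placeholders; update_patchs2sheet expects a list of rows.
def _example_sheet_roundtrip():
    service = get_sheet_service()
    sheet_id = '<placeholder-spreadsheet-id>'
    update_patchs2sheet(service, sheet_id, 'A1', [['name', 'score'], ['exp1', 0.91]])
    update_row2sheet(service, sheet_id, 'A3', ['exp2', 0.88])
    print(get_data_from_sheet(service, sheet_id, ['Sheet1!A1', 'Sheet1!B2']))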
| 2.453125
| 2
|
src/dataset/normalizers.py
|
mug-auth/ssl-chewing
| 0
|
12784282
|
<reponame>mug-auth/ssl-chewing<filename>src/dataset/normalizers.py
from abc import ABC, abstractmethod
import numpy as np
from dataset.template.commons import PureAbstractError
class BaseNormalizer(ABC):
"""
Base class for normalizers.
"""
@abstractmethod
def normalize(self, x: np.ndarray) -> np.ndarray:
raise PureAbstractError()
class ZeroMeanNormalizer(BaseNormalizer):
def normalize(self, x: np.ndarray) -> np.ndarray:
assert isinstance(x, np.ndarray)
return x - np.mean(x)
class StandardizerNormalizer(BaseNormalizer):
def __init__(self, min_sigma: float = 1e-9):
assert isinstance(min_sigma, float)
# self._min_sigma: np.ndarray = np.array(min_sigma)
self._min_sigma: float = min_sigma
def normalize(self, x: np.ndarray) -> np.ndarray:
assert isinstance(x, np.ndarray)
sigma = np.max((np.std(x), self._min_sigma))
return (x - np.mean(x)) / sigma
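# Illustrative usage sketch (not part of the module): both normalizers take a plain
# numpy array and return a new array.
def _example_normalization() -> None:
    x = np.array([1.0, 2.0, 3.0, 4.0])
    centered = ZeroMeanNormalizer().normalize(x)           # [-1.5, -0.5, 0.5, 1.5]
    standardized = StandardizerNormalizer().normalize(x)   # zero mean, unit std (std clamped to min_sigma)
    assert abs(centered.mean()) < 1e-12
    assert abs(standardized.std() - 1.0) < 1e-6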
| 2.84375
| 3
|
compare_hosts_lists.py
|
ayumi-cloud/hosts
| 0
|
12784283
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import argparse
import os.path
from collections import namedtuple
from HostsTools import hosts_tools
class HostList(namedtuple('HostList', 'filename set')):
pass
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('filename_a', type=str, help='First list to compare')
parser.add_argument('filename_b', type=str, help='Second list to compare')
parser.add_argument('--diff', default=False, action='store_true',
help='Show a full diff of the lists')
args = parser.parse_args()
if not (args.filename_a and args.filename_b):
parser.print_help()
exit(1)
validate_filename_args(args)
return args
def validate_filename_args(args) -> None:
if not os.path.isfile(args.filename_a):
raise Exception('Invalid host file: ', args.filename_a)
if not os.path.isfile(args.filename_b):
raise Exception('Invalid host file: ', args.filename_b)
def main() -> None:
args = parse_args()
filename_a = args.filename_a
filename_b = args.filename_b
set_a = hosts_tools.load_domains_from_list(filename_a)
set_b = hosts_tools.load_domains_from_list(filename_b)
list_a = HostList(filename_a, set_a)
list_b = HostList(filename_b, set_b)
print()
print_list_size(list_a, list_b)
print()
print_list_difference(list_a, list_b)
if args.diff:
print()
print_list_diff(list_a, list_b)
def print_list_size(list_a: HostList, list_b: HostList) -> None:
size_a = len(list_a.set)
size_b = len(list_b.set)
difference = size_a - size_b
print('Number of unique host entries: %s' % difference)
print_list_fact(list_a.filename, size_a)
print_list_fact(list_b.filename, size_b)
def print_list_difference(list_a: HostList, list_b: HostList) -> None:
unique_list_a = list_a.set - list_b.set
size_unique_a = len(unique_list_a)
percentage_unique_a = round((size_unique_a / len(list_a.set)) * 100, 2)
unique_list_b = list_b.set - list_a.set
size_unique_b = len(unique_list_b)
percentage_unique_b = round((size_unique_b / len(list_b.set)) * 100, 2)
print('Number of unique hosts not in the other list:')
print_list_fact(list_a.filename, f'{size_unique_a} ({percentage_unique_a}%)')
print_list_fact(list_b.filename, f'{size_unique_b} ({percentage_unique_b}%)')
def print_list_fact(list_name, fact) -> None:
print('{:<30}{:<30}'.format(list_name, fact))
def print_list_diff(list_a: HostList, list_b: HostList) -> None:
full_set = list_a.set.union(list_b.set)
full_set_sorted = hosts_tools.sort_domains(list(full_set))
print('Lists Diff:')
print('{:<50}{:<50}'.format(list_a.filename, list_b.filename))
for domain in full_set_sorted:
list_a_value = domain if domain in list_a.set else ''
list_b_value = domain if domain in list_b.set else ''
if list_a_value != list_b_value:
print('{:<50}{:<50}'.format(list_a_value, list_b_value))
if __name__ == "__main__":
main()
| 2.875
| 3
|
src/ffmeta/__init__.py
|
stephen-bunn/ffmeta
| 0
|
12784284
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2021 st37 <<EMAIL>>
# ISC License <https://choosealicense.com/licenses/isc>
"""FFMeta.
Tool to write media metadata using ffmpeg.
"""
| 0.914063
| 1
|
tests/test_middleware.py
|
TheNeonProject/trivia-game-dialogflow
| 0
|
12784285
|
<filename>tests/test_middleware.py
import os
import unittest
from middleware import is_token_valid
class MiddlewareTestCase(unittest.TestCase):
def setUp(self):
os.environ.setdefault('TOKEN', 'token')
def test_is_token_valid_empty_token(self):
is_valid = is_token_valid(None)
self.assertFalse(is_valid)
def test_is_token_valid_invalid_token(self):
is_valid = is_token_valid('invalid_token')
self.assertFalse(is_valid)
def test_is_token_valid_return_true(self):
is_valid = is_token_valid('token')
self.assertTrue(is_valid)
| 3.109375
| 3
|
infer.py
|
kevakil/mesh-transformer-jax
| 0
|
12784286
|
<reponame>kevakil/mesh-transformer-jax
import os
import requests
from jax.config import config
colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0]
url = f'http://{colab_tpu_addr}:8475/requestversion/tpu_driver0.1_dev20210607'
requests.post(url)
# The following is required to use TPU Driver as JAX's backend.
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
import jax
print(jax.devices())
import time
import jax
from jax.experimental import maps
import numpy as np
import optax
import transformers
from mesh_transformer.checkpoint import read_ckpt_lowmem
from mesh_transformer.sampling import nucleaus_sample
from mesh_transformer.transformer_shard import CausalTransformer
params = {
"layers": 28,
"d_model": 4096,
"n_heads": 16,
"n_vocab": 50400,
"norm": "layernorm",
"pe": "rotary",
"pe_rotary_dims": 64,
"seq": 2048,
"cores_per_replica": 8,
"per_replica_batch": 1,
}
per_replica_batch = params["per_replica_batch"]
cores_per_replica = params["cores_per_replica"]
seq = params["seq"]
params["sampler"] = nucleaus_sample
# here we "remove" the optimizer parameters from the model (as we don't need them for inference)
params["optimizer"] = optax.scale(0)
mesh_shape = (jax.device_count() // cores_per_replica, cores_per_replica)
devices = np.array(jax.devices()).reshape(mesh_shape)
maps.thread_resources.env = maps.ResourceEnv(maps.Mesh(devices, ('dp', 'mp')))
tokenizer = transformers.GPT2TokenizerFast.from_pretrained('gpt2')
total_batch = per_replica_batch * jax.device_count() // cores_per_replica
network = CausalTransformer(params)
network.state = read_ckpt_lowmem(network.state, "step_383500/", devices.shape[1])
network.state = network.move_xmap(network.state, np.zeros(cores_per_replica))
def infer(context, top_p=0.9, temp=1.0, gen_len=512):
tokens = tokenizer.encode(context)
provided_ctx = len(tokens)
pad_amount = seq - provided_ctx
padded_tokens = np.pad(tokens, ((pad_amount, 0),)).astype(np.uint32)
batched_tokens = np.array([padded_tokens] * total_batch)
length = np.ones(total_batch, dtype=np.uint32) * len(tokens)
start = time.time()
output = network.generate(batched_tokens, length, gen_len, {"top_p": np.ones(total_batch) * top_p, "temp": np.ones(total_batch) * temp})
samples = []
decoded_tokens = output[1][0]
for o in decoded_tokens[:, :, 0]:
samples.append(f"\033[1m{context}\033[0m{tokenizer.decode(o)}")
print(f"completion done in {time.time() - start:06}s")
return samples
print(infer("EleutherAI is")[0])
| 2.046875
| 2
|
tests/test_champion_rates.py
|
Canisback/solari
| 16
|
12784287
|
from solari import Leona
from solari.stats import ChampionPickrate, ChampionWinrate, ChampionPickCount, ChampionBanrate, ChampionPresenceRate, ChampionBanCount
def test_champion_pickrate(match_set_2):
l = Leona([
ChampionPickrate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira got picked 5 times out of 20 games
assert stats["Pickrate"].loc[777] == 5/20
def test_champion_winrate(match_set_2):
l = Leona([
ChampionWinrate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira won 4 times out of 5 games
assert stats["Winrate"].loc[777] == 4/5
def test_champion_banrate(match_set_2):
l = Leona([
ChampionBanrate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned in 9 out of 19 games
assert stats["Banrate"].loc[777] == 9/19
def test_champion_banrate_teamwise(match_set_2):
l = Leona([
ChampionBanrate(team_wise=True)
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
    # Samira was banned 10 times across 19 games
assert stats["Banrate"].loc[777] == 10/19
def test_champion_pick_count(match_set_2):
l = Leona([
ChampionPickCount()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was picked 5 times
assert stats["Pick Count"].loc[777] == 5
def test_champion_ban_count(match_set_2):
l = Leona([
ChampionBanCount()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned in 9 games
assert stats["Ban Count"].loc[777] == 9
def test_champion_ban_count_teamwise(match_set_2):
l = Leona([
ChampionBanCount(team_wise=True)
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned 10 times
assert stats["Ban Count"].loc[777] == 10
def test_champion_presence(match_set_2):
l = Leona([
ChampionPickrate(),
ChampionBanrate(),
ChampionPresenceRate()
])
for m in match_set_2:
l.push_match(m)
stats = l.get_stats()
# Samira was banned in 9 games and picked in 5 games out of 20
assert stats["Presence"].loc[777] == (5 + 9) / 20
| 2.4375
| 2
|
_scripts/paste/getdecoproperties.py
|
Son-Guhun/Titan-Land-Lands-of-Plenty
| 12
|
12784288
|
"""Gets properties from all decorations a .ini database and generates code to be used in the
UnitTypeDefaultValues library. This code is copied to the clipboard and can be pasted in a text
editor or inside the World Editor.
"""
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
#______________________________________________________________________________________________
from myconfigparser import MyConfigParser, load_unit_data, get_decorations
import pyperclip
keys = ['modelScale','red','green','blue','animProps','maxRoll']
def do(file_path='../../development/table/unit.ini'):
with open(file_path) as f:
unit_data = load_unit_data(f)
result = []
decos = get_decorations(unit_data)
for deco in decos:
for key in keys:
if key in unit_data[deco]:
if key == 'animProps':
result.append(" set UnitTypeDefaultValues('{}').{} = {}".format(deco,
key,
unit_data[deco][key].replace(',', ' ')))
elif key != 'maxRoll' or float(unit_data[deco]['maxRoll']) < 0:
result.append(" set UnitTypeDefaultValues('{}').{} = {}".format(deco,
key,
unit_data[deco][key]))
pyperclip.copy('\n'.join(result))
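if __name__ == '__main__':
    # Hedged addition (not part of the original script): runs do() with the default
    # unit.ini path declared above and leaves the generated code on the clipboard.
    do()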
| 2.296875
| 2
|
labml/internal/tracker/writers/screen.py
|
conanjm/labml
| 1
|
12784289
|
<reponame>conanjm/labml<gh_stars>1-10
from typing import Dict
import numpy as np
from labml import logger
from .. import Writer, Indicator
from ..indicators.artifacts import Artifact
from ..indicators.numeric import NumericIndicator
from labml.logger import Text
class ScreenWriter(Writer):
def __init__(self):
super().__init__()
self._estimates = {}
self._beta = 0.9
self._beta_pow = {}
self._last_printed_value = {}
def update_estimate(self, k, v):
if k not in self._estimates:
self._estimates[k] = 0
self._beta_pow[k] = 1.
self._estimates[k] *= self._beta
self._estimates[k] += (1 - self._beta) * v
self._beta_pow[k] *= self._beta
def get_empty_string(self, length, decimals):
return ' ' * (length - 2 - decimals) + '-.' + '-' * decimals
def get_value_string(self, k, v):
if k not in self._estimates:
assert v is None
return self.get_empty_string(8, 2)
estimate = self._estimates[k] / (1 - self._beta_pow[k])
if abs(estimate) < 1e-9 or np.isnan(estimate):
lg = 0
else:
lg = int(np.ceil(np.log10(abs(estimate)))) + 1
decimals = 7 - lg
decimals = max(1, decimals)
decimals = min(6, decimals)
fmt = "{v:8,." + str(decimals) + "f}"
if v is None:
return self.get_empty_string(8, decimals)
else:
return fmt.format(v=v)
@staticmethod
def __format_artifact(length: int, value: str):
fmt = "{v:>" + str(length + 1) + "}"
return fmt.format(v=value)
def _get_indicator_string(self, indicators: Dict[str, Indicator]):
parts = []
for ind in indicators.values():
if not isinstance(ind, NumericIndicator):
continue
if not ind.is_print:
continue
parts.append((f" {ind.name}: ", None))
if not ind.is_empty():
v = ind.get_mean()
self.update_estimate(ind.name, v)
value = self.get_value_string(ind.name, v)
self._last_printed_value[ind.name] = value
parts.append((value, Text.value))
elif ind.name in self._last_printed_value:
value = self._last_printed_value[ind.name]
parts.append((value, Text.subtle))
else:
value = self.get_value_string(ind.name, None)
parts.append((value, Text.subtle))
return parts
@staticmethod
def _print_artifacts_list(table: Dict[str, int], artifacts: Dict[str, Artifact]):
order = list(table.keys())
if not len(order):
return
keys = {k for name in order for k in artifacts[name].keys()}
for k in keys:
for name in order:
value = artifacts[name].get_string(k, artifacts)
logger.log([(name, Text.key),
": ",
(value, Text.value)])
def _print_artifacts_table(self, table: Dict[str, int], artifacts: Dict[str, Artifact]):
order = list(table.keys())
if not len(order):
return
keys = []
keys_set = set()
for name in order:
for k in artifacts[name].keys():
if k not in keys_set:
keys_set.add(k)
keys.append(k)
parts = [self.__format_artifact(table[name], name) for name in order]
logger.log('|'.join(parts), Text.heading)
for k in keys:
parts = []
for name in order:
value = artifacts[name].get_string(k, artifacts)
parts.append(self.__format_artifact(table[name], value))
logger.log('|'.join(parts), Text.value)
def _print_artifacts(self, indicators: Dict[str, Indicator]):
table = {}
artifacts = {}
for ind in indicators.values():
if not isinstance(ind, Artifact):
continue
if not ind.is_print:
continue
if ind.is_empty():
continue
if not ind.is_indexed:
ind.print_all()
continue
table[ind.name] = ind.get_print_length()
artifacts[ind.name] = ind
if sum(table.values()) > 100:
self._print_artifacts_list(table, artifacts)
else:
self._print_artifacts_table(table, artifacts)
def write(self, *,
global_step: int,
indicators: Dict[str, Indicator]):
self._print_artifacts(indicators)
return self._get_indicator_string(indicators)
| 2.34375
| 2
|
tests/unit/test_order.py
|
Aspire1Inspire2/td-ameritrade-python-api
| 610
|
12784290
|
<gh_stars>100-1000
import unittest
import td.enums as td_enums
from unittest import TestCase
from configparser import ConfigParser
from td.orders import Order
from td.orders import OrderLeg
from td.client import TDClient
from td.stream import TDStreamerClient
class TDSession(TestCase):
"""Will perform a unit test for the TD session."""
def setUp(self) -> None:
"""Set up the Robot."""
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
CLIENT_ID = config.get('main', 'CLIENT_ID')
REDIRECT_URI = config.get('main', 'REDIRECT_URI')
JSON_PATH = config.get('main', 'JSON_PATH')
ACCOUNT_NUMBER = config.get('main', 'ACCOUNT_NUMBER')
# Initalize the session.
self.td_session = TDClient(
client_id=CLIENT_ID,
redirect_uri=REDIRECT_URI,
credentials_path=JSON_PATH,
account_number=ACCOUNT_NUMBER
)
self.td_order = Order()
self.td_order_leg = OrderLeg()
def test_creates_instance_of_session(self):
"""Create an instance and make sure it's a robot."""
self.assertIsInstance(self.td_session, TDClient)
self.assertIsInstance(self.td_order, Order)
self.assertIsInstance(self.td_order_leg, OrderLeg)
def test_define_simple_order(self):
"""Test creating a simple order."""
# Add the Order session.
self.td_order.order_session(
session=td_enums.ORDER_SESSION.NORMAL
)
# Add the Order duration.
self.td_order.order_duration(
duration=td_enums.DURATION.GOOD_TILL_CANCEL
)
# Add the Order Leg Instruction.
self.td_order_leg.order_leg_instruction(
instruction=td_enums.ORDER_INSTRUCTIONS.SELL
)
# Add the Order Leg price.
self.td_order_leg.order_leg_price(
price=112.50
)
# Add the Order Leg quantity.
self.td_order_leg.order_leg_quantity(
quantity=10
)
# Add the Order Leg Asset.
self.td_order_leg.order_leg_asset(
asset_type=td_enums.ORDER_ASSET_TYPE.EQUITY,
symbol='MSFT'
)
# Add the Order Leg.
self.td_order.add_order_leg(
order_leg=self.td_order_leg
)
correct_dict = {
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderLegCollection": [
{
"instruction": "SELL",
"price": 112.5, "quantity": 10,
"instrument": {
"assetType":
"EQUITY",
"symbol": "MSFT"
}
}
]
}
self.assertDictEqual(correct_dict, self.td_order._grab_order())
def tearDown(self):
"""Clean Up."""
self.td_session = None
self.td_order = None
self.td_order_leg = None
if __name__ == '__main__':
unittest.main()
| 2.859375
| 3
|
tests/test_utils.py
|
afonsobspinto/pyecore
| 1
|
12784291
|
import pytest
from pyecore.ecore import *
from pyecore.utils import DynamicEPackage
@pytest.fixture(scope='module')
def simplemm():
A = EClass('A')
B = EClass('B')
Root = EClass('Root')
pack = EPackage('pack', nsURI='http://pack/1.0', nsPrefix='pack')
pack.eClassifiers.extend([Root, A, B])
return pack
@pytest.fixture(scope='module')
def complexmm():
A = EClass('A')
B = EClass('B')
Root = EClass('Root')
pack = EPackage('pack', nsURI='http://pack/1.0', nsPrefix='pack')
pack.eClassifiers.extend([Root, A, B])
innerpackage = EPackage('inner', nsURI='http://inner', nsPrefix='inner')
C = EClass('C')
D = EClass('D')
innerpackage.eClassifiers.extend([C, D])
pack.eSubpackages.append(innerpackage)
return pack
def test_dynamic_access_eclasses(simplemm):
SimpleMM = DynamicEPackage(simplemm)
assert SimpleMM.A
assert SimpleMM.B
def test_dynamic_access_innerpackage(complexmm):
ComplexMM = DynamicEPackage(complexmm)
assert ComplexMM.A
assert ComplexMM.B
assert ComplexMM.inner.C
assert ComplexMM.inner.D
def test_dynamic_addition_eclasses(complexmm):
ComplexMM = DynamicEPackage(complexmm)
E = EClass('E')
complexmm.eClassifiers.append(E)
assert ComplexMM.E
F = EClass('F')
complexmm.eSubpackages[0].eClassifiers.append(F)
assert ComplexMM.inner.F
G = EClass('G')
H = EClass('H')
complexmm.eClassifiers.extend([G, H])
assert ComplexMM.G
assert ComplexMM.H
def test_dynamic_removal_eclasses(complexmm):
ComplexMM = DynamicEPackage(complexmm)
assert ComplexMM.Root
complexmm.eClassifiers.remove(ComplexMM.Root)
with pytest.raises(AttributeError):
ComplexMM.Root
assert ComplexMM.A
complexmm.eClassifiers[0].delete()
with pytest.raises(AttributeError):
ComplexMM.A
| 2.15625
| 2
|
custom_layers.py
|
martinpilat/RBFolutionalLayer
| 3
|
12784292
|
import tensorflow as tf
import numpy as np
class RBFolution(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size=(1, 3, 3, 1), padding="VALID", strides=(1, 1, 1, 1), name="RBFolution",
dilation_rate=(1,1),
ccs_initializer=tf.keras.initializers.RandomUniform(0,1),
beta_initilizer=tf.keras.initializers.RandomUniform(0,1)):
super(RBFolution, self).__init__(name=name)
self.padding = padding
self.strides = strides
self.filters = filters
self.kernel_size = kernel_size
self.ccs_initializer = ccs_initializer
self.beta_initilizer = beta_initilizer
self.dilation_rate = dilation_rate
def build(self, input_shape):
self.input_s = input_shape
self.output_s = self.compute_output_shape(input_shape)
patch_dim = np.prod(self.kernel_size[1:])
self.ccs_tensor = self.add_weight("cluster_centers", shape=(patch_dim, self.filters), dtype="float32", initializer=self.ccs_initializer)
self.beta = self.add_weight("beta", shape=[self.filters], dtype="float32", initializer=self.beta_initilizer)
def call(self, input, **kwargs):
return tf.reshape(self.__rbfolution(input), self.output_s)
def compute_output_shape(self, input_shape):
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = RBFolution.conv_output_length(
space[i],
self.kernel_size[1:-1][i],
padding=self.padding.lower(),
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim.value)
return (-1,) + tuple(new_space) + (self.filters,)
def __rbfolution(self, inputs):
batch_size = tf.shape(inputs)[0]
patch_dim = np.prod(self.kernel_size[1:])
# Patches extracted from the images (convolution-like).
# shape=[batch_size, new_height, new_width, patch_dim] (i. e. individual
# patches are flattened)
# tf.extract_image_patches "Only supports ksizes across space" -> we change
# kernel_size[3] to 1.
patches = tf.extract_image_patches(
inputs,
ksizes=list(self.kernel_size[:3]) + [1],
strides=self.strides,
rates=[1, 1, 1, 1],
padding=self.padding
)
patches_shape = tf.shape(patches)
new_height = patches_shape[1]
new_width = patches_shape[2]
# shape=[batch_size, num_patches, patch_dim]
reshaped_patches = tf.reshape(patches, [batch_size, -1, patch_dim])
# all_scores[i,j,k] = sum_{l=0}^{patch_dim-1} (
# (ccs_tensor[l,k] - reshaped_patches[i,j,l]) ** 2
# )
# shape=[batch_size, num_patches, filters]
all_scores = (
tf.reduce_sum(tf.square(reshaped_patches), 2, keepdims=True) -
2 * tf.einsum("aij,jk->aik", reshaped_patches, self.ccs_tensor) +
tf.reduce_sum(tf.square(self.ccs_tensor), 0, keepdims=True)
)
res = tf.reshape(
tf.exp(tf.multiply(-self.beta, all_scores)),
[batch_size, new_height, new_width, self.filters],
name="rbfolution_activation"
)
return res
@staticmethod
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full", "causal"
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full', 'causal'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ['same', 'causal']:
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
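if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): assumes a TF 1.x-era
    # Keras stack, since tf.extract_image_patches and Dimension.value are used above.
    inputs = tf.keras.Input(shape=(28, 28, 1))
    features = RBFolution(filters=8)(inputs)
    model = tf.keras.Model(inputs, features)
    model.summary()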
| 2.4375
| 2
|
main.py
|
StanislavPetrovV/Advanced_RayMarching
| 11
|
12784293
|
<gh_stars>10-100
import moderngl_window as mglw
class App(mglw.WindowConfig):
window_size = 1600, 900
resource_dir = 'programs'
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.quad = mglw.geometry.quad_fs()
self.program = self.load_program(vertex_shader='vertex.glsl', fragment_shader='fragment.glsl')
self.u_scroll = 3.0
# textures
self.texture1 = self.load_texture_2d('../textures/test0.png')
self.texture2 = self.load_texture_2d('../textures/hex.png') # floor
self.texture3 = self.load_texture_2d('../textures/white_marble1.png') # walls
self.texture4 = self.load_texture_2d('../textures/roof/texture3.jpg') # roof
self.texture5 = self.load_texture_2d('../textures/black_marble1.png') # pedestal
self.texture6 = self.load_texture_2d('../textures/green_marble1.png') # sphere
self.texture7 = self.load_texture_2d('../textures/roof/height3.png') # roof bump
# uniforms
self.program['u_scroll'] = self.u_scroll
self.program['u_resolution'] = self.window_size
self.program['u_texture1'] = 1
self.program['u_texture2'] = 2
self.program['u_texture3'] = 3
self.program['u_texture4'] = 4
self.program['u_texture5'] = 5
self.program['u_texture6'] = 6
self.program['u_texture7'] = 7
def render(self, time, frame_time):
self.ctx.clear()
self.program['u_time'] = time
self.texture1.use(location=1)
self.texture2.use(location=2)
self.texture3.use(location=3)
self.texture4.use(location=4)
self.texture5.use(location=5)
self.texture6.use(location=6)
self.texture7.use(location=7)
self.quad.render(self.program)
def mouse_position_event(self, x, y, dx, dy):
self.program['u_mouse'] = (x, y)
def mouse_scroll_event(self, x_offset, y_offset):
self.u_scroll = max(1.0, self.u_scroll + y_offset)
self.program['u_scroll'] = self.u_scroll
if __name__ == '__main__':
mglw.run_window_config(App)
| 2.15625
| 2
|
listener.py
|
vinhquevu/angora
| 0
|
12784294
|
<reponame>vinhquevu/angora
"""
Angora Queue
"""
import socket
from typing import Dict, Optional
import kombu # type: ignore
class Queue:
"""
An object representing a queue in RabbitMQ.
A listener can listen to one or more queues and have one or more
callbacks. Consumer expects a sequence of queues. Angora is designed to
only have one listener per queue. A kombu.Queue object is passed as a
list to kombu.Consumer for this reason.
Once you create a Queue object, you can do two things: start up the
listener or clear the queue.
"""
def __init__(
self,
queue_name: str,
routing_key: str,
queue_args: Optional[Dict] = None,
user: str = "guest",
password: str = "<PASSWORD>",
host: str = "localhost",
port: int = 5672,
exchange_name: str = "angora",
exchange_type: str = "direct",
) -> None:
self.queue_name = queue_name
self.routing_key = routing_key
self.queue_args = queue_args
self.user = user
self.password = password
self.host = host
self.port = port
self.exchange_name = exchange_name
self.exchange_type = exchange_type
@property
def queue(self) -> kombu.Queue:
return kombu.Queue(
self.queue_name,
kombu.Exchange(self.exchange_name, type=self.exchange_type),
self.routing_key,
queue_arguments=self.queue_args,
)
@property
def connection_str(self) -> str:
return "amqp://{}:{}@{}:{}//".format(
self.user, self.password, self.host, self.port
)
def listen(self, callbacks: Optional[list] = None) -> None:
"""
        Start a listener and handle messages with the callback(s). If the
queue does not already exist in the exchange, it will be created.
"""
with kombu.Connection(self.connection_str) as conn:
with kombu.Consumer(conn, [self.queue], callbacks=callbacks, no_ack=True):
try:
print("STARTING LISTENER")
for _ in kombu.eventloop(conn):
pass
except KeyboardInterrupt:
print("\nExiting\n")
def clear(self) -> None:
"""
        Clear a queue of messages. If the queue does not already exist in the
        exchange, it will be created, which amounts to creating an empty queue
        with no listener.
"""
with kombu.Connection(self.connection_str) as conn:
with kombu.Consumer(conn, [self.queue], no_ack=True):
try:
conn.drain_events(timeout=2)
except (socket.timeout, NotImplementedError):
print("\nQueue has been drained\n")
| 3.265625
| 3
|
components/services/appengine/stub/beaker/exceptions.py
|
appcelerator-archive/entourage
| 1
|
12784295
|
<filename>components/services/appengine/stub/beaker/exceptions.py
class BeakerException(Exception):
pass
class InvalidCacheBackendError(BeakerException):
pass
class MissingCacheParameter(BeakerException):
pass
class LockError(BeakerException):
pass
| 1.6875
| 2
|
ilias2nbgrader/preprocessors/createfolderstructure.py
|
DigiKlausur/ilias2nbgrader
| 4
|
12784296
|
import os
from .preprocessor import Preprocessor
from traitlets import Unicode
from shutil import move
import glob
class CreateFolderStructure(Preprocessor):
directory = Unicode('restructured', help='Subfolder where processed files go')
def __init__(self):
super(CreateFolderStructure, self).__init__()
def mkdir(self, path):
if not os.path.exists(path):
os.makedirs(path)
def preprocess_student(self, student, resources):
self.init_logging('Create Folder Structure')
src = os.path.join(self.src, student)
dst = os.path.join(self.dst, student, resources['assignment'])
self.mkdir(os.path.join(self.dst, student))
move(src, dst)
self.log.info('Moved submission to subfolder {}'.format(resources['assignment']))
self.terminate_logging(os.path.join(self.dst, student, resources['assignment'], self.logname))
return student, resources
| 2.53125
| 3
|
ms/collect_figures.py
|
yoavram/Milpitas
| 0
|
12784297
|
<gh_stars>0
# import tempfile
import shutil
import os.path
from os.path import join as join_path
from os.path import split as split_path
import re
from zipfile import ZipFile
if __name__ == '__main__':
ms_dir = '../ms'
figures_dir = '../figures'
ms_figures_dir = join_path(ms_dir, 'figures')
zip_fname = '/Users/yoavram/Dropbox/Milpitas/figures.zip'
tex_files = ['figures.tex', 'figures_supp.tex']
pattern = re.compile(r'\\includegraphics(?:\[.*\])?\{(.*\.\w{3})\}')
if os.path.exists(ms_figures_dir):
shutil.rmtree(ms_figures_dir)
os.mkdir(ms_figures_dir)
figures = []
for fn in tex_files:
with open(join_path(ms_dir, fn)) as f:
matches = (pattern.match(line) for line in f)
matches = (m for m in matches if m is not None)
filenames = (m.groups()[0] for m in matches)
filenames = (split_path(fn)[-1] for fn in filenames)
filenames = (join_path(figures_dir, fn) for fn in filenames)
figures.extend(filenames)
with ZipFile(zip_fname, 'w') as z:
for fn in figures:
print(fn)
shutil.copy(fn, join_path(ms_figures_dir, split_path(fn)[-1]))
z.write(fn)
print("{} figures copied to {} and zipped to {}".format(
len(figures), ms_figures_dir, zip_fname))
| 2.640625
| 3
|
resources/mechanics_lib/Fulcrum.py
|
PRECISE/ROSLab
| 7
|
12784298
|
from api.component import Component
class Fulcrum(Component):
def defComponents(self):
# Subcomponents used in this assembly
self.addSubcomponent("stem", "Hinge")
self.addSubcomponent("left", "RectBeam")
self.addSubcomponent("right", "RectBeam")
self.addSubcomponent("t", "TJoint")
def defParameters(self):
# Subcomponent free parameters are inherited by default
# Subcomponent parameters that are no longer free in this assembly are deleted
'''
self.delParameter("length")
self.delParameter("width")
self.delParameter("depth")
self.delParameter("angle")
self.delParameter("rangle")
self.delParameter("langle")
self.delParameter("phase")
self.delParameter("noflap")
self.delParameter("faces")
'''
# New free parameters specific to this assembly are added
self.newParameter("leftlength")
self.newParameter("rightlength")
self.newParameter("stemwidth")
self.newParameter("crosswidth")
self.newParameter("thickness")
def defInterfaces(self):
        # Locations on the Fulcrum component that higher order components can use for assembly
self.newInterface("stemedge")
self.newInterface("leftedge")
self.newInterface("rightedge")
self.newInterface("lefttab")
def defConstraints(self):
### Set specific relationships between parameters
self.addConstraint(("stem", "perimeter"), ("stemwidth", "thickness"), "2 * sum(x)")
self.addConstraint(("stem", "top"), ("stemwidth", "thickness"), "(x[1]-x[0]) * 1.0 / sum(x)")
self.addConstraint(("stem", "bot"), ("stemwidth", "thickness"), "(x[1]-x[0]) * 1.0 / sum(x)")
self.addConstraint(("left", "depth"), ("thickness"))
self.addConstraint(("left", "width"), ("crosswidth"))
self.addConstraint(("left", "length"), ("leftlength"))
self.addConstraint(("right", "depth"), ("thickness"))
self.addConstraint(("right", "width"), ("crosswidth"))
self.addConstraint(("right", "length"), ("rightlength"))
self.addConstraint(("t", "thickness"), "thickness")
self.addConstraint(("t", "crosswidth"), "crosswidth")
self.addConstraint(("t", "stemwidth"), "stemwidth")
def defConnections(self):
self.addConnection(("t", "leftedge"),
("left", "botedge.0.3"), "Flat")
self.addConnection(("t", "rightedge"),
("right", "topedge.0.3"), "Flat")
self.addConnection(("t", "stemedge"),
("stem", "topedge.1"),
"Fold", angle=(-70.5/2))
# XXX Not well shaped -- leaves overhang
self.addConnection(("t", "stemtab"),
("stem", "topedge.3"),
"Tab",
name="tab", depth=10, angle=(-70.5/2))
def defInterfaces(self):
# Define interface locations in terms of subcomponent interfaces
self.inheritInterface("stemedge", ("stem", "botedge"))
self.inheritInterface("lefttab", ("left", "tabedge"))
self.inheritInterface("leftedge", ("left", "topedge"))
self.inheritInterface("rightedge", ("right", "botedge"))
if __name__ == "__main__":
# Instantiate new object
f = Fulcrum()
# Define free parameters
f.setParameter("thickness", 10)
f.setParameter("stemwidth", 20)
f.setParameter("crosswidth", 30)
f.setParameter("leftlength", 50)
f.setParameter("rightlength", 100)
# Generate outputs
f.make()
f.drawing.graph.toSTL("output/tbar.stl")
f.drawing.transform(relative=(0,0))
import utils.display
utils.display.displayTkinter(f.drawing)
| 2.5625
| 3
|
poc/db_schema.py
|
fabienleite/IOMMU-dumper
| 0
|
12784299
|
#!/bin/false
# -*- coding: utf-8 -*-
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Device(Base):
__tablename__ = 'device'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True)
bdf = Column(String(255), unique=True)
class Mapping(Base):
__tablename__ = 'mapping'
id = Column(Integer, primary_key=True)
iova = Column(String(255))
phys_addr = Column(String(255))
size = Column(Integer)
device_id = Column(Integer, ForeignKey('device.id'))
device = relationship('Device', backref='mapping')
engine = create_engine('sqlite:///iommu.db')
Base.metadata.create_all(engine)
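# Hedged usage sketch (not part of the original module): the device name, BDF and
# mapping values below are placeholders for illustration only.
if __name__ == "__main__":
    from sqlalchemy.orm import sessionmaker
    session = sessionmaker(bind=engine)()
    dev = Device(name="example-nic", bdf="0000:03:00.0")
    session.add(dev)
    session.add(Mapping(iova="0x1000", phys_addr="0xfee00000", size=4096, device=dev))
    session.commit()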
| 2.75
| 3
|
guide/train_operator_ppo_ray.py
|
jgori-ouistiti/interaction-agents
| 0
|
12784300
|
<reponame>jgori-ouistiti/interaction-agents<gh_stars>0
from pointing.envs import SimplePointingTask
from pointing.assistants import ConstantCDGain
from pointing.users import CarefulPointer
from coopihc.policy import Policy
from coopihc.bundle import PlayUser, Train
from gym.wrappers import FlattenObservation
from collections import OrderedDict
import gym
import numpy
class ThisActionWrapper(gym.ActionWrapper):
def __init__(self, env):
super().__init__(env)
self.N = env.action_space[0].n
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(1,))
    def action(self, action):
        # Map the continuous action in [-1, 1] onto the discrete index range [0, N-1].
        return int(numpy.round((action * self.N / 2) + (self.N - 1) / 2)[0])
class Pointing(gym.Env):
def __init__(self, env_config):
task = SimplePointingTask(gridsize=31, number_of_targets=8)
unitcdgain = ConstantCDGain(1)
policy = Policy(
action_space=[gym.spaces.Discrete(10)],
action_set=[-5 + i for i in range(5)] + [i + 1 for i in range(5)],
action_values=None,
)
user = CarefulPointer(agent_policy=policy)
bundle = PlayUser(task, user, unitcdgain)
if env_config["observation_mode"] == "flat":
observation_mode = "flat"
elif env_config["observation_mode"] is None:
observation_mode = None
else:
observation_mode = OrderedDict(
{
"task_state": OrderedDict({"Position": 0}),
"user_state": OrderedDict({"Goal": 0}),
}
)
env = Train(bundle, observation_mode=observation_mode)
env = ThisActionWrapper(env)
self.env = env
self.action_space = env.action_space
self.observation_space = env.observation_space
def reset(self):
return self.env.reset()
def step(self, action):
return self.env.step(action)
env_config = {"observation_mode": "flat"}
flat_env = Pointing(env_config)
env_config = {"observation_mode": None}
natural_env = Pointing(env_config)
env_config = {"observation_mode": "dict"}
dict_env = Pointing(env_config)
exit()
import ray
import ray.rllib.agents.ppo as ppo
ray.shutdown()
ray.init(ignore_reinit_error=True)
import shutil
CHECKPOINT_ROOT = "tmp/ppo/pointing"
shutil.rmtree(CHECKPOINT_ROOT, ignore_errors=True, onerror=None)
ray_results = "/home/jgori/ray_results/"
shutil.rmtree(ray_results, ignore_errors=True, onerror=None)
config = ppo.DEFAULT_CONFIG.copy()
config["env_config"] = {}
config["log_level"] = "WARN"
agent = ppo.PPOTrainer(config, env=Pointing)
N_ITER = 1
s = "{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} saved {}"
for n in range(N_ITER):
result = agent.train()
file_name = agent.save(CHECKPOINT_ROOT)
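    # Hedged addition (not part of the original script): the status string `s`
    # defined above is otherwise unused; these keys are standard RLlib result metrics.
    print(s.format(n + 1,
                   result["episode_reward_min"],
                   result["episode_reward_mean"],
                   result["episode_reward_max"],
                   result["episode_len_mean"],
                   file_name))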
| 2.265625
| 2
|
tests/data/test_bo_remove_user_from_group.py
|
c17r/TagTrain
| 0
|
12784301
|
import pytest
from . import db
from .db import database
from tagtrain import data
def test_unknown_owner(database):
with pytest.raises(data.Group.DoesNotExist):
group = data.by_owner.remove_user_from_group('non-existent', db.GROUP_NAME, 'doesnt-matter')
def test_unknown_group(database):
with pytest.raises(data.Group.DoesNotExist):
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, 'non-existent', 'doesnt-matter')
def test_unknown_member(database):
with pytest.raises(data.Member.DoesNotExist):
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, db.GROUP_NAME, 'non-existent')
def test_good_non_empty(database):
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 4
assert len(list(group.members)) == 4
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, db.GROUP_NAME, 'one')
assert group.name == db.GROUP_NAME
assert group.reddit_name == db.OWNER_NAME
assert group.member_count == 3
assert len(list(group.members)) == 3
assert group.members[0].reddit_name == 'two'
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 3
assert len(list(group.members)) == 3
def test_good_empty(database):
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 4
assert len(list(group.members)) == 4
members_to_delete = [m.reddit_name for m in group.members]
for m in members_to_delete:
group = data.by_owner.remove_user_from_group(db.OWNER_NAME, db.GROUP_NAME, m)
assert group.name == db.GROUP_NAME
assert group.reddit_name == db.OWNER_NAME
assert group.member_count == 0
assert len(list(group.members)) == 0
group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
assert group.member_count == 0
assert len(list(group.members)) == 0
| 2.421875
| 2
|
psite/urls.py
|
jrrobertson/psite
| 0
|
12784302
|
<reponame>jrrobertson/psite
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from django.views.generic import RedirectView
from resume import views
urlpatterns = [
path('', RedirectView.as_view(url='resume/')),
path('admin/', admin.site.urls),
path('resume/', views.view_resume, name='v_resume'),
path('aboutme/', views.view_aboutme, name='v_aboutme'),
path('blog/', include('blog.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 1.875
| 2
|
elderflower/task.py
|
NGC4676/minister
| 1
|
12784303
|
<reponame>NGC4676/minister
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import warnings
import numpy as np
from pathlib import Path
from functools import partial
from astropy.io import fits
from astropy.table import Table
from .io import logger
from .io import find_keyword_header, check_save_path, clean_pickling_object
from .detection import default_SE_config, default_conv, default_nnw
from .mask import mask_param_default
from . import DF_pixel_scale, DF_raw_pixel_scale, DF_Gain
def Run_Detection(hdu_path,
obj_name,
band,
threshold=5,
work_dir='./',
config_path=None,
executable=None,
ZP_keyname='REFZP',
ZP=None,
pixel_scale=DF_pixel_scale,
ref_cat='APASSref.cat',
apass_dir=None,
sep_match=2,
**SE_kwargs):
"""
Run a first-step source detection with SExtractor. This step generates a SExtractor catalog
    and segmentation map for the cross-match and measurement in Match_Mask_Measure.
Magnitudes are converted using the zero-point stored in the header ('ZP_keyname'). If not
stored in the header, it will try to compute the zero-point by cross-match with the APASS
catalog. In this case, the directory to the APASS catalogs is needed ('apass_dir'). If a
reference catalog already exists, it can be provided ('ref_cat') to save time.
Parameters
----------
hdu_path : str
Full path of hdu data
obj_name : str
Object name
    band : str, 'G', 'g', 'R', 'r'
Filter name
threshold : int, optional, default 5
Detection and analysis threshold of SExtractor
work_dir : str, optional, default current directory
Full path of directory for saving
config_path : str, optional, None
        Full path of the configuration file for running SExtractor.
By default it uses the one stored in configs/
executable : str, optional, None
Full path of the SExtractor executable. If SExtractor is installed
this can be obtained by typing '$which source-extractor' or
'$which sex' in the shell.
        By default it will be searched for automatically.
ZP_keyname : str, optional, default REFZP
Keyword names of zero point in the header.
If not found, a value can be passed by ZP.
ZP : float or None, optional, default None
Zero point value. If None, it finds ZP_keyname in the header.
If not provided either, it will compute a zero point by
cross-match with the APASS catalog.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pixel.
ref_cat : str, optional, default 'APASSref.cat'
Full path file name of the APASS reference catalog.
If not found, it will generate a reference catalog.
apass_dir : str, optional, default None
        Full path of the directory of the APASS catalogs.
sep_match : float, optional, default 2
Maximum separation (in arcsec) for crossmatch with APASS.
Not used if ZP is given in the header.
Returns
-------
ZP: float
Zero point value from the header, or a crossmatch with APASS, or a user-input.
Notes
-----
SExtractor must be installed and the local executable path needs to be correct.
    A configuration file other than the default can be passed via config_path, but parameters
    can still be overridden by passing them as kwargs, e.g. (note SExtractor keywords are in capitals):
Run_Detection(..., DETECT_THRESH=10)
will override threshold.
"""
from .detection import run as run_sextractor
from .io import update_SE_kwargs, get_SExtractor_path
logger.info(f"Run SExtractor on {hdu_path}...")
check_save_path(work_dir, overwrite=True, verbose=False)
band = band.lower()
segname = os.path.join(work_dir, f'{obj_name}-{band}_seg.fits')
catname = os.path.join(work_dir, f'{obj_name}-{band}.cat')
header = fits.getheader(hdu_path)
if config_path is None: config_path = default_SE_config
if executable is None: executable = get_SExtractor_path()
SE_extra_params = ['NUMBER','X_WORLD','Y_WORLD','FLUXERR_AUTO','MAG_AUTO',
'MU_MAX','CLASS_STAR','ELLIPTICITY']
# Find zero-point in the fits header
if ZP_keyname not in header.keys():
logger.warning("ZP_keyname is not found in the header")
# If not in the header, check kwargs
if type(ZP) is not float:
# If not available in kwargs, compute by crossmatch with refcat
try:
from dfreduce.utils.catalogues import (match_catalogues, load_apass_in_region)
except ImportError:
msg = "Crossmatch is currently not available because dfreduce is not installed. A ZP_keyname is required in the header."
logger.error(msg)
sys.exit()
logger.info("Compute zero-point from crossmatch with APASS catalog...")
# alias for CDELT and CD
for axis in [1, 2]:
cd = 'CD{0}_{1}'.format(axis, axis)
if cd not in header.keys():
header[cd] = header['PC{0}_{1}'.format(axis, axis)]
# Run sextractor with free zero-point
SE_catalog = run_sextractor(hdu_path,
extra_params=SE_extra_params,
config_path=config_path,
catalog_path=catname,
executable=executable,
DETECT_THRESH=10, ANALYSIS_THRESH=10,
PIXEL_SCALE=pixel_scale,
FILTER_NAME=default_conv,
STARNNW_NAME=default_nnw)
# Load (APASS) reference catalog
ref_cat = os.path.join(work_dir, "{0}.{1}".format(*os.path.basename(ref_cat).rsplit('.', 1)))
if os.path.exists(ref_cat):
refcat = Table.read(ref_cat, format='ascii')
else:
logger.info("Generate APASS reference catalog... It will take some time.")
ra_range = abs(header['NAXIS1'] * header['CD1_1'])
dec_range = abs(header['NAXIS2'] * header['CD2_2'])
maxra = header['CRVAL1'] - header['CRPIX1'] * header['CD1_1']
mindec = header['CRVAL2'] - header['CRPIX2'] * header['CD2_2']
minra = maxra - ra_range
maxdec = mindec + dec_range
bounds_cat = [mindec, maxdec, minra, maxra]
            if apass_dir is not None:
if os.path.exists(apass_dir):
refcat = load_apass_in_region(apass_dir,
bounds=bounds_cat)
refcat.write(ref_cat, format='ascii')
else:
raise FileNotFoundError('APASS directory not available.')
# Crossmatch SE catalog with reference catalog
imagecat_match, refcat_match = match_catalogues(SE_catalog, refcat, band, sep_max=sep_match)
# Get the median ZP from the crossmatched catalog
ZP = np.median(refcat_match[band] - imagecat_match[band])
logger.info("Matched median zero-point = {:.3f}".format(ZP))
else:
        ZP = float(header[ZP_keyname])
logger.info("Read zero-point from header : ZP = {:.3f}".format(ZP))
logger.info("Pixel scale = {:.2f}".format(pixel_scale))
logger.info("Detection threshold = {:.1f}".format(threshold))
SE_kwargs_update = {'DETECT_THRESH':threshold,
'ANALYSIS_THRESH':threshold,
'MAG_ZEROPOINT':ZP,
'PIXEL_SCALE':pixel_scale}
SE_kwargs = update_SE_kwargs(SE_kwargs, SE_kwargs_update)
SE_catalog = run_sextractor(hdu_path,
extra_params=SE_extra_params,
config_path=config_path,
catalog_path=catname,
executable=executable,
CHECKIMAGE_TYPE='SEGMENTATION',
CHECKIMAGE_NAME=segname, **SE_kwargs)
    if not (os.path.isfile(catname) and os.path.isfile(segname)):
raise FileNotFoundError('SE catalog/segmentation not saved properly.')
logger.info(f"CATALOG saved as {catname}")
logger.info(f"SEGMENTATION saved as {segname}")
return ZP
def Match_Mask_Measure(hdu_path,
bounds_list,
obj_name,
band,
pixel_scale=DF_pixel_scale,
ZP=None,
bkg=None,
field_pad=50,
r_scale=12,
mag_limit=15,
mag_saturate=13.5,
width_ring=1.5,
width_cross=10,
draw=True,
save=True,
use_PS1_DR2=False,
work_dir='./'):
"""
    Generate a series of files in preparation for the fitting.
    The function proceeds through the following steps:
1) Identify bright extended sources empirically and mask them.
2) Crossmatch the SExtractor table with the PANSTARRS catalog.
3) Correct the catalogued magnitudes to the used filter.
4) Add saturated stars missing in the crossmatch by a correction.
5) Make mask maps for dim stars with empirical apertures enlarged from SExtractor.
6) Measure brightness in annuli around bright stars
The output files are saved in:
work_dir/obj_name/Measure-PS1 or work_dir/obj_name/Measure-PS2
Parameters
----------
hdu_path : str
Full path of hdu data.
    bounds_list : 2D list / tuple
List of boundaries of regions to be fit (Nx4).
[[X min, Y min, X max, Y max],[...],...]
obj_name : str
Object name.
band : str, 'g', 'G', 'r', 'R'
Filter name.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pixel.
ZP : float or None, optional, default None
Zero point value (if None, read ZP from header).
bkg : float or None, optional, default None
Background estimated value (if None, read BACKVAL from header).
    field_pad : int, optional, default 50
Padding size (in pix) of the field for crossmatch.
Only used if use_PS1_DR2=False
r_scale : int, optional, default 12
Radius (in pix) at which the brightness is measured.
Default is 30" for Dragonfly.
mag_limit : float, optional, default 15
Magnitude upper limit below which are measured.
mag_saturate : float, optional, default 13.5
Estimate of magnitude at which the image is saturated.
The exact value will be fit.
width_ring : float, optional, default 1.5
Half-width in arcsec of ring used to measure the scaling.
width_cross : float, optional, default 4 * 2.5
Half-width in arcsec of the spike mask when measuring the scaling.
draw : bool, optional, default True
Whether to draw diagnostic plots.
save : bool, optional, default True
Whether to save results.
use_PS1_DR2 : bool, optional, default False
Whether to use PANSTARRS DR2. Crossmatch with DR2 is done by MAST query,
which could easily fail if a field is too large (> 1 deg^2).
work_dir : str, optional, default current directory
Full path of directory for saving.
Returns
-------
    None
"""
band = band.lower()
bounds_list = np.atleast_2d(bounds_list).astype(int)
##################################################
# Read and Display
##################################################
from .utils import crop_image, crop_catalog, background_stats
from astropy import wcs
# Read hdu
if not os.path.isfile(hdu_path):
msg = "Image does not exist. Check path."
logger.error(msg)
raise FileNotFoundError()
with fits.open(hdu_path) as hdul:
logger.info(f"Read Image: {hdu_path}")
data = hdul[0].data
header = hdul[0].header
wcs_data = wcs.WCS(header)
# Read output from SExtractor detection
SE_cat_full = Table.read(os.path.join(work_dir, f'{obj_name}-{band}.cat'), format="ascii.sextractor")
seg_map = fits.getdata(os.path.join(work_dir, f'{obj_name}-{band}_seg.fits'))
# Get ZP from header
if ZP is None: ZP = find_keyword_header(header, "ZP", raise_error=True)
# Get background from header or simple stats
bkg, std = background_stats(data, header, mask=(seg_map>0), bkg_keyname="BACKVAL")
# Convert SE measured flux into mag
flux = SE_cat_full["FLUX_AUTO"]
mag = -2.5 * np.ma.log10(flux).filled(flux[flux>0].min()) + ZP
SE_cat_full["MAG_AUTO"] = np.around(mag, 5)
field_bounds = [field_pad, field_pad,
data.shape[1]-field_pad,
data.shape[0]-field_pad]
if not use_PS1_DR2: logger.info("Match field %r with catalog"%field_bounds)
logger.info("Measure Sky Patch [X min, Y min, X max, Y max] :")
[logger.info(" - Bounds: %r"%b) for b in bounds_list.tolist()]
# Display field_bounds and sub-regions to be matched
patch = crop_image(data, field_bounds,
sub_bounds=bounds_list,
seg_map=seg_map, draw=draw)
# Crop parent SE catalog
SE_cat = crop_catalog(SE_cat_full, field_bounds)
##################################################
# Crossmatch with Star Catalog (across the field)
##################################################
import astropy.units as u
from .utils import (identify_extended_source,
calculate_color_term,
add_supplementary_atlas,
add_supplementary_SE_star)
from .crossmatch import cross_match_PS1
# Identify bright extended sources and enlarge their mask
SE_cat_target, ext_cat, mag_saturate = identify_extended_source(SE_cat, draw=draw,
mag_limit=mag_limit,
mag_saturate=mag_saturate)
# Use PANSTARRS DR1 or DR2?
if use_PS1_DR2:
mag_name = mag_name_cat = band+'MeanPSFMag'
bounds_crossmatch = bounds_list
dir_name = os.path.join(work_dir, 'Measure-PS2/')
else:
mag_name = band+'mag'
mag_name_cat = mag_name+'_PS'
bounds_crossmatch = field_bounds
dir_name = os.path.join(work_dir, 'Measure-PS1/')
# Crossmatch with PANSTRRS mag < mag_limit
tab_target, tab_target_full, catalog_star = \
cross_match_PS1(band, wcs_data,
SE_cat_target,
bounds_crossmatch,
pixel_scale=pixel_scale,
sep=DF_pixel_scale,
mag_limit=mag_limit,
use_PS1_DR2=use_PS1_DR2)
# Calculate color correction between PANSTARRS and DF filter
CT = calculate_color_term(tab_target_full, mag_range=[mag_saturate,18],
mag_name=mag_name_cat, draw=draw)
catalog_star["MAG_AUTO_corr"] = catalog_star[mag_name] + CT # corrected MAG_AUTO
tab_target["MAG_AUTO_corr"] = tab_target[mag_name_cat] + CT
# Mannually add stars missed in the crossmatch or w/ weird mag to table
tab_target = add_supplementary_SE_star(tab_target, SE_cat_target,
mag_saturate, mag_limit, draw=draw)
##################################################
# Save matched table and catalog
##################################################
if save:
check_save_path(dir_name, overwrite=True, verbose=False)
tab_target_name = os.path.join(dir_name,
'%s-catalog_match_%smag%d.txt'%(obj_name, band, mag_limit))
tab_target.write(tab_target_name,
overwrite=True, format='ascii')
catalog_star_name = os.path.join(dir_name,
f'{obj_name}-catalog_PS_{band}_all.txt')
catalog_star.write(catalog_star_name,
overwrite=True, format='ascii')
logger.info(f"Saved PANSTARRS catalog & matched sources in {dir_name}")
##################################################
# Build Mask & Measure Scaling (in selected patch)
##################################################
from .utils import (fit_empirical_aperture,
make_segm_from_catalog,
measure_Rnorm_all,
make_global_stack_PSF)
from .plotting import plot_bright_star_profile
# Empirical enlarged aperture size from magnitude based on matched SE detection
estimate_radius = fit_empirical_aperture(tab_target_full, seg_map,
mag_name=mag_name_cat,
mag_range=[10,22], K=2,
R_max=int(200/pixel_scale),
degree=2, draw=draw)
for bounds in bounds_list:
# Catalog bound slightly wider than the region
catalog_bounds = (bounds[0]-50, bounds[1]-50,
bounds[2]+50, bounds[3]+50)
# Crop the star catalog and matched SE catalog
catalog_star_patch = crop_catalog(catalog_star, catalog_bounds,
sortby=mag_name,
keys=("X_CATALOG", "Y_CATALOG"))
tab_target_patch = crop_catalog(tab_target, catalog_bounds,
sortby=mag_name_cat,
keys=("X_IMAGE", "Y_IMAGE"))
# Make segmentation map from catalog based on SE seg map of one band
seg_map_c = make_segm_from_catalog(catalog_star_patch,
bounds,
estimate_radius,
mag_name=mag_name,
obj_name=obj_name,
band=band,
ext_cat=ext_cat,
draw=draw,
save=save,
dir_name=dir_name)
tab_norm, res_thumb = measure_Rnorm_all(tab_target_patch, bounds,
wcs_data, data, seg_map,
mag_limit=mag_limit,
mag_saturate=mag_saturate,
r_scale=r_scale,
width_cross=(width_cross/pixel_scale),
width_ring=(width_ring/pixel_scale),
obj_name=obj_name,
mag_name=mag_name_cat,
save=save, dir_name=dir_name)
if draw:
plot_bright_star_profile(tab_target_patch,
tab_norm, res_thumb,
bkg_sky=bkg, std_sky=std, ZP=ZP,
pixel_scale=pixel_scale)
make_global_stack_PSF(dir_name, bounds_list, obj_name, band)
def Run_PSF_Fitting(hdu_path,
bounds_list,
obj_name,
band,
pixel_scale=DF_pixel_scale,
ZP=None,
bkg=None,
G_eff=None,
pad=50,
r_scale=12,
mag_limit=15,
mag_threshold=[13.5,11.],
mask_param=mask_param_default,
resampling_factor=1,
n_spline=3,
cutoff=True,
n_cutoff=4,
theta_cutoff=1200,
core_param={"frac":0.3, "beta":6.},
theta_0=5,
n0_=None,
fit_n0=True,
fit_n0_range=[20,40],
theta0_range=[30,300],
fix_n0=False,
fit_sigma=True,
fit_frac=False,
leg2d=False,
draw_real=True,
brightest_only=False,
parallel=True,
n_cpu=None,
nlive_init=None,
sample_method='auto',
print_progress=True,
draw=True,
save=True,
stop=False,
clean_measure=True,
use_PS1_DR2=False,
work_dir='./'):
"""
Run the wide-angle PSF fitting.
Parameters
----------
hdu_path : str
Full path of hdu data
    bounds_list : 2D int list / tuple
List of boundaries of regions to be fit (Nx4)
[[X min, Y min, X max, Y max],[...],...]
obj_name : str
Object name
band : str, 'g', 'G', 'r', 'R'
Filter name
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pixel
ZP : float or None, optional, default None
Zero point value (if None, read ZP from header)
bkg : float or None, optional, default None
Background estimated value (if None, read BACKVAL from header)
G_eff : float or None (default)
Effective gain (e-/ADU)
pad : int, optional, default 50
Padding size of the field for fitting
r_scale : int, optional, default 12
Radius (in pix) at which the brightness is measured.
Default is 30" for Dragonfly.
mag_limit : float, optional, default 15
Magnitude upper limit below which are measured
    mag_threshold : [float, float], default: [13.5, 11.]
        Magnitude thresholds to classify faint stars, medium bright stars and
        very bright stars. The conversion from brightness uses a static PSF.
mask_param: dict, optional
Parameters setting up the mask map.
See doc string of image.make_mask for details.
n_spline : int, optional, default 3
Number of power-law component for the aureole models.
The speed goes down as n_spline goes up. Default is 3.
cutoff : bool, optional, default True
If True, the aureole will be cutoff at theta_cutoff.
n_cutoff : float, optional, default 4
Cutoff slope for the aureole model.
Default is 4 for Dragonfly.
theta_cutoff : float, optional, default 1200
Cutoff range (in arcsec) for the aureole model.
Default is 20' for Dragonfly.
core_param: dict, optional
Initial estimate of parameters of the PSF core (not needed to be accurate).
The values will be fitted from stacked PSF.
"frac": fraction of aureole
"beta": moffat beta
"fwhm": moffat fwhm, in arcsec (optional)
theta_0 : float, optional, default 5
Flattened radius. Arbitrary but need to be small. in arcsec
n0_ : float, optional, default None
Power index of the first component, only used if fix_n0=True.
fit_n0 : bool, optional, default True
If True, fit n0 from profiles of bright stars before the Bayesian fitting.
fit_n0_range : 2-list, optional, default [20, 40]
Range for fitting n0 in arcsec
theta0_range : 2-list, optional, default [30, 300]
Range for fitting theta0 in arcsec
fix_n0 : bool, optional, default False
If True, n0 will be fixed to that value in the fitting.
        Only set to True when n0 is known to be appropriate, or for test purposes.
    fit_sigma : bool, optional, default True
        Whether to fit the background stddev.
        If False, the estimated value will be used.
fit_frac : bool, optional, default False
Whether to fit the fraction of the aureole.
If False, use the fiducial value in core_param.
leg2d : bool, optional, default False
If True, fit a varied background with 2D Legendre polynomial.
Currently only support 1st order.
draw_real : bool, optional, default True
Whether to draw very bright stars in real space.
Recommended to be turned on.
brightest_only : bool, optional, default False
Whether to draw very bright stars only.
If turned on the fitting will ignore medium bright stars.
parallel : bool, optional, default True
Whether to run drawing for medium bright stars in parallel.
n_cpu : int, optional, default None
Number of cpu used for fitting and/or drawing.
nlive_init : int, optional, default None
Number of initial live points in dynesty. If None will
use nlive_init = ndim*10.
sample_method : {'auto', 'unif', 'rwalk', 'rstagger', 'slice', 'rslice', 'hslice', callable}, optional, default is 'auto'
Samplimg method in dynesty. If 'auto', the method is 'unif' for ndim < 10,
'rwalk' for 10 <= ndim <= 20, 'slice' for ndim > 20.
print_progress : bool, optional, default True
Whether to turn on the progress bar of dynesty
draw : bool, optional, default True
Whether to draw diagnostic plots
save : bool, optional, default True
Whether to save results
clean_measure : bool, optional, default True
Whether to clean intermediate files for measurement
use_PS1_DR2 : bool, optional, default False
Whether to use PANSTARRS DR2.
Crossmatch with DR2 is done by MAST query, which might fail
if a field is too large (> 1 deg^2)
work_dir : str, optional, default current directory
Full Path of directory for saving
Returns
-------
samplers : list
A list of Sampler class which contains fitting results.
"""
band = band.lower()
bounds_list = np.atleast_2d(bounds_list).astype(int)
# Set up directory names
plot_dir = os.path.join(work_dir, 'plot')
check_save_path(plot_dir, overwrite=True, verbose=False)
if use_PS1_DR2:
dir_measure = os.path.join(work_dir, 'Measure-PS2/')
else:
dir_measure = os.path.join(work_dir, 'Measure-PS1/')
# option for running on resampled image
from .utils import process_resampling
hdu_path, bounds_list = process_resampling(hdu_path, bounds_list,
obj_name, band,
pixel_scale=pixel_scale,
mag_limit=mag_limit,
r_scale=r_scale,
dir_measure=dir_measure,
work_dir=work_dir,
factor=resampling_factor)
if resampling_factor!=1:
obj_name += '_rp'
pixel_scale *= resampling_factor
r_scale /= resampling_factor
############################################
# Read Image and Table
############################################
# from . import DF_Gain
from .image import ImageList
from .utils import background_stats
# Read quantities from header
header = fits.getheader(hdu_path)
data = fits.getdata(hdu_path)
if ZP is None: ZP = find_keyword_header(header, "ZP")
if G_eff is None:
N_frames = find_keyword_header(header, "NFRAMES", default=1e5)
G_eff = DF_Gain * N_frames
if N_frames==1e5:
logger.info("No effective Gain is given. Use sky noise.")
else:
logger.info("Effective Gain = %.3f"%G_eff)
# Get background from header or simple stats
seg_map = fits.getdata(os.path.join(work_dir, f'{obj_name}-{band}_seg.fits'))
bkg, std = background_stats(data, header, mask=(seg_map>0), bkg_keyname="BACKVAL")
# Construct Image List
DF_Images = ImageList(hdu_path, bounds_list,
obj_name, band,
pixel_scale=pixel_scale,
pad=pad, ZP=ZP, bkg=bkg, G_eff=G_eff)
# Read faint stars info and brightness measurement
DF_Images.read_measurement_tables(dir_measure,
r_scale=r_scale,
mag_limit=mag_limit)
############################################
# Setup Stars
############################################
from .utils import assign_star_props
# class for bright stars and all stars
stars_b, stars_all = DF_Images.assign_star_props(r_scale=r_scale,
mag_threshold=mag_threshold,
verbose=True, draw=False,
save=save, save_dir=plot_dir)
############################################
# Masking
############################################
from .mask import Mask
    # Mask faint stars and the centers of bright stars
    # Merge user overrides into a copy so the module-level defaults are not mutated.
    mask_param_used = {**mask_param_default, **mask_param}
    DF_Images.make_mask(stars_b, dir_measure, mask_param=mask_param_used,
draw=draw, save=save, save_dir=plot_dir)
# Collect stars for fit. Choose if only use brightest stars
if brightest_only:
stars = [s.use_verybright() for s in DF_Images.stars]
else:
stars = DF_Images.stars # for fit
############################################
# Estimate Background & Fit n0
############################################
DF_Images.estimate_bkg()
if fix_n0:
DF_Images.n0_ = n0 = n0_ # fixed value
DF_Images.fix_n0 = True # fix n0 as the input value
else:
DF_Images.fit_n0(dir_measure,
pixel_scale=pixel_scale,
fit_range=fit_n0_range,
mag_max=13.5, mag_limit=mag_limit,
r_scale=r_scale, sky_std=std,
draw=draw, save=save,
save_dir=plot_dir)
DF_Images.fix_n0 = fit_n0 # if using the prefit value, also fix n0
n0 = np.median(DF_Images.n0) # initial guess
############################################
# Setup PSF and Fit the Core
############################################
from .utils import (make_psf_2D, montage_psf_image)
## PSF Parameters ##
n_s = np.array([n0, 2.5]) # initial guess of power index
theta_s = np.array([theta_0, 10**2.])
# initial guess of transition radius in arcsec
# Core parameters, will be fitted
frac, beta = [core_param.get(prop) for prop in ["frac", "beta"]]
fwhm = core_param.get("fwhm", DF_Images.fwhm)
cutoff_param = dict(cutoff=cutoff, n_c=n_cutoff, theta_c=theta_cutoff)
# Make 2D PSF and a PSF Model ('psf')
image_psf, psf = make_psf_2D(n_s=n_s, theta_s=theta_s,
frac=frac, beta=beta, fwhm=fwhm,
cutoff_param=cutoff_param,
pixel_scale=pixel_scale,
psf_range=theta_cutoff)
# Montage the core and the 1st model component
fn_psf_stack = os.path.join(dir_measure, f'{obj_name}-{band}-PSF_stack.fits')
psf_stack = fits.getdata(fn_psf_stack)
image_psf = montage_psf_image(psf_stack, image_psf, r=10)
# Fit and update core parameters
psf.fit_psf_core_1D(image_psf,
obj_name=obj_name, band=band,
save=save, save_dir=plot_dir)
############################################
# Set Basement Image
############################################
# Make fixed background of dim stars
DF_Images.make_base_image(psf.psf_star, stars_all, draw=False)
############################################
# Setup Priors and Likelihood Models for Fitting
############################################
DF_Images.set_container(psf, stars,
n_spline=n_spline,
theta0_range=theta0_range,
n_min=1.1, leg2d=leg2d,
parallel=parallel,
draw_real=draw_real,
fit_sigma=fit_sigma,
fit_frac=fit_frac,
brightest_only=brightest_only,
method=sample_method,
verbose=True)
## (a stop for inspection/developer)
if stop:
print('Stop for sanity check... Does everything look good?')
return DF_Images, psf, stars
############################################
# Run Sampling
############################################
from .sampler import Sampler
from .io import DateToday, AsciiUpper
samplers = []
for i, reg in enumerate(AsciiUpper(DF_Images.N_Image)):
ct = DF_Images.containers[i]
ndim = ct.ndim
s = Sampler(ct, n_cpu=n_cpu, sample_method=sample_method)
if nlive_init is None: nlive_init = ndim*10
# Run fitting
s.run_fitting(nlive_init=nlive_init,
nlive_batch=5*ndim+5, maxbatch=2,
print_progress=print_progress)
if save:
# Save outputs
core_param = {"frac":psf.frac, "fwhm":fwhm, "beta":psf.beta}
s.fit_info = {'obj_name':obj_name,
'band':band,
'date':DateToday(),
'n_spline':n_spline,
'bounds':bounds_list[i],
'pixel_scale':pixel_scale,
'r_scale':r_scale,
'core_param':core_param,
'fit_n0':fit_n0}
if cutoff:
s.fit_info.update(cutoff_param)
suffix = str(n_spline)+'p'
if leg2d: suffix+='l'
if fit_frac: suffix+='f'
if brightest_only: suffix += 'b'
if use_PS1_DR2: suffix += '_ps2'
if sample_method=='mle': suffix+='_mle'
Xmin, Ymin, Xmax, Ymax = bounds_list[i]
range_str = f'X[{Xmin}-{Xmax}]Y[{Ymin}-{Ymax}]'
fname = f'{obj_name}-{band}-{reg}-{range_str}-fit{suffix}.res'
s.save_results(fname, save_dir=work_dir)
stars[i].save(f'{obj_name}-{band}-{reg}-{range_str}-stars', save_dir=work_dir)
############################################
# Plot Results
############################################
from .plotting import AsinhNorm
suffix = str(n_spline)+'p'+'_'+obj_name
# Generate bright star model with the PSF
s.generate_fit(psf, stars[i], image_base=DF_Images[i].image_base)
if draw:
r_core = mask_param['r_core']
s.cornerplot(figsize=(18, 16),
save=save, save_dir=plot_dir, suffix=suffix)
# Plot recovered PSF
s.plot_fit_PSF1D(psf, n_bootstrap=500, r_core=r_core,
save=save, save_dir=plot_dir, suffix=suffix)
# Calculate Chi^2
s.calculate_reduced_chi2(Gain=G_eff, dof=ndim)
# Draw 2D comparison
s.draw_comparison_2D(r_core=r_core, Gain=G_eff,
vmin=DF_Images.bkg-s.bkg_std_fit,
vmax=DF_Images.bkg+20*s.bkg_std_fit,
save=save, save_dir=plot_dir, suffix=suffix)
if leg2d:
# Draw background
s.draw_background(save=save, save_dir=plot_dir,
suffix=suffix)
# Append the sampler
samplers += [s]
# Delete Stars to avoid pickling error in rerun
clean_pickling_object('stars')
# Clean intermediate outputs of each region for measurement
if clean_measure:
for file in Path(dir_measure).glob('*X*Y*'):
os.remove(file)
return samplers
class berry:
"""
Fruit of elderflower.
(A wrapper for running the functions.)
Parameters
----------
hdu_path : str
path of hdu data
bounds_list : list [[X min, Y min, X max, Y max],[...],...]
list of boundaries of regions to be fit (Nx4)
obj_name : str
object name
band : str, 'g', 'G', 'r', 'R'
filter name
work_dir : str, optional, default current directory
Full Path of directory for saving
config_file : yaml, optional, default None
configuration file which contains keyword arguments.
If None, use the default configuration file.
Example
-------
# Initialize the task
elder = berry(hdu_path, bounds, obj_name, 'g', work_dir, config_file)
# Check keyword parameters listed in the configuration:
elder.parameters
# Run detection
elder.detection()
# Run the task
elder.run()
"""
def __init__(self,
hdu_path,
bounds_list,
obj_name,
band,
work_dir='./',
config_file=None):
self.hdu_path = hdu_path
self.bounds_list = bounds_list
self.obj_name = obj_name
self.band = band
with fits.open(hdu_path) as hdul:
self.data = hdul[0].data
self.header = hdul[0].header
hdul.close()
self.work_dir = work_dir
self.config = config_file
from .io import config_kwargs, default_config
if config_file is None: config_file = default_config
self.config_func = partial(config_kwargs, config_file=config_file)
@property
def parameters(self):
""" Keyword parameter list in the configuration file """
@self.config_func
def _kwargs(**kwargs):
return kwargs
return _kwargs()
def detection(self, **kwargs):
""" Run the source detection. """
self.ZP = Run_Detection(self.hdu_path,
self.obj_name, self.band,
work_dir=self.work_dir,
FILTER_NAME=default_conv,
STARNNW_NAME=default_nnw, **kwargs)
def run(self, **kwargs):
""" Run the task (Match_Mask_Measure + Run_PSF_Fitting). """
@self.config_func
def _run(func, **kwargs):
argnames = func.__code__.co_varnames[:func.__code__.co_argcount]
keys = set(kwargs.keys()).intersection(argnames)
pars = {key: kwargs[key] for key in keys}
return func(self.hdu_path, self.bounds_list,
self.obj_name, self.band,
work_dir=self.work_dir, **pars)
_run(Match_Mask_Measure, **kwargs)
self.samplers = _run(Run_PSF_Fitting, **kwargs)
| 2.109375
| 2
|
tmp/httprunner/debugtalk2.py
|
hanzhichao/runnerz
| 0
|
12784304
|
<gh_stars>0
def add(a,b):
return a+b
def setup(request):
print('setup')
print(request)
def teardown(response):
print('teardown')
# print(response.resp_obj.text)  # original request text
| 2.171875
| 2
|
0Leetcode Solutions/0053 Maximum Subarray.py
|
DonghaoQiao/Python
| 1
|
12784305
|
<reponame>DonghaoQiao/Python
'''
https://leetcode.com/problems/maximum-subarray/
53. Maximum Subarray
Easy
Given an integer array nums, find the contiguous subarray (containing at least one number)
which has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Follow up:
If you have figured out the O(n) solution, try coding another solution using the divide
and conquer approach, which is more subtle.
'''
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
for i in range(1,len(nums)):
if nums[i-1]>0:
nums[i]+=nums[i-1]
return max(nums)
print(Solution().maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))
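# Follow-up sketch (not part of the original solution): a divide-and-conquer
# version of maximum subarray, added only to illustrate the approach mentioned
# in the problem statement above. Assumes nums is non-empty.
def maxSubArrayDivideConquer(nums):
    def helper(lo, hi):
        # returns (best, best prefix, best suffix, total) for nums[lo:hi]
        if hi - lo == 1:
            v = nums[lo]
            return v, v, v, v
        mid = (lo + hi) // 2
        lbest, lpre, lsuf, ltot = helper(lo, mid)
        rbest, rpre, rsuf, rtot = helper(mid, hi)
        best = max(lbest, rbest, lsuf + rpre)
        prefix = max(lpre, ltot + rpre)
        suffix = max(rsuf, rtot + lsuf)
        return best, prefix, suffix, ltot + rtot
    return helper(0, len(nums))[0]

print(maxSubArrayDivideConquer([-2,1,-3,4,-1,2,1,-5,4]))  # expected 6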
| 4.03125
| 4
|
src/optimizer.py
|
mirefek/GeoModelBuilder
| 0
|
12784306
|
<reponame>mirefek/GeoModelBuilder<filename>src/optimizer.py
from abc import ABC, abstractmethod
import math
import pdb
import collections
import itertools
import tensorflow.compat.v1 as tf
import random
from tqdm import tqdm
from instruction import *
from primitives import Line, Point, Circle, Num
from util import is_number, FuncInfo
# Also stores the points used to compute it
LineSF = collections.namedtuple("LineSF", ["a", "b", "c", "p1", "p2"])
CircleNF = collections.namedtuple("CircleNF", ["center", "radius"])
# by convention, n is a unit vector located in the upper-half plane
LineNF = collections.namedtuple("LineNF", ["n", "r"])
class Optimizer(ABC):
def __init__(self, instructions, opts, unnamed_points, unnamed_lines, unnamed_circles, segments, seg_colors):
self.losses = dict()
self.has_loss = False
self.opts = opts
self.verbosity = opts['verbosity']
self.instructions = instructions
self.ndgs = dict()
self.goals = dict()
self.all_points = list()
self.name2pt = dict()
self.name2line = dict()
self.name2circ = dict()
self.segments = segments
self.seg_colors = seg_colors
self.unnamed_points = unnamed_points
self.unnamed_lines = unnamed_lines
self.unnamed_circles = unnamed_circles
self.n_tries = opts['n_tries']
if opts['n_tries'] < opts['n_models']:
print("WARNING: n_tries should be at least as big as n_models")
self.n_tries = opts['n_models']
self.n_inits = opts['n_inits']
if opts['n_inits'] < opts['n_tries']:
print("WARNING: n_inits should be at least as big as n_tries")
self.n_inits = opts['n_tries']
super().__init__()
def preprocess(self):
process_instr_iter = self.instructions if self.verbosity < 0 else tqdm(self.instructions, desc="Processing instructions...")
# for i in self.instructions:
# for i in tqdm(self.instructions, desc="Processing instructions..."):
for i in process_instr_iter:
self.process_instruction(i)
# After we've processed the instructions, process all the unnamed things
self.unnamed_points = [self.lookup_pt(p) for p in self.unnamed_points]
self.unnamed_lines = [self.line2nf(l) for l in self.unnamed_lines]
self.unnamed_circles = [self.circ2nf(c) for c in self.unnamed_circles]
self.segments = [(self.lookup_pt(a), self.lookup_pt(b)) for (a, b) in self.segments]
def process_instruction(self, i):
if isinstance(i, Sample):
self.sample(i)
elif isinstance(i, Compute):
self.compute(i)
elif isinstance(i, Parameterize):
self.parameterize(i)
elif isinstance(i, Assert):
self.add(i)
elif isinstance(i, AssertNDG):
self.addNDG(i)
elif isinstance(i, Eval):
self.eval_cons(i)
else:
raise NotImplementedError("FIXME: Finish process_instruction")
@abstractmethod
def get_point(self, x, y):
pass
@abstractmethod
def simplify(self, p, method="all"):
pass
def lookup_pt(self, p, name=None):
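# Resolve a symbolic Point (a named point or a FuncInfo construction) to its
# numeric value, recursing through nested constructions as needed.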
if isinstance(p.val, str): # Base case
return self.name2pt[p]
if isinstance(p.val, FuncInfo):
head, args = p.val
if head == "__val__":
return args[0]
elif head == "midp": return self.midp(*self.lookup_pts(args))
elif head == "circumcenter": return self.circumcenter(*self.lookup_pts(args))
elif head == "orthocenter": return self.orthocenter(*self.lookup_pts(args))
elif head == "incenter": return self.incenter(*self.lookup_pts(args))
elif head == "centroid": return self.centroid(*self.lookup_pts(args))
elif head == "amidp-opp": return self.amidp_opp(*self.lookup_pts(args))
elif head == "amidp-same": return self.amidp_same(*self.lookup_pts(args))
elif head == "excenter": return self.excenter(*self.lookup_pts(args))
elif head == "foot":
X, l = args
foot_p = Point(FuncInfo("inter-ll", [l, Line(FuncInfo("perp-at", [X, l]))]))
return self.lookup_pt(foot_p)
elif head == "harmonic-conj": return self.harmonic_l_conj(*self.lookup_pts(args))
elif head == "origin":
assert(len(args) == 1)
circ = args[0]
cnf = self.circ2nf(circ)
return cnf.center
elif head == "inter-ll":
l1, l2 = args
lnf1 = self.line2nf(l1)
lnf2 = self.line2nf(l2)
return self.inter_ll(lnf1, lnf2)
elif head == "inter-lc":
l, c, root_select = args
lnf = self.line2nf(l)
cnf = self.circ2nf(c)
if name:
self.make_lc_intersect(name, lnf, cnf)
else:
# Use random name as point is unnamed, but we still want to force LC to intersect
rand_name = get_random_string(6)
self.make_lc_intersect(rand_name, lnf, cnf)
return self.inter_lc(lnf, cnf, root_select)
elif head == "inter-cc":
c1, c2, root_select = args
cnf1 = self.circ2nf(c1)
cnf2 = self.circ2nf(c2)
if name:
self.make_lc_intersect(name, self.radical_axis(cnf1, cnf2), cnf1)
else:
# Use random name as point is unnamed, but we still want to force LC to intersect
rand_name = get_random_string(6)
self.make_lc_intersect(rand_name, self.radical_axis(cnf1, cnf2), cnf1)
return self.inter_cc(cnf1, cnf2, root_select)
elif head == "isogonal-conj": return self.isogonal_conj(*self.lookup_pts(args))
elif head == "isotomic-conj": return self.isotomic_conj(*self.lookup_pts(args))
elif head == "inverse": return self.inverse(*self.lookup_pts(args))
elif head == "reflect-pl":
x, l = args
X = self.lookup_pt(x)
foot_X = self.lookup_pt(Point(FuncInfo("foot", [x, l])))
vec_X_to_l = foot_X - X
return X + vec_X_to_l.smul(2)
elif head == "midp-from": return self.midp_from(*self.lookup_pts(args))
elif head == "mixtilinear-incenter": return self.mixtilinear_incenter(*self.lookup_pts(args))
else:
raise NotImplementedError(f"[lookup_pt] Unsupported head {head}")
else:
raise RuntimeError("Invalid point type")
def lookup_pts(self, ps):
p_vals = list()
for p in ps:
p_vals.append(self.lookup_pt(p))
return p_vals
def eval_num(self, n_info):
n_val = n_info.val
if not isinstance(n_val, tuple) and is_number(n_val):
return self.const(n_val)
n_pred = n_val[0]
n_args = n_val[1]
if n_pred == "dist":
p1, p2 = self.lookup_pts(n_args)
return self.dist(p1, p2)
elif n_pred == "uangle":
p1, p2, p3 = self.lookup_pts(n_args)
return self.angle(p1, p2, p3)
elif n_pred == "area":
p1, p2, p3 = self.lookup_pts(n_args)
return self.area(p1, p2, p3)
elif n_pred == "radius":
circ = self.circ2nf(n_args[0])
return circ.radius
elif n_pred == "diam":
circ = self.circ2nf(n_args[0])
return 2 * circ.radius
elif n_pred in ["div", "add", "mul", "sub", "pow"]:
n1, n2 = [self.eval_num(n) for n in n_args]
if n_pred == "div":
return n1 / n2
elif n_pred == "add":
return n1 + n2
elif n_pred == "mul":
return n1 * n2
elif n_pred == "sub":
return n1 - n2
else: # pow
return n1 ** n2
elif n_pred == "neg":
n = self.eval_num(n_args[0])
return -n
elif n_pred == "sqrt":
n = self.eval_num(n_args[0])
return self.sqrt(n)
else:
raise NotImplementedError(f"[eval_num] Unsupported pred {n_pred}")
@abstractmethod
def mkvar(self, name, shape=[], lo=-1.0, hi=1.0, trainable=None):
pass
@abstractmethod
def mk_normal_var(self, name, shape=[], mean=0.0, std=1.0, trainable=None):
pass
@abstractmethod
def register_pt(self, p, P, save_name=True):
pass
@abstractmethod
def register_line(self, l, L):
pass
@abstractmethod
def register_circ(self, c, C):
pass
@abstractmethod
def register_loss(self, key, var, weight=1.0, requires_train=True):
pass
@abstractmethod
def register_ndg(self, key, var, weight=1.0):
pass
@abstractmethod
def register_goal(self, key, var, negate):
pass
@abstractmethod
def regularize_points(self):
pass
@abstractmethod
def make_points_distinct(self):
pass
# FIXME: The below should be combined with an abstract Point class
#####################
## Math Utilities
####################
@abstractmethod
def sum(self, xs):
pass
@abstractmethod
def sqrt(self, x):
pass
@abstractmethod
def sin(self, x):
pass
@abstractmethod
def cos(self, x):
pass
@abstractmethod
def asin(self, x):
pass
@abstractmethod
def acos(self, x):
pass
@abstractmethod
def tanh(self, x):
pass
@abstractmethod
def atan2(self, x, y):
pass
@abstractmethod
def sigmoid(self, x):
pass
@abstractmethod
def const(self, x):
pass
@abstractmethod
def max(self, x, y):
pass
@abstractmethod
def min(self, x, y):
pass
@abstractmethod
def cond(self, cond, t_lam, f_lam):
pass
@abstractmethod
def lt(self, x, y):
pass
@abstractmethod
def lte(self, x, y):
pass
@abstractmethod
def gt(self, x, y):
pass
@abstractmethod
def gte(self, x, y):
pass
@abstractmethod
def eq(self, x, y):
pass
@abstractmethod
def logical_or(self, x, y):
pass
@abstractmethod
def abs(self, x):
pass
@abstractmethod
def exp(self, x):
pass
def softmax(self, xs):
exps = [self.exp(x) for x in xs]
sum_exps = self.sum(exps)
return [e / sum_exps for e in exps]
#####################
## Sample
####################
def sample(self, i):
s_method = i.sampler
s_args = i.args
if s_method == "acute-iso-tri": self.sample_triangle(i.points, iso=s_args[0], acute=True)
elif s_method == "acute-tri": self.sample_triangle(i.points, acute=True)
elif s_method == "equi-tri": self.sample_triangle(i.points, equi=True)
elif s_method == "iso-tri": self.sample_triangle(i.points, iso=s_args[0])
elif s_method == "polygon": self.sample_polygon(i.points)
elif s_method == "right-tri": self.sample_triangle(i.points, right=s_args[0])
elif s_method == "triangle": self.sample_triangle(i.points)
else: raise NotImplementedError(f"[sample] NYI: Sampling method {s_method}")
def sample_uniform(self, p, lo=-1.0, hi=1.0, save_name=True):
P = self.get_point(x=self.mkvar(str(p)+"x", lo=lo, hi=hi),
y=self.mkvar(str(p)+"y", lo=lo, hi=hi))
self.register_pt(p, P, save_name=save_name)
return P
def sample_polygon(self, ps):
if len(ps) < 4:
if self.verbosity > 0:
print("WARNING: sample_polygon expecting >3 points")
angle_zs = [self.mkvar(name=f"polygon_angle_zs_{i}", lo=-2.0, hi=2.0) for i in range(len(ps))]
multiplicand = ((len(ps) - 2) / len(ps)) * math.pi
angles = [multiplicand + (math.pi / 3) * self.tanh(0.2 * az) for az in angle_zs]
scale_zs = [self.mkvar(name=f"polygon_scale_zs_{i}", lo=-2.0, hi=2.0) for i in range(len(ps))]
scales = [0.5 * self.tanh(0.2 * sz) for sz in scale_zs]
Ps = [self.get_point(self.const(-2.0), self.const(0.0)),
self.get_point(self.const(2.0), self.const(0.0))]
s = self.dist(Ps[0], Ps[1])
for i in range(2, len(ps) + 1):
# print(f"sampling polygon point: {i}")
A, B = Ps[-2:]
X = B + self.rotate_counterclockwise(-angles[i-1], A - B)
P = B + (X - B).smul(s * (1 + scales[i-1]) / self.dist(X, B))
# Ps.append(P)
Ps.append(self.simplify(P, method="trig"))
# Angles should sum to (n-2) * pi
angle_sum = self.sum(angles)
expected_angle_sum = math.pi * (len(ps) - 2)
self.register_loss("polygon-angle-sum", angle_sum - expected_angle_sum, weight=1e-1)
# First point should equal the last point
self.register_loss("polygon-first-eq-last", self.dist(Ps[0], Ps[len(ps)]), weight=1e-2)
# First angle should be the one sampled (known to be <180)
self.register_loss("polygon-first-angle-eq-sampled",
angles[0] - self.angle(Ps[-1], Ps[0], Ps[1]),
weight=1e-2)
# for i in range(len(ps)):
# self.segments.append((Ps[i], Ps[(i+1) % (len(ps))]))
for p, P in zip(ps, Ps[:-1]):
self.register_pt(p, P)
def sample_triangle_on_unit_circ(self, ps):
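# Sample a triangle by drawing three vertex angles (roughly 120 degrees apart,
# with Gaussian noise) on the unit circle, then randomly permute the labels for diversity.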
[nA, nB, nC] = ps
theta_a = self.mk_normal_var(f"{nA}_unit_angle", mean=(2*math.pi) * (1 / 3), std=(2*math.pi) * (1 / 8))
theta_b = self.mk_normal_var(f"{nB}_unit_angle", mean=(2*math.pi) * (2 / 3), std=(2*math.pi) * (1 / 8))
theta_c = self.mk_normal_var(f"{nC}_unit_angle", mean=(2*math.pi) * (3 / 3), std=(2*math.pi) * (1 / 8))
A = self.get_point(self.cos(theta_a), self.sin(theta_a))
B = self.get_point(self.cos(theta_b), self.sin(theta_b))
C = self.get_point(self.cos(theta_c), self.sin(theta_c))
# randomly rotate them to get diversity
if random.random() < (1 / 3):
(C, A, B) = (A, B, C)
elif random.random() < (1 / 3):
(B, C, A) = (A, B, C)
self.register_pt(nA, A)
self.register_pt(nB, B)
self.register_pt(nC, C)
def sample_triangle(self, ps, iso=None, right=None, acute=False, equi=False):
if not (iso or right or acute or equi):
# return self.sample_polygon(ps)
return self.sample_triangle_on_unit_circ(ps)
[nA, nB, nC] = ps
B = self.get_point(self.const(-2.0), self.const(0.0))
C = self.get_point(self.const(2.0), self.const(0.0))
if iso is not None or equi:
Ax = self.const(0)
else:
Ax = self.mkvar("tri_x", lo=-1.2, hi=1.2, trainable=False)
if right is not None:
Ay = self.sqrt(4 - (Ax ** 2))
elif equi:
Ay = 2 * self.sqrt(self.const(3.0))
else:
AyLo = 1.1 if acute else 0.4
z = self.mkvar("tri")
Ay = self.const(AyLo) + 3.0 * self.sigmoid(z)
A = self.get_point(Ax, Ay)
# Shuffle so that the requested isosceles/right vertex gets the apex (constructed as A)
if iso == nB or right == nB:
(A, B, C) = (B, A, C)
elif iso == nC or right == nC:
(A, B, C) = (C, B, A)
self.register_pt(nA, A)
self.register_pt(nB, B)
self.register_pt(nC, C)
# self.segments.extend([(A, B), (B, C), (C, A)])
#####################
## Compute
####################
def compute(self, i):
obj_name = i.obj_name
c_method = i.computation.val[0]
c_args = i.computation.val
if isinstance(i.computation, Point):
P = self.lookup_pt(i.computation, str(obj_name))
self.register_pt(obj_name, P)
elif isinstance(i.computation, Line):
L = self.line2nf(i.computation)
self.register_line(obj_name, L)
elif isinstance(i.computation, Circle):
C = self.circ2nf(i.computation)
self.register_circ(obj_name, C)
else:
raise NotImplementedError(f"[compute] NYI: {c_method} not supported")
#####################
## Parameterize
####################
def parameterize(self, i):
p_name = i.obj_name
p_method = i.parameterization[0]
p_args = i.parameterization
param_method = i.parameterization
if p_method == "coords": self.parameterize_coords(p_name)
elif p_method == "in-poly": self.parameterize_in_poly(p_name, p_args[1])
elif p_method == "on-circ": self.parameterize_on_circ(p_name, p_args[1])
elif p_method == "on-line": self.parameterize_on_line(p_name, p_args[1])
elif p_method == "on-ray": self.parameterize_on_ray(p_name, p_args[1])
elif p_method == "on-ray-opp": self.parameterize_on_ray_opp(p_name, p_args[1])
elif p_method == "on-seg": self.parameterize_on_seg(p_name, p_args[1])
elif p_method == "on-minor-arc": self.parameterize_on_minor_arc(p_name, p_args[1])
elif p_method == "on-major-arc": self.parameterize_on_major_arc(p_name, p_args[1])
elif p_method == "line": self.parameterize_line(p_name)
elif p_method == "through-l": self.parameterize_line_through(p_name, p_args[1])
elif p_method == "tangent-lc": self.parameterize_line_tangentC(p_name, p_args[1])
elif p_method == "tangent-cc": self.parameterize_circ_tangentC(p_name, p_args[1])
elif p_method == "tangent-cl": self.parameterize_circ_tangentL(p_name, p_args[1])
elif p_method == "through-c": self.parameterize_circ_through(p_name, p_args[1])
elif p_method == "circle": self.parameterize_circ(p_name)
elif p_method == "origin": self.parameterize_circ_centered_at(p_name, p_args[1])
elif p_method == "radius": self.parameterize_circ_with_radius(p_name, p_args[1])
else: raise NotImplementedError(f"FIXME: Finish parameterize: {i}")
def parameterize_coords(self, p):
return self.sample_uniform(p)
def parameterize_line(self, l):
p1 = Point(l.val + "_p1")
p2 = Point(l.val + "_p2")
P1 = self.sample_uniform(p1, save_name=False)
P2 = self.sample_uniform(p2, save_name=False)
return self.register_line(l, self.pp2lnf(P1, P2))
def parameterize_line_through(self, l, ps):
[through_p] = ps
through_p = self.lookup_pt(through_p)
p2 = Point(l.val + "_p2")
P2 = self.sample_uniform(p2, save_name=False)
return self.register_line(l, self.pp2lnf(through_p, P2))
def parameterize_line_tangentC(self, l, args):
[c] = args
P1 = self.parameterize_on_circ(Point(f"p1"), [c], save_name=False)
P1 = Point(FuncInfo('__val__', [P1]))
L = self.line2nf(Line(FuncInfo("perp-at", [P1, Point(FuncInfo("origin", [c])), P1])))
return self.register_line(l, L)
def parameterize_circ(self, c):
o = Point(c.val + "_origin")
O = self.sample_uniform(o, save_name=False)
circ_nf = CircleNF(center=O, radius=self.mkvar(name=f"{c.val}_origin", lo=0.25, hi=3.0))
return self.register_circ(c, circ_nf)
def parameterize_circ_centered_at(self, c, ps):
[origin] = ps
origin = self.lookup_pt(origin)
circ_nf = CircleNF(center=origin, radius=self.mkvar(name=f"{c.val}_origin", lo=0.25, hi=3.0))
return self.register_circ(c, circ_nf)
def parameterize_circ_through(self, c, ps):
[through_p] = ps
through_p = self.lookup_pt(through_p)
o = Point("origin")
O = self.sample_uniform(o, save_name=False)
radius = self.dist(through_p, O)
circ_nf = CircleNF(center=O, radius=radius)
return self.register_circ(c, circ_nf)
def parameterize_circ_with_radius(self, c, rs):
[radius] = rs
radius = self.eval_num(radius)
o = Point("origin")
O = self.sample_uniform(o, save_name=False)
circ_nf = CircleNF(center=O, radius=radius)
return self.register_circ(c, circ_nf)
def parameterize_circ_tangentC(self, c, args):
[circ2] = args
interP = self.parameterize_on_circ(Point(f"tangency_point"), [circ2], save_name=False)
interP = Point(FuncInfo('__val__', [interP]))
O = self.parameterize_on_line(Point(f"origin"),
[Line(FuncInfo("connecting",
[interP, Point(FuncInfo("origin", [circ2]))]))],
save_name=False)
C = CircleNF(center=O, radius=self.dist(O, interP.val.args[0]))
return self.register_circ(c, C)
def parameterize_circ_tangentL(self, c, args):
[l] = args
interP = self.parameterize_on_line(Point(f"tangency_point"), [l], save_name=False)
interP = Point(FuncInfo('__val__', [interP]))
O = self.parameterize_on_line(Point(f"origin"),
[Line(FuncInfo("perp-at",
[interP, l]))], save_name=False)
C = CircleNF(center=O, radius=self.dist(O, interP.val.args[0]))
return self.register_circ(c, C)
def parameterize_on_seg(self, p, ps):
A, B = self.lookup_pts(ps)
z = self.mkvar(name=f"{p}_seg", lo=-2, hi=2)
# z = 0.9 * z
self.register_loss(f"{p}_seg_regularization", z, weight=1e-4, requires_train=False)
# self.segments.append((A, B))
return self.register_pt(p, A + (B - A).smul(self.sigmoid(z)))
def parameterize_on_line(self, p, p_args, save_name=True):
[l] = p_args
A, B = self.lnf2pp(self.line2nf(l))
z = self.mkvar(name=f"{p}_line", lo=-2, hi=2)
# z = 0.2 * z
self.register_loss(f"{p}_line_regularization", z, weight=1e-4, requires_train=False)
# TODO: arbitrary and awkward. Better to sample "zones" first?
s = 10.0
P1 = A + (A - B).smul(s)
P2 = B + (B - A).smul(s)
# self.segments.append((A, B))
return self.register_pt(p, P1 + (P2 - P1).smul(self.sigmoid(z)), save_name=save_name)
def parameterize_on_ray(self, p, ps):
A, B = self.lookup_pts(ps)
z = self.mkvar(name=f"{p}_ray", hi=2.0)
P = A + (B - A).smul(self.exp(z))
# self.segments.extend([(A, B), (A, P)])
return self.register_pt(p, P)
def parameterize_on_ray_opp(self, p, ps):
A, B = self.lookup_pts(ps)
z = self.mkvar(f"{p}_ray_opp")
P = A + (A - B).smul(self.exp(z))
# self.segments.extend([(A, B), (A, P)])
return self.register_pt(p, P)
def parameterize_on_circ(self, p, p_args, save_name=True):
[circ] = p_args
O, r = self.circ2nf(circ)
rot = self.mkvar(name=f"{p}_rot")
theta = rot * 2 * self.const(math.pi)
X = self.get_point(x=O.x + r * self.cos(theta), y=O.y + r * self.sin(theta))
return self.register_pt(p, X, save_name=save_name)
# self.unnamed_circles.append((O, r))
def parameterize_on_minor_arc(self, p, p_args):
[circ, a, b] = p_args
base_str = f"{p}_minor_arc_{circ}"
A, B = self.lookup_pts([a, b])
O, r = self.circ2nf(circ)
# Enforce that A and B are on circ
loss_a = self.dist(O, A) - r
loss_b = self.dist(O, B) - r
self.register_loss(f"{base_str}_{a}_on_{circ}", loss_a)
self.register_loss(f"{base_str}_{b}_on_{circ}", loss_b)
z = self.mkvar(base_str, lo=0.1, hi=0.9)
aob = self.clockwise_angle(A, O, B)
boa = self.clockwise_angle(B, O, A)
anchor = self.cond(self.lt(aob, boa), lambda: B, lambda: A)
theta = self.min(aob, boa) * (-z)
Px = O.x + (anchor.x - O.x) * self.cos(theta) - (anchor.y - O.y) * self.sin(theta)
Py = O.y + (anchor.x - O.x) * self.sin(theta) + (anchor.y - O.y) * self.cos(theta)
P = self.get_point(Px, Py)
return self.register_pt(p, P)
def parameterize_on_major_arc(self, p, p_args):
[circ, a, b] = p_args
base_str = f"{p}_major_arc_{circ}"
A, B = self.lookup_pts([a, b])
O, r = self.circ2nf(circ)
# Enforce that A and B are on circ
loss_a = self.dist(O, A) - r
loss_b = self.dist(O, B) - r
self.register_loss(f"{base_str}_{a}_on_{circ}", loss_a)
self.register_loss(f"{base_str}_{b}_on_{circ}", loss_b)
z = self.mkvar(base_str, lo=0.1, hi=0.9)
aob = self.clockwise_angle(A, O, B)
boa = self.clockwise_angle(B, O, A)
anchor = self.cond(self.lt(aob, boa), lambda: A, lambda: B)
theta = (math.pi * 2 - self.min(aob, boa)) * (-z)
Px = O.x + (anchor.x - O.x) * self.cos(theta) - (anchor.y - O.y) * self.sin(theta)
Py = O.y + (anchor.x - O.x) * self.sin(theta) + (anchor.y - O.y) * self.cos(theta)
P = self.get_point(Px, Py)
return self.register_pt(p, P)
def parameterize_in_poly(self, p, ps):
Ps = self.lookup_pts(ps)
zs = [self.mkvar(name=f"{p}_in_poly_{poly_p}") for poly_p in ps]
ws = self.softmax(zs)
Px = self.sum([P.x * w for (P, w) in zip(Ps, ws)])
Py = self.sum([P.y * w for (P, w) in zip(Ps, ws)])
P = self.get_point(Px, Py)
return self.register_pt(p, P)
#####################
## Assert
####################
def add(self, assertion):
self.has_loss = True
cons = assertion.constraint
pred, args, negate = cons.pred, cons.args, cons.negate
if negate:
raise RuntimeError("[add] Mishandled negation")
vals = self.assertion_vals(pred, args)
a_str = f"{pred}_{'_'.join([str(a) for a in args])}"
weight = 1 / len(vals)
for i, val in enumerate(vals):
loss_str = a_str if len(vals) == 1 else f"{a_str}_{i}"
self.register_loss(loss_str, val, weight=weight)
def addNDG(self, ndg):
self.has_loss = True
ndg_cons = ndg.constraint
pred, args = ndg_cons.pred, ndg_cons.args
vals = self.assertion_vals(pred, args)
# We only have to violate one!
ndg_val = tf.reduce_max(vals) # Note how we reduce MAX because we are trying to make non-zero
ndg_str = f"not_{pred}_{'_'.join([str(a) for a in args])}"
self.register_ndg(ndg_str, ndg_val, weight=1.0)
"""
a_str = f"not_{pred}_{'_'.join([str(a) for a in args])}"
weight = 1 / len(vals)
for i, val in enumerate(vals):
ndg_str = a_str if len(vals) == 1 else f"{a_str}_{i}"
self.register_ndg(ndg_str, val, weight=weight)
"""
def eval_cons(self, goal):
goal_cons = goal.constraint
pred, args, negate = goal_cons.pred, goal_cons.args, goal_cons.negate
vals = self.assertion_vals(pred, args)
g_str = f"{pred}_{'_'.join([str(a) for a in args])}"
if negate:
g_str = f"not_{g_str}"
vals = [tf.reduce_max(vals)]
for i, val in enumerate(vals):
goal_str = g_str if len(vals) == 1 else f"{g_str}_{i}"
self.register_goal(goal_str, val, negate)
def assertion_vals(self, pred, args):
if pred == "amidp-opp":
M, B, C, A = self.lookup_pts(args)
return [self.dist(M, self.amidp_opp(B, C, A))]
elif pred == "amidp-same":
M, B, C, A = self.lookup_pts(args)
return [self.dist(M, self.amidp_same(B, C, A))]
# elif pred == "between": return self.between_gap(*self.lookup_pts(args))
elif pred == "circumcenter":
O, A, B, C = self.lookup_pts(args)
# self.unnamed_circles.append((O, self.dist(O, A)))
return [self.dist(O, self.circumcenter(A, B, C))]
elif pred == "coll":
coll_args = self.lookup_pts(args)
diffs = [self.coll_phi(A, B, C) for A, B, C in itertools.combinations(coll_args, 3)]
# for i in range(len(coll_args)-1):
# self.segments.append((coll_args[i], coll_args[i+1]))
return diffs
elif pred == "concur":
l1, l2, l3 = args
inter_12 = Point(FuncInfo("inter-ll", [l1, l2]))
return self.assertion_vals("on-line", [inter_12, l3])
elif pred == "cong":
A, B, C, D = self.lookup_pts(args)
# if A in [C, D]: self.unnamed_circles.append((A, self.dist(A, B)))
# elif B in [C, D]: self.unnamed_circles.append((B, self.dist(A, B)))
return [self.cong_diff(A, B, C, D)]
elif pred == "con-tri":
[A, B, C, P, Q, R] = self.lookup_pts(args)
# self.segments.extend([(A, B), (B, C), (C, A), (P, Q), (Q, R), (R, P)])
return [self.eqangle6_diff(A, B, C, P, Q, R),
self.eqangle6_diff(B, C, A, Q, R, P),
self.eqangle6_diff(C, A, B, R, P, Q),
self.cong_diff(A, B, P, Q),
self.cong_diff(A, C, P, R),
self.cong_diff(B, C, Q, R)]
elif pred == "cycl":
cycl_args = self.lookup_pts(args)
assert(len(cycl_args) > 3)
O = self.circumcenter(*cycl_args[:3])
diffs = [self.eqangle6_diff(A, B, D, A, C, D) for A, B, C, D in itertools.combinations(cycl_args, 4)]
# self.unnamed_circles.append((O, self.dist(O, cycl_args[0])))
return diffs
elif pred == "dist-lt":
X, Y, A, B = self.lookup_pts(args)
return [self.max(self.const(0.0), self.dist(X, Y) - self.dist(A, B))]
elif pred == "dist-gt":
X, Y, A, B = self.lookup_pts(args)
return [self.max(self.const(0.0), self.dist(A, B) - self.dist(X, Y))]
elif pred == "eq-n":
n1, n2 = [self.eval_num(n) for n in args]
return [self.abs(n1 - n2)]
elif pred == "eq-p":
A, B = self.lookup_pts(args)
return [self.dist(A, B)]
elif pred == "eq-l":
l1, l2 = args
lnf1, lnf2 = self.line2nf(l1), self.line2nf(l2)
n1, r1 = lnf1
n2, r2 = lnf2
return [self.dist(n1, n2), self.abs(r1 - r2)]
elif pred == "gte":
n1, n2 = [self.eval_num(n) for n in args]
return [self.max(self.const(0.0), n2 - n1)]
elif pred == "gt":
# n1 > n2
n1, n2 = [self.eval_num(n) for n in args]
return [self.max(self.const(0.0), (n2 + 1e-1) - n1)]
elif pred == "lte":
n1, n2 = [self.eval_num(n) for n in args]
return [self.max(self.const(0.0), n1 - n2)]
elif pred == "lt":
# n1 < n2
n1, n2 = [self.eval_num(n) for n in args]
return [self.max(self.const(0.0), (n1 + 1e-1) - n2)]
elif pred == "eq-angle": return [self.eqangle8_diff(*self.lookup_pts(args))]
# elif pred == "eqoangle":
# A, B, C, P, Q, R = self.lookup_pts(args)
# return [self.angle(A, B, C) - self.angle(P, Q, R)]
elif pred == "eq-ratio": return [self.eqratio_diff(*self.lookup_pts(args))]
elif pred == "foot":
f, x, l = args
F, X = self.lookup_pts([f, x])
lnf = self.line2nf(l)
A, B = self.lnf2pp(lnf)
return [self.coll_phi(F, A, B), self.perp_phi(F, X, A, B)]
elif pred == "i-bisector":
X, B, A, C = self.lookup_pts(args)
# self.segments.extend([(B, A), (A, X), (A, C)])
return [self.eqangle8_diff(B, A, A, X, X, A, A, C)]
elif pred == "incenter":
I, A, B, C = self.lookup_pts(args)
return [self.dist(I, self.incenter(A, B, C))]
elif pred == "in-poly": return self.in_poly_phis(*self.lookup_pts(args))
elif pred == "inter-ll":
X, A, B, C, D = self.lookup_pts(args)
return [self.coll_phi(X, A, B), self.coll_phi(X, C, D)]
elif pred == "isogonal-conj":
X, Y, A, B, C = self.lookup_pts(args)
return [self.dist(X, self.isogonal_conj(Y, A, B, C))]
elif pred == "midp":
M, A, B = self.lookup_pts(args)
return [self.dist(M, self.midp(A, B))]
elif pred == "on-circ":
X, C = args
[X] = self.lookup_pts([X])
(O, r) = self.circ2nf(C)
return [self.dist(O, X) - r]
elif pred == "on-line":
[X, l] = args
[X] = self.lookup_pts([X])
lp1, lp2 = self.line2twoPts(l)
return [self.coll_phi(X, lp1, lp2)]
elif pred == "on-ray": return [self.coll_phi(*self.lookup_pts(args))] + self.onray_gap(*self.lookup_pts(args))
elif pred == "on-seg": return [self.coll_phi(*self.lookup_pts(args))] + self.between_gap(*self.lookup_pts(args))
elif pred == "opp-sides":
a, b, l = args
A, B = self.lookup_pts([a, b])
lnf = self.line2nf(l)
X, Y = self.lnf2pp(lnf)
return [self.max(self.const(0.0), self.side_score_prod(A, B, X, Y))]
elif pred == "orthocenter":
H, A, B, C = self.lookup_pts(args)
return [self.dist(H, self.orthocenter(A, B, C))]
elif pred == "perp":
if len(args) == 4: # four points
return [self.perp_phi(*self.lookup_pts(args))]
else: # two lines
l1, l2 = args
P1, P2 = self.line2twoPts(l1)
P3, P4 = self.line2twoPts(l2)
return [self.perp_phi(P1, P2, P3, P4)]
elif pred == "para":
if len(args) == 4: # four points
return [self.para_phi(*self.lookup_pts(args))]
else: # two lines
l1, l2 = args
P1, P2 = self.line2twoPts(l1)
P3, P4 = self.line2twoPts(l2)
return [self.para_phi(P1, P2, P3, P4)]
elif pred == "reflect-pl":
X, Y, A, B = self.lookup_pts(args)
return [self.perp_phi(X, Y, A, B), self.cong_diff(A, X, A, Y)]
elif pred == "right":
A, B, C = self.lookup_pts(args)
return [self.right_phi(A, B, C)]
elif pred == "right-tri":
A, B, C = self.lookup_pts(args)
return [tf.reduce_min([self.right_phi(A, B, C),
self.right_phi(B, A, C),
self.right_phi(B, C, A)])]
elif pred == "same-side":
a, b, l = args
A, B = self.lookup_pts([a, b])
lnf = self.line2nf(l)
X, Y = self.lnf2pp(lnf)
return [self.max(self.const(0.0), -self.side_score_prod(A, B, X, Y))]
elif pred == "sim-tri":
[A, B, C, P, Q, R] = self.lookup_pts(args)
# self.segments.extend([(A, B), (B, C), (C, A), (P, Q), (Q, R), (R, P)])
# this is *too* easy to optimize, eqangle properties don't end up holding
# return [eqratio_diff(A, B, B, C, P, Q, Q, R), eqratio_diff(B, C, C, A, Q, R, R, P), eqratio_diff(C, A, A, B, R, P, P, Q)]
return [self.eqangle6_diff(A, B, C, P, Q, R), self.eqangle6_diff(B, C, A, Q, R, P), self.eqangle6_diff(C, A, B, R, P, Q)]
elif pred == "tangent-cc":
# https://mathworld.wolfram.com/TangentCircles.html
# Could distinguish b/w internally and externally if desired
c1, c2 = args
cnf1, cnf2 = self.circ2nf(c1), self.circ2nf(c2)
(x1, y1) = cnf1.center
(x2, y2) = cnf2.center
r1, r2 = cnf1.radius, cnf2.radius
lhs = (x1 - x2) ** 2 + (y1 - y2) ** 2
rhs_1 = (r1 - r2) ** 2
rhs_2 = (r1 + r2) ** 2
return [tf.reduce_min([self.abs(lhs - rhs_1), self.abs(lhs - rhs_2)])]
elif pred == "tangent-lc":
l, c = args
inter_point = Point(FuncInfo("inter-lc", [l, c, Root("arbitrary", list())]))
return self.assertion_vals("tangent-at-lc", [inter_point, l, c])
elif pred == "tangent-at-cc":
p, c1, c2 = args
c1_center = Point(FuncInfo("origin", [c1]))
c2_center = Point(FuncInfo("origin", [c2]))
p_on_c1 = self.assertion_vals("on-circ", [p, c1])
p_on_c2 = self.assertion_vals("on-circ", [p, c2])
tangency = self.assertion_vals("coll", [p, c1_center, c2_center])
return p_on_c1 + p_on_c2 + tangency
elif pred == "tangent-at-lc":
p, l, c = args
circ_center = Point(FuncInfo("origin", [c]))
circ_center_to_p = Line(FuncInfo("connecting", [circ_center, p]))
p_on_line = self.assertion_vals("on-line", [p, l])
p_on_circ = self.assertion_vals("on-circ", [p, c])
tangency = self.assertion_vals("perp", [l, circ_center_to_p])
return p_on_line + p_on_circ + tangency
else: raise NotImplementedError(f"[assertion_vals] NYI: {pred}")
#####################
## Comp. Geo
####################
def midp(self, A, B):
return (A + B).smul(0.5)
def midp_from(self, M, A):
return A + (M - A).smul(2)
def sqdist(self, A, B):
return (A.x - B.x)**2 + (A.y - B.y)**2
def dist(self, A, B):
return self.sqdist(A, B) ** (1 / 2)
def inner_product(self, A, B):
a1, a2 = A
b1, b2 = B
return a1*b1 + a2*b2
def scalar_product(self, A, O, B):
lhs = (A.x - O.x) * (B.x - O.x)
rhs = (A.y - O.y) * (B.y - O.y)
return lhs + rhs
def matrix_mul(self, mat, pt):
pt1, pt2 = mat
return self.get_point(self.inner_product(pt1, pt), self.inner_product(pt2, pt))
def rotation_matrix(self, theta):
r1 = self.get_point(self.cos(theta), -self.sin(theta))
r2 = self.get_point(self.sin(theta), self.cos(theta))
return (r1, r2)
def rotate_counterclockwise(self, theta, pt):
return self.matrix_mul(self.rotation_matrix(theta), pt)
def rotate_clockwise_90(self, pt):
return self.matrix_mul(
(self.get_point(self.const(0.0), self.const(1.0)),
self.get_point(self.const(-1.0),self.const(0.0))),
pt)
def rotate_counterclockwise_90(self, pt):
return self.matrix_mul(
(self.get_point(self.const(0.0), self.const(-1.0)),
self.get_point(self.const(1.0),self.const(0.0))),
pt)
def side_lengths(self, A, B, C):
return self.dist(B, C), self.dist(C, A), self.dist(A, B)
def clockwise_angle(self, A, B, C):
x1, y1 = A.x - B.x, A.y - B.y
x2, y2 = C.x - B.x, C.y - B.y
dot = x1 * x2 + y1 * y2
det = x1 * y2 - y1 * x2
return self.atan2(det, dot)
def angle(self, A, B, C):
a, b, c = self.side_lengths(A, B, C)
return self.acos((a**2 + c**2 - b**2) / (2 * a * c))
def right_phi(self, A, B, C):
return self.abs(self.angle(A, B, C) - math.pi / 2)
def conway_vals(self, A, B, C):
a, b, c = self.side_lengths(A, B, C)
return (b**2 + c**2 - a**2)/2, (c**2 + a**2 - b**2)/2, (a**2 + b**2 - c**2)/2
def trilinear(self, A, B, C, x, y, z):
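# Convert trilinear coordinates x:y:z with respect to triangle ABC into a Cartesian point.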
a, b, c = self.side_lengths(A, B, C)
denom = a * x + b * y + c * z
return self.get_point((a * x * A.x + b * y * B.x + c * z * C.x) / denom,
(a * x * A.y + b * y * B.y + c * z * C.y) / denom)
def barycentric(self, A, B, C, x, y, z):
a, b, c = self.side_lengths(A, B, C)
return self.trilinear(A, B, C, x/a, y/b, z/c)
def circumcenter(self, A, B, C):
a, b, c = self.side_lengths(A, B, C)
Sa, Sb, Sc = self.conway_vals(A, B, C)
res = self.barycentric(A, B, C, a**2 * Sa, b**2 * Sb, c**2 * Sc)
return res
def orthocenter(self, A, B, C):
a, b, c = self.side_lengths(A, B, C)
Sa, Sb, Sc = self.conway_vals(A, B, C)
return self.barycentric(A, B, C, Sb * Sc, Sc * Sa, Sa * Sb)
def centroid(self, A, B, C):
return self.barycentric(A, B, C, 1, 1, 1)
def incenter(self, A, B, C):
return self.trilinear(A, B, C, 1, 1, 1)
def excenter(self, A, B, C):
return self.trilinear(A, B, C, -1, 1, 1)
def perp_phi(self, A, B, C, D):
return (A.x - B.x) * (C.x - D.x) + (A.y - B.y) * (C.y - D.y)
def para_phi(self, A, B, C, D):
return (A.x - B.x) * (C.y - D.y) - (A.y - B.y) * (C.x - D.x)
def cong_diff(self, A, B, C, D):
return self.sqdist(A, B) - self.sqdist(C, D)
def coll_phi(self, A, B, C):
return A.x * (B.y - C.y) + B.x * (C.y - A.y) + C.x * (A.y - B.y)
def between_gap(self, X, A, B):
eps = 0.2
A1 = self.get_point(A.x + eps * (B.x - A.x), A.y + eps * (B.y - A.y))
B1 = self.get_point(B.x + eps * (A.x - B.x), B.y + eps * (A.y - B.y))
return [self.diff_signs(X.x - A1.x, X.x - B1.x), self.diff_signs(X.y - A1.y, X.y - B1.y)]
def onray_gap(self, X, A, B):
eps = 0.2
A1 = self.get_point(A.x + eps * (B.x - A.x), A.y + eps * (B.y - A.y))
# TODO: coll_phi causing NaNs when [X, A, B] are perfectly collinear by construction
return [self.diff_signs(X.x - A1.x, A1.x - B.x), self.diff_signs(X.y - A1.y, A1.y - B.y)]
def det3(self, A, O, B):
lhs = (A.x - O.x) * (B.y - O.y)
rhs = (A.y - O.y) * (B.x - O.x)
return lhs - rhs
def side_score_prod(self, a, b, x, y):
return self.det3(a, x, y) * self.det3(b, x, y)
def opp_sides(self, a, b, x, y):
return self.lt(self.side_score_prod(a, b, x, y), 0.0)
def same_side(self, a, b, x, y):
return self.gt(self.side_score_prod(a, b, x, y), 0.0)
def inter_ll(self, l1, l2):
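# Intersection of two lines given in normal form LineNF(n, r); the denominator is
# clamped away from zero so that near-parallel lines do not produce NaNs.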
(n11, n12), r1 = l1 # TODO(jesse): ensure that this pattern matching works
(n21, n22), r2 = l2
def inter_ll_aux(n11, n12, r1, n21, n22, r2):
numer = n11 * r2 * (n21 ** 2 + n22**2) - r1 * (n11**2 * n21 + n12**2 * n21)
denom = n11 * n22 - n12 * n21
def on_ok():
return numer/denom
def on_bad():
return numer/(tf.math.sign(denom) * 1e-4)
return tf.cond(tf.less(tf.math.abs(denom), 1e-4),
on_bad,
on_ok)
return self.get_point(x=inter_ll_aux(n22, n21, r2, n12, n11, r1),
y=inter_ll_aux(n11, n12, r1, n21, n22, r2))
def inter_pp_c(self, P1, P2, cnf):
# We follow http://mathworld.wolfram.com/Circle-LineIntersection.html
O, r = cnf
P1, P2 = self.shift(O, [P1, P2])
dx = P1.x - P2.x
dy = P1.y - P2.y
dr = self.sqrt(dx**2 + dy**2)
D = P2.x * P1.y - P1.x * P2.y
radicand = r**2 * dr**2 - D**2
def on_nneg():
def sgnstar(x):
return self.cond(self.lt(x, self.const(0.0)), lambda: self.const(-1.0), lambda: self.const(1.0))
Q1 = self.get_point((D * dy + sgnstar(dy) * dx * self.sqrt(radicand)) / (dr**2),
(-D * dx + self.abs(dy) * self.sqrt(radicand)) / (dr**2))
Q2 = self.get_point((D * dy - sgnstar(dy) * dx * self.sqrt(radicand)) / (dr**2),
(-D * dx - self.abs(dy) * self.sqrt(radicand)) / (dr**2))
return self.unshift(O, [Q1, Q2])
def on_neg():
Operp = self.rotate_counterclockwise_90(P1 - P2) + O
F = self.inter_ll(self.pp2lnf(P1, P2), self.pp2lnf(O, Operp))
X = O + (F - O).smul(r / self.dist(O, F))
Q = self.midp(F, X)
return self.unshift(O, [Q, Q])
return self.cond(self.lte(radicand, self.const(0.0)), on_neg, on_nneg)
def inter_lc(self, lnf, c, root_select):
p1, p2 = self.lnf2pp(lnf)
I1, I2 = self.inter_pp_c(p1, p2, c)
return self.process_rs(I1, I2, root_select)
def inter_cc(self, cnf1, cnf2, root_select):
l = self.radical_axis(cnf1, cnf2)
result = self.inter_lc(l, cnf1, root_select)
return result
def make_lc_intersect(self, name, lnf, c):
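# Register a soft loss that encourages line lnf and circle c to intersect: penalize the
# distance from the circle's center to the line whenever it exceeds the radius
# (skipped for degenerate inputs).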
A, B = self.lnf2pp(lnf)
O, r = c
Operp = self.rotate_counterclockwise_90(A - B) + O
F = self.inter_ll(lnf, self.pp2lnf(O, Operp))
d = self.dist(O, F)
f_val = self.cond(self.lt(r, d), lambda: d, lambda: self.const(0.0))
loss = self.cond(self.logical_or(self.lt(self.dist(O, Operp), 1e-6),
self.lt(self.dist(A, B), 1e-6)),
lambda: self.const(0.0), lambda: f_val)
self.register_loss(f"interLC_{name}", loss, weight=1e-1)
def second_meet_pp_c(self, A, B, O):
P1, P2 = self.inter_pp_c(A, B, CircleNF(O, self.dist(O, A)))
return self.cond(self.lt(self.sqdist(A, P1), self.sqdist(A, P2)), lambda: P2, lambda: P1)
def amidp_opp(self, B, C, A):
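# Arc midpoint of BC on the side opposite A: the second intersection of the line
# through A and the incenter with the circumcircle of ABC.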
O = self.circumcenter(A, B, C)
I = self.incenter(A, B, C)
return self.second_meet_pp_c(A, I, O)
def amidp_same(self, B, C, A):
M = self.amidp_opp(B, C, A)
O = self.circumcenter(A, B, C)
return self.second_meet_pp_c(M, O, O)
def radical_axis_pts(self, cnf1, cnf2):
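# Two points spanning the radical axis of circles cnf1 and cnf2 (the line of equal power),
# branching on |A| to avoid dividing by a near-zero coefficient.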
(c1x, c1y), r1 = cnf1
(c2x, c2y), r2 = cnf2
A = self.const(2.0) * (c2x - c1x)
B = self.const(2.0) * (c2y - c1y)
C = (r1**2 - r2**2) + (c2y**2 - c1y**2) + (c2x**2 - c1x**2)
# FIXME: Fails on EGMO 2.7 because we aren't passing around lambdas anymore
# pdb.set_trace()
test = self.gt(self.abs(A), 1e-6)
p1 = self.cond(test,
lambda: self.get_point(x=(C-B)/A, y=self.const(1.0)),
lambda: self.get_point(x=self.const(1.0), y=C/B))
p2 = self.cond(test,
lambda: self.get_point(x=C/A, y=self.const(0.0)),
lambda: self.get_point(x=self.const(0.0), y=C/B))
return p1, p2
def radical_axis(self, cnf1, cnf2):
p1, p2 = self.radical_axis_pts(cnf1, cnf2)
return self.pp2lnf(p1, p2)
def eqangle6_diff(self, A, B, C, P, Q, R):
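# Proportional to sin(angle ABC - angle PQR); zero when the directed angles at B and Q are equal.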
s1 = self.det3(A, B, C)
c1 = self.scalar_product(A, B, C)
s2 = self.det3(P, Q, R)
c2 = self.scalar_product(P, Q, R)
return 0.1 * (s1 * c2 - s2 * c1)
def eqratio_diff(self, A, B, C, D, P, Q, R, S):
# AB/CD = PQ/RS
return self.sqrt(self.dist(A, B) * self.dist(R, S)) - self.sqrt(self.dist(P, Q) * self.dist(C, D))
def cycl_diff(self, A, B, C, D):
return self.eqangle6_diff(A, B, D, A, C, D)
def eqangle8_diff(self, A, B1, B2, C, P, Q1, Q2, R):
return self.eqangle6_diff(A, B1, C - B2 + B1, P, Q1, R - Q2 + Q1)
def semiperimeter(self, A, B, C):
a, b, c = self.side_lengths(A, B, C)
return (a + b + c) / 2
def area(self, A, B, C):
a, b, c = self.side_lengths(A, B, C)
s = self.semiperimeter(A, B, C)
return self.sqrt(s * (s - a) * (s - b) * (s - c))
def inradius(self, A, B, C):
return self.area(A, B, C) / self.semiperimeter(A, B, C)
def exradius(self, A, B, C):
r = self.inradius(A, B, C)
a, b, c = self.side_lengths(A, B, C)
s = (a + b + c)/2
return r * s / (s - a)
def mixtilinear_incenter(self, A, B, C):
ta, tb, tc = self.angle(C, A, B), self.angle(A, B, C), self.angle(B, C, A)
return self.trilinear(A, B, C, (1/2) * (1 + self.cos(ta) - self.cos(tb) - self.cos(tc)), 1, 1)
def mixtilinear_inradius(self, A, B, C):
r = self.inradius(A, B, C)
ta = self.angle(C, A, B)
return r * (1 / self.cos(ta / 2)**2)
def to_trilinear(self, P, A, B, C):
la = self.pp2lnf(B, C)
lb = self.pp2lnf(C, A)
lc = self.pp2lnf(A, B)
ga = self.pp2lnf(P, P + self.rotate_counterclockwise_90(C - B))
gb = self.pp2lnf(P, P + self.rotate_counterclockwise_90(A - C))
gc = self.pp2lnf(P, P + self.rotate_counterclockwise_90(B - A))
da = self.dist(P, self.inter_ll(la, ga))
db = self.dist(P, self.inter_ll(lb, gb))
dc = self.dist(P, self.inter_ll(lc, gc))
da = self.cond(self.opp_sides(P, A, B, C), lambda: -da, lambda: da)
db = self.cond(self.opp_sides(P, B, C, A), lambda: -db, lambda: db)
dc = self.cond(self.opp_sides(P, C, A, B), lambda: -dc, lambda: dc)
return da, db, dc
def invert_or_zero(self, x):
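# Safe reciprocal: return 0 when |x| is numerically zero to avoid division blow-ups.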
return self.cond(self.lt(self.abs(x), 1e-5), lambda: self.const(0.0), lambda: self.const(1) / x)
def isogonal_conj(self, P, A, B, C):
x, y, z = self.to_trilinear(P, A, B, C)
return self.trilinear(A, B, C, self.invert_or_zero(x), self.invert_or_zero(y), self.invert_or_zero(z))
def isotomic_conj(self, P, A, B, C):
a, b, c = self.side_lengths(A, B, C)
x, y, z = self.to_trilinear(P, A, B, C)
return self.trilinear(A, B, C, (a**2) * self.invert_or_zero(x), (b**2) * self.invert_or_zero(y), (c**2) * self.invert_or_zero(z))
def inverse(self, X, O, A):
return O + (X - O).smul(self.sqdist(O, A) / self.sqdist(O, X))
def harmonic_l_conj(self, X, A, B):
# see picture in https://en.wikipedia.org/wiki/Projective_harmonic_conjugate
# L is arbitrary here, not on the line X A B
# (could also do case analysis and cross-ratio)
L = A + self.rotate_counterclockwise(self.const(math.pi / 3), X - A).smul(0.5)
M = self.midp(A, L)
N = self.inter_ll(self.pp2lnf(B, L), self.pp2lnf(X, M))
K = self.inter_ll(self.pp2lnf(A, N), self.pp2lnf(B, M))
Y = self.inter_ll(self.pp2lnf(L, K), self.pp2lnf(A, X))
return Y
def in_poly_phis(self, X, *Ps):
phis = []
n = len(Ps)
for i in range(n):
A, B, C = Ps[i], Ps[(i+1) % n], Ps[(i+2) % n]
# X and C are on the same side of AB
phis.append(self.max(self.const(0.0), - self.side_score_prod(X, C, A, B)))
return phis
#####################
## Utilities
####################
def line2twoPts(self, l):
if isinstance(l.val, str):
L = self.name2line[l]
return self.lnf2pp(L)
elif isinstance(l.val, FuncInfo):
pred, args = l.val
if pred == "connecting":
return self.lookup_pts(args)
elif pred == "isogonal":
D, A, B, C = self.lookup_pts(args)
return A, self.isogonal_conj(D, A, B, C)
elif pred == "isotomic":
D, A, B, C = self.lookup_pts(args)
return A, self.isotomic_conj(D, A, B, C)
elif pred == "para-at":
x, l = args
X = self.lookup_pt(x)
L = self.line2nf(l)
A, B = self.lnf2pp(L)
return X, X + B - A
elif pred == "perp-at":
x, l = args
X = self.lookup_pt(x)
L = self.line2nf(l)
A, B = self.lnf2pp(L)
return X, X + self.rotate_counterclockwise_90(A - B)
elif pred == "perp-bis":
a, b = args
l_ab = Line(FuncInfo("connecting", [a, b]))
m_ab = Point(FuncInfo("midp", [a, b]))
return self.line2twoPts(Line(FuncInfo("perp-at", [m_ab, l_ab])))
elif pred == "mediator":
A, B = self.lookup_pts(args)
M = self.midp(A, B)
return M, M + self.rotate_counterclockwise_90(A - B)
elif pred == "i-bisector":
A, B, C = self.lookup_pts(args)
X = B + (A - B).smul(self.dist(B, C) / self.dist(B, A))
M = self.midp(X, C)
return B, M
elif pred == "e-bisector":
A, B, C = self.lookup_pts(args)
X = B + (A - B).smul(self.dist(B, C) / self.dist(B, A))
M = self.midp(X, C)
Y = B + self.rotate_counterclockwise_90(M - B)
return B, Y
# elif pred == "eqoangle":
# B, C, D, E, F = self.lookup_pts(args)
# theta = self.angle(D, E, F)
# X = B + self.rotate_counterclockwise(theta, C - B)
# # self.segments.extend([(A, B), (B, C), (P, Q), (Q, R)])
# return B, X
elif pred == "reflect-ll":
l1, l2 = args
lnf1 = self.line2nf(l1)
p1, p2 = self.lnf2pp(lnf1)
refl_p1 = Point(FuncInfo("reflect-pl", [Point(FuncInfo("__val__", [p1])), l2]))
refl_p1 = self.lookup_pt(refl_p1)
refl_p2 = Point(FuncInfo("reflect-pl", [Point(FuncInfo("__val__", [p2])), l2]))
refl_p2 = self.lookup_pt(refl_p2)
return refl_p1, refl_p2
else:
raise RuntimeError(f"[line2twoPts] Unexpected line pred: {pred}")
else:
raise RuntimeError(f"Unsupported line type: {type(l)}")
def line2sf(self, l):
if isinstance(l.val, str):
return self.name2line[l]
else:
p1, p2 = self.line2twoPts(l)
return self.pp2sf(p1, p2)
def lnf2pp(self, lnf):
n, r = lnf
w = n.smul(r)
m = self.rotate_clockwise_90(n)
return w, w + m
def pp2lnf(self, p1, p2):
# TODO(jesse): please name this
def mysterious_pp2pp(p1, p2):
x,y = p2
def pred(x,y):
return tf.logical_or(tf.math.less(y, self.const(0.0)),
tf.logical_and(tf.equal(y, self.const(0.0)), tf.math.less(x, self.const(0.0))))
return tf.compat.v1.cond(pred(x,y), lambda:(p1, p2.smul(-1.0)), lambda:(p1, p2))
def pp2lnf_core(p1, p2):
p1, p2 = mysterious_pp2pp(p1, p2)
x , _ = p2
n = tf.compat.v1.cond(tf.less_equal(x,0.0), lambda: self.rotate_clockwise_90(p2), lambda: self.rotate_counterclockwise_90(p2))
r = self.inner_product(p1, n)
return LineNF(n=n, r=r)
return pp2lnf_core(p1, (p1 - p2).normalize())
def line2nf(self, l):
if isinstance(l.val, str):
return self.name2line[l]
else:
p1, p2 = self.line2twoPts(l)
return self.pp2lnf(p1, p2)
def pp2sf(self, p1, p2):
def vert_line():
return LineSF(self.const(1.0), self.const(0.0), p1.x, p1, p2)
def horiz_line():
return LineSF(self.const(0.0), self.const(1.0), p1.y, p1, p2)
def calc_sf_from_slope_intercept():
(x1, y1) = p1
(x2, y2) = p2
m = (y2 - y1) / (x2 - x1)
b = y1 - m * x1
# y = mx + b ---> -mx + (1)y = b
return LineSF(-m, self.const(1.0), b, p1, p2)
return self.cond(self.eq(p1.x, p2.x),
vert_line,
lambda: self.cond(self.eq(p1.y, p2.y),
horiz_line,
calc_sf_from_slope_intercept))
def circ2nf(self, circ):
if isinstance(circ.val, str):
return self.name2circ[circ]
elif isinstance(circ.val, FuncInfo):
pred, args = circ.val
if pred == "c3" or pred == "circumcircle":
A, B, C = self.lookup_pts(args)
O = self.circumcenter(A, B, C)
return CircleNF(center=O, radius=self.dist(O, A))
elif pred == "coa":
O, A = self.lookup_pts(args)
return CircleNF(center=O, radius=self.dist(O, A))
elif pred == "cong":
O, X, Y = self.lookup_pts(args)
return CircleNF(center=O, radius=self.dist(X, Y))
elif pred == "diam":
B, C = self.lookup_pts(args)
O = self.midp(B, C)
return CircleNF(center=O, radius=self.dist(O, B))
elif pred == "incircle":
A, B, C = self.lookup_pts(args)
I = self.incenter(A, B, C)
return CircleNF(center=I, radius=self.inradius(A, B, C))
elif pred == "excircle":
A, B, C = self.lookup_pts(args)
I = self.excenter(A, B, C)
return CircleNF(center=I, radius=self.exradius(A, B, C))
elif pred == "mixtilinear-incircle":
A, B, C = self.lookup_pts(args)
I = self.mixtilinear_incenter(A, B, C)
return CircleNF(center=I, radius=self.mixtilinear_inradius(A, B, C))
else:
raise RuntimeError(f"[circ2nf] NYI: {pred}")
else:
raise RuntimeError("Invalid circle type")
def shift(self, O, Ps):
return [self.get_point(P.x - O.x, P.y - O.y) for P in Ps]
def unshift(self, O, Ps):
return [self.get_point(P.x + O.x, P.y + O.y) for P in Ps]
def pt_eq(self, p1, p2):
return self.lt(self.dist(p1, p2), 1e-6)
def pt_neq(self, p1, p2):
return self.gt(self.dist(p1, p2), 1e-6)
def process_rs(self, P1, P2, root_select):
pred = root_select.pred
rs_args = root_select.vars
if pred == "neq":
[pt] = self.lookup_pts(rs_args)
return self.cond(self.pt_neq(P1, pt), lambda: P1, lambda: P2)
elif pred == "closer-to-p":
[pt] = self.lookup_pts(rs_args)
test = self.lte(self.sqdist(P1, pt), self.sqdist(P2, pt))
return self.cond(test, lambda: P1, lambda: P2)
elif pred == "closer-to-l":
[l] = rs_args
P1_foot = Point(FuncInfo("foot", [Point(FuncInfo("__val__", [P1])), l]))
P1_foot = self.lookup_pt(P1_foot)
P2_foot = Point(FuncInfo("foot", [Point(FuncInfo("__val__", [P2])), l]))
P2_foot = self.lookup_pt(P2_foot)
test = self.lte(self.sqdist(P1, P1_foot), self.sqdist(P2, P2_foot))
return self.cond(test, lambda: P1, lambda: P2)
# elif pred == "furtherFrom":
# [pt] = self.lookup_pts(rs_args)
# test = self.lt(self.sqdist(P2, pt), self.sqdist(P1, pt))
# return self.cond(test, lambda: P1, lambda: P2)
elif pred == "opp-sides":
[pt] = self.lookup_pts([rs_args[0]])
a, b = self.lnf2pp(self.line2nf(rs_args[1]))
return self.cond(self.opp_sides(P1, pt, a, b), lambda: P1, lambda: P2)
elif pred == "same-side":
[pt] = self.lookup_pts([rs_args[0]])
a, b = self.lnf2pp(self.line2nf(rs_args[1]))
return self.cond(self.same_side(P1, pt, a, b), lambda: P1, lambda: P2)
elif pred == "arbitrary":
return P2
else:
raise NotImplementedError(f"[process_rs] NYI: {pred}")
def points_far_enough_away(self):
name2pt = self.run(self.name2pt)
min_dist = self.opts['min_dist']
for a, b in itertools.combinations(name2pt.keys(), 2):
A, B = name2pt[a], name2pt[b]
d = self.dist(A, B)
if d < min_dist:
if self.opts['verbosity'] >= 0:
print(f"DUP: {a} {b}")
return False
return True
def diff_signs(self, x, y):
return self.max(self.const(0.0), x * y)
| 2.421875
| 2
|
slm_lab/agent/algorithm/policy_util.py
|
achao2013/SLM-Lab
| 1
|
12784307
|
'''
Action policy methods for sampling actions
Algorithm provides a `calc_pdparam` which takes a state and does a forward pass through its net,
and the pdparam is used to construct an action probability distribution appropriate for the action type indicated by the body.
Then the prob. dist. is used to sample an action.
The default form looks like:
```
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
```
We can also augment pdparam before sampling - as in the case of Boltzmann sampling,
or use epsilon-greedy to choose between pdparam-based sampling and random sampling.
'''
from slm_lab.env.wrapper import LazyFrames
from slm_lab.lib import logger, math_util, util
from torch import distributions
import numpy as np
import pydash as ps
import torch
logger = logger.get_logger(__name__)
# probability distribution classes supported for each action type; the first in each list is the default
ACTION_PDS = {
'continuous': ['Normal', 'Beta', 'Gumbel', 'LogNormal'],
'multi_continuous': ['MultivariateNormal'],
'discrete': ['Categorical', 'Argmax'],
'multi_discrete': ['MultiCategorical'],
'multi_binary': ['Bernoulli'],
}
class Argmax(distributions.Categorical):
'''
Special distribution class for argmax sampling, where probability is always 1 for the argmax.
NOTE although argmax is not a sampling distribution, this implementation is for API consistency.
'''
def __init__(self, probs=None, logits=None, validate_args=None):
if probs is not None:
new_probs = torch.zeros_like(probs, dtype=torch.float)
new_probs[torch.argmax(probs, dim=0)] = 1.0
probs = new_probs
elif logits is not None:
new_logits = torch.full_like(logits, -1e8, dtype=torch.float)
max_idx = torch.argmax(logits, dim=0)
new_logits[max_idx] = logits[max_idx]
logits = new_logits
super(Argmax, self).__init__(probs=probs, logits=logits, validate_args=validate_args)
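# Illustrative sketch (not part of the library): Argmax collapses all probability mass onto the
# largest logit, so sampling is effectively deterministic. Assumes only torch; safe to delete.
def _argmax_example():
    logits = torch.tensor([0.1, 2.0, -1.0])
    pd = Argmax(logits=logits)
    # pd.probs is ~[0., 1., 0.] and pd.sample() returns index 1
    return pd.sample().item(), pd.probs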
class MultiCategorical(distributions.Categorical):
'''MultiCategorical as collection of Categoricals'''
def __init__(self, probs=None, logits=None, validate_args=None):
self.categoricals = []
if probs is None:
probs = [None] * len(logits)
elif logits is None:
logits = [None] * len(probs)
else:
raise ValueError('Either probs or logits must be None')
for sub_probs, sub_logits in zip(probs, logits):
categorical = distributions.Categorical(probs=sub_probs, logits=sub_logits, validate_args=validate_args)
self.categoricals.append(categorical)
@property
def logits(self):
return [cat.logits for cat in self.categoricals]
@property
def probs(self):
return [cat.probs for cat in self.categoricals]
@property
def param_shape(self):
return [cat.param_shape for cat in self.categoricals]
@property
def mean(self):
return torch.stack([cat.mean for cat in self.categoricals])
@property
def variance(self):
return torch.stack([cat.variance for cat in self.categoricals])
def sample(self, sample_shape=torch.Size()):
return torch.stack([cat.sample(sample_shape=sample_shape) for cat in self.categoricals])
def log_prob(self, value):
return torch.stack([cat.log_prob(value[idx]) for idx, cat in enumerate(self.categoricals)])
def entropy(self):
return torch.stack([cat.entropy() for cat in self.categoricals])
def enumerate_support(self):
return [cat.enumerate_support() for cat in self.categoricals]
setattr(distributions, 'Argmax', Argmax)
setattr(distributions, 'MultiCategorical', MultiCategorical)
# base methods
def try_preprocess(state, algorithm, body, append=True):
'''Try calling preprocess as implemented in body's memory to use for net input'''
if isinstance(state, LazyFrames):
state = state.__array__() # from global env preprocessor
if hasattr(body.memory, 'preprocess_state'):
state = body.memory.preprocess_state(state, append=append)
# as float, and always as minibatch for net input
state = torch.from_numpy(state).float().unsqueeze(dim=0)
return state
def cond_squeeze(out):
    '''Helper to squeeze output depending on whether it is a tensor (discrete pdparam) or a list of tensors (continuous pdparam of loc and scale)'''
if isinstance(out, list):
return [out_t.squeeze(dim=0) for out_t in out]
else:
return out.squeeze(dim=0)
def init_action_pd(state, algorithm, body, append=True):
'''
Build the proper action prob. dist. to use for action sampling.
state is passed through algorithm's net via calc_pdparam, which the algorithm must implement using its proper net.
    This will return ActionPD, pdparam and body to allow augmentation, e.g. applying temperature tau to pdparam for Boltzmann sampling.
    The output must then be passed to sample_action_pd(ActionPD, pdparam, body) to sample an action.
@returns {cls, tensor, *} ActionPD, pdparam, body
'''
pdtypes = ACTION_PDS[body.action_type]
assert body.action_pdtype in pdtypes, f'Pdtype {body.action_pdtype} is not compatible/supported with action_type {body.action_type}. Options are: {ACTION_PDS[body.action_type]}'
ActionPD = getattr(distributions, body.action_pdtype)
state = try_preprocess(state, algorithm, body, append=append)
state = state.to(algorithm.net.device)
pdparam = algorithm.calc_pdparam(state, evaluate=False)
return ActionPD, pdparam, body
def sample_action_pd(ActionPD, pdparam, body):
'''
    This uses the outputs from init_action_pd and an optionally augmented pdparam to construct an action_pd for sampling an action
    @returns {tensor, distribution} action, action_pd: a sampled action, and the prob. dist. used for sampling, to enable calculations like kl, entropy, etc. later.
'''
pdparam = cond_squeeze(pdparam)
if body.is_discrete:
action_pd = ActionPD(logits=pdparam)
else: # continuous outputs a list, loc and scale
assert len(pdparam) == 2, pdparam
        # scale (stdev) must be >0; apply softplus (skipped for large values, where softplus(x) ~ x and exp(x) could overflow)
if pdparam[1] < 5:
pdparam[1] = torch.log(1 + torch.exp(pdparam[1])) + 1e-8
action_pd = ActionPD(*pdparam)
action = action_pd.sample()
return action, action_pd
# interface action sampling methods
def default(state, algorithm, body):
'''Plain policy by direct sampling using outputs of net as logits and constructing ActionPD as appropriate'''
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
return action, action_pd
def random(state, algorithm, body):
'''Random action sampling that returns the same data format as default(), but without forward pass. Uses gym.space.sample()'''
state = try_preprocess(state, algorithm, body, append=True) # for consistency with init_action_pd inner logic
if body.action_type == 'discrete':
action_pd = distributions.Categorical(logits=torch.ones(body.action_space.high, device=algorithm.net.device))
elif body.action_type == 'continuous':
        # Possibly this should have a 'device' set
action_pd = distributions.Uniform(
low=torch.tensor(body.action_space.low).float(),
high=torch.tensor(body.action_space.high).float())
elif body.action_type == 'multi_discrete':
action_pd = distributions.Categorical(
logits=torch.ones(body.action_space.high.size, body.action_space.high[0], device=algorithm.net.device))
elif body.action_type == 'multi_continuous':
raise NotImplementedError
elif body.action_type == 'multi_binary':
raise NotImplementedError
else:
raise NotImplementedError
sample = body.action_space.sample()
action = torch.tensor(sample, device=algorithm.net.device)
return action, action_pd
def epsilon_greedy(state, algorithm, body):
'''Epsilon-greedy policy: with probability epsilon, do random action, otherwise do default sampling.'''
epsilon = body.explore_var
if epsilon > np.random.rand():
return random(state, algorithm, body)
else:
return default(state, algorithm, body)
def boltzmann(state, algorithm, body):
'''
    Boltzmann policy: adjust pdparam with temperature tau; the higher the temperature, the more randomness/noise in the action.
'''
tau = body.explore_var
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
pdparam /= tau
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
return action, action_pd
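# Minimal sketch of the temperature effect used above (illustrative only, not called by the library):
# dividing logits by a small tau sharpens the softmax towards greedy, a large tau flattens it.
def _boltzmann_temperature_example():
    logits = torch.tensor([1.0, 2.0, 3.0])
    near_greedy = torch.softmax(logits / 0.5, dim=0)  # low tau -> mass concentrates on the max logit
    near_uniform = torch.softmax(logits / 5.0, dim=0)  # high tau -> close to uniform
    return near_greedy, near_uniform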
# multi-body policy with a single forward pass to calc pdparam
def multi_default(states, algorithm, body_list, pdparam):
'''
Apply default policy body-wise
Note, for efficiency, do a single forward pass to calculate pdparam, then call this policy like:
@example
pdparam = self.calc_pdparam(state, evaluate=False)
action_a, action_pd_a = self.action_policy(pdparam, self, body_list)
'''
pdparam = pdparam.squeeze(dim=0)
# assert pdparam has been chunked
assert len(pdparam.shape) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_random(states, algorithm, body_list, pdparam):
'''Apply random policy body-wise.'''
pdparam = pdparam.squeeze(dim=0)
action_list, action_pd_a = [], []
    for idx, body in enumerate(body_list):
action, action_pd = random(states[idx], algorithm, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_epsilon_greedy(states, algorithm, body_list, pdparam):
'''Apply epsilon-greedy policy body-wise'''
assert len(pdparam) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
epsilon = body.explore_var
if epsilon > np.random.rand():
action, action_pd = random(states[idx], algorithm, body)
else:
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_boltzmann(states, algorithm, body_list, pdparam):
'''Apply Boltzmann policy body-wise'''
assert len(pdparam) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
tau = body.explore_var
sub_pdparam /= tau
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
# action policy update methods
class VarScheduler:
'''
Variable scheduler for decaying variables such as explore_var (epsilon, tau) and entropy
e.g. spec
"explore_var_spec": {
"name": "linear_decay",
"start_val": 1.0,
"end_val": 0.1,
"start_step": 0,
"end_step": 800,
},
'''
def __init__(self, var_decay_spec=None):
self._updater_name = 'no_decay' if var_decay_spec is None else var_decay_spec['name']
self._updater = getattr(math_util, self._updater_name)
util.set_attr(self, dict(
start_val=np.nan,
))
util.set_attr(self, var_decay_spec, [
'start_val',
'end_val',
'start_step',
'end_step',
])
if not getattr(self, 'end_val', None):
self.end_val = self.start_val
def update(self, algorithm, clock):
'''Get an updated value for var'''
if (util.in_eval_lab_modes()) or self._updater_name == 'no_decay':
return self.end_val
step = clock.get(clock.max_tick_unit)
val = self._updater(self.start_val, self.end_val, self.start_step, self.end_step, step)
return val
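# Minimal sketch of what a 'linear_decay' updater computes, assuming a decaying variable
# (end_val <= start_val); the actual implementation lives in slm_lab.lib.math_util.
def _linear_decay_example(start_val, end_val, start_step, end_step, step):
    if step < start_step:
        return start_val
    slope = (end_val - start_val) / float(end_step - start_step)
    return max(end_val, start_val + slope * (step - start_step))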
# misc calc methods
def guard_multi_pdparams(pdparams, body):
'''Guard pdparams for multi action'''
action_dim = body.action_dim
is_multi_action = ps.is_iterable(action_dim)
if is_multi_action:
assert ps.is_list(pdparams)
pdparams = [t.clone() for t in pdparams] # clone for grad safety
assert len(pdparams) == len(action_dim), pdparams
# transpose into (batch_size, [action_dims])
pdparams = [list(torch.split(t, action_dim, dim=0)) for t in torch.cat(pdparams, dim=1)]
return pdparams
def calc_log_probs(algorithm, net, body, batch):
'''
Method to calculate log_probs fresh from batch data
Body already stores log_prob from self.net. This is used for PPO where log_probs needs to be recalculated.
'''
states, actions = batch['states'], batch['actions']
action_dim = body.action_dim
is_multi_action = ps.is_iterable(action_dim)
# construct log_probs for each state-action
pdparams = algorithm.calc_pdparam(states, net=net)
pdparams = guard_multi_pdparams(pdparams, body)
assert len(pdparams) == len(states), f'batch_size of pdparams: {len(pdparams)} vs states: {len(states)}'
pdtypes = ACTION_PDS[body.action_type]
ActionPD = getattr(distributions, body.action_pdtype)
log_probs = []
for idx, pdparam in enumerate(pdparams):
if not is_multi_action: # already cloned for multi_action above
pdparam = pdparam.clone() # clone for grad safety
_action, action_pd = sample_action_pd(ActionPD, pdparam, body)
log_probs.append(action_pd.log_prob(actions[idx].float()).sum(dim=0))
log_probs = torch.stack(log_probs)
assert not torch.isnan(log_probs).any(), f'log_probs: {log_probs}, \npdparams: {pdparams} \nactions: {actions}'
logger.debug(f'log_probs: {log_probs}')
return log_probs
def update_online_stats(body, state):
'''
Method to calculate the running mean and standard deviation of the state space.
See https://www.johndcook.com/blog/standard_deviation/ for more details
for n >= 1
M_n = M_n-1 + (state - M_n-1) / n
S_n = S_n-1 + (state - M_n-1) * (state - M_n)
variance = S_n / (n - 1)
std_dev = sqrt(variance)
'''
logger.debug(f'mean: {body.state_mean}, std: {body.state_std_dev}, num examples: {body.state_n}')
# Assumes only one state is given
if ('Atari' in util.get_class_name(body.memory)):
assert state.ndim == 3
elif getattr(body.memory, 'raw_state_dim', False):
assert state.size == body.memory.raw_state_dim
else:
assert state.size == body.state_dim or state.shape == body.state_dim
mean = body.state_mean
body.state_n += 1
if np.isnan(mean).any():
assert np.isnan(body.state_std_dev_int)
assert np.isnan(body.state_std_dev)
body.state_mean = state
body.state_std_dev_int = 0
body.state_std_dev = 0
else:
assert body.state_n > 1
body.state_mean = mean + (state - mean) / body.state_n
body.state_std_dev_int = body.state_std_dev_int + (state - mean) * (state - body.state_mean)
body.state_std_dev = np.sqrt(body.state_std_dev_int / (body.state_n - 1))
# Guard against very small std devs
if (body.state_std_dev < 1e-8).any():
body.state_std_dev[np.where(body.state_std_dev < 1e-8)] += 1e-8
logger.debug(f'new mean: {body.state_mean}, new std: {body.state_std_dev}, num examples: {body.state_n}')
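# Quick numeric check of the Welford recurrence documented above (illustrative only):
def _running_stats_example():
    xs = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
    n, mean, s = 0, 0.0, 0.0
    for x in xs:
        n += 1
        old_mean = mean
        mean = old_mean + (x - old_mean) / n
        s = s + (x - old_mean) * (x - mean)
    std_dev = np.sqrt(s / (n - 1))  # matches np.std(xs, ddof=1) up to float error
    return mean, std_dev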
def normalize_state(body, state):
'''
Normalizes one or more states using a running mean and standard deviation
Details of the normalization from Deep RL Bootcamp, L6
https://www.youtube.com/watch?v=8EcdaCk9KaQ&feature=youtu.be
'''
same_shape = False if type(state) == list else state.shape == body.state_mean.shape
has_preprocess = getattr(body.memory, 'preprocess_state', False)
if ('Atari' in util.get_class_name(body.memory)):
# never normalize atari, it has its own normalization step
logger.debug('skipping normalizing for Atari, already handled by preprocess')
return state
elif ('Replay' in util.get_class_name(body.memory)) and has_preprocess:
# normalization handled by preprocess_state function in the memory
logger.debug('skipping normalizing, already handled by preprocess')
return state
elif same_shape:
# if not atari, always normalize the state the first time we see it during act
# if the shape is not transformed in some way
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
else:
# broadcastable sample from an un-normalized memory so we should normalize
logger.debug('normalizing sample from memory')
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
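# Minimal sketch of the clipped z-score normalization applied above (illustrative only):
def _clipped_zscore_example():
    state = np.array([1.0, 100.0, -50.0])
    mean = np.array([0.0, 0.0, 0.0])
    std_dev = np.array([1.0, 10.0, 10.0])
    return np.clip((state - mean) / std_dev, -10, 10)  # -> [1., 10., -5.]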
# TODO Not currently used, this will crash for more exotic memory structures
# def unnormalize_state(body, state):
# '''
# Un-normalizes one or more states using a running mean and new_std_dev
# '''
# return state * body.state_mean + body.state_std_dev
def update_online_stats_and_normalize_state(body, state):
'''
Convenience combination function for updating running state mean and std_dev and normalizing the state in one go.
'''
logger.debug(f'state: {state}')
update_online_stats(body, state)
state = normalize_state(body, state)
logger.debug(f'normalized state: {state}')
return state
def normalize_states_and_next_states(body, batch, episodic_flag=None):
'''
Convenience function for normalizing the states and next states in a batch of data
'''
logger.debug(f'states: {batch["states"]}')
logger.debug(f'next states: {batch["next_states"]}')
episodic = episodic_flag if episodic_flag is not None else body.memory.is_episodic
logger.debug(f'Episodic: {episodic}, episodic_flag: {episodic_flag}, body.memory: {body.memory.is_episodic}')
if episodic:
normalized = []
for epi in batch['states']:
normalized.append(normalize_state(body, epi))
batch['states'] = normalized
normalized = []
for epi in batch['next_states']:
normalized.append(normalize_state(body, epi))
batch['next_states'] = normalized
else:
batch['states'] = normalize_state(body, batch['states'])
batch['next_states'] = normalize_state(body, batch['next_states'])
logger.debug(f'normalized states: {batch["states"]}')
logger.debug(f'normalized next states: {batch["next_states"]}')
return batch
| 2.96875
| 3
|
test/unit/test_cli.py
|
pgiraud/temboard-agent
| 0
|
12784308
|
<gh_stars>0
from __future__ import unicode_literals
import pytest
def test_ok():
from temboardagent.cli import cli
@cli
def main(argv, environ):
assert 'TESTVALUE' in argv
return 0xcafe
with pytest.raises(SystemExit) as ei:
main(argv=['TESTVALUE'])
assert 0xcafe == ei.value.code
def test_bdb_quit():
from temboardagent.cli import cli
from bdb import BdbQuit
@cli
def main(argv, environ):
raise BdbQuit()
with pytest.raises(SystemExit) as ei:
main()
assert 1 == ei.value.code
def test_interrupt():
from temboardagent.cli import cli
@cli
def main(argv, environ):
raise KeyboardInterrupt()
with pytest.raises(SystemExit) as ei:
main(argv=[])
assert 1 == ei.value.code
def test_user_error():
from temboardagent.cli import cli
from temboardagent.errors import UserError
@cli
def main(argv, environ):
raise UserError('POUET', retcode=0xd0d0)
with pytest.raises(SystemExit) as ei:
main()
assert 0xd0d0 == ei.value.code
def test_unhandled_error_prod():
from temboardagent.cli import cli
@cli
def main(argv, environ):
raise KeyError('name')
with pytest.raises(SystemExit) as ei:
main()
assert 1 == ei.value.code
def test_unhandled_error_debug(mocker):
from temboardagent.cli import cli
pm = mocker.patch('temboardagent.cli.pdb.post_mortem')
@cli
def main(argv, environ):
raise KeyError('name')
with pytest.raises(SystemExit) as ei:
main(environ=dict(DEBUG='y'))
assert 1 == ei.value.code
assert pm.called is True
def test_bootstrap(mocker):
mocker.patch('temboardagent.cli.Application.read_file', autospec=True)
mocker.patch('temboardagent.cli.Application.apply_config', autospec=True)
mocker.patch('temboardagent.cli.MergedConfiguration')
from temboardagent.cli import Application, bootstrap
app = Application()
app.config.temboard.configfile = 'pouet'
app.bootstrap(args=None, environ={})
assert repr(app)
app = bootstrap(args=None, environ={})
assert app.apply_config.called is True
def test_apply_config_with_plugins(mocker):
mod = 'temboardagent.cli.'
mocker.patch(mod + 'Postgres', autospec=True)
mocker.patch(mod + 'Application.setup_logging', autospec=True)
cp = mocker.patch(mod + 'Application.create_plugins', autospec=True)
mocker.patch(mod + 'Application.update_plugins', autospec=True)
mocker.patch(mod + 'Application.purge_plugins', autospec=True)
from temboardagent.cli import Application
app = Application()
app.config_sources = dict()
app.config = mocker.Mock(name='config')
app.config.postgresql = dict()
cp.return_value = ['plugin']
app.apply_config()
assert app.postgres
assert app.setup_logging.called is True
assert app.update_plugins.called is True
assert app.purge_plugins.called is True
def test_apply_config_without_plugins(mocker):
mod = 'temboardagent.cli.'
mocker.patch(mod + 'Postgres', autospec=True)
mocker.patch(mod + 'Application.setup_logging', autospec=True)
from temboardagent.cli import Application
app = Application(with_plugins=False)
app.config_sources = dict()
app.config = mocker.Mock(name='config')
app.config.postgresql = dict()
app.apply_config()
assert app.postgres
assert app.setup_logging.called is True
def test_application_specs():
from temboardagent.cli import Application
app = Application()
list(app.bootstrap_specs())
list(app.core_specs())
app = Application(with_plugins=None)
specs = [str(s) for s in app.core_specs()]
assert 'temboard_plugins' not in specs
def test_app_pickle():
from pickle import dumps as pickle, loads as unpickle
from temboardagent.cli import Application
empty_generator = (x for x in [])
orig = Application(specs=empty_generator)
orig.config.update(dict(a=1))
copy = unpickle(pickle(orig))
assert [] == copy.specs
assert copy.config
def test_read_file(mocker):
from temboardagent.cli import Application, UserError
app = Application()
open_ = mocker.patch('temboardagent.cli.open', create=True)
app.read_file(mocker.Mock(name='parser'), 'pouet.conf')
open_.side_effect = IOError()
with pytest.raises(UserError):
app.read_file(mocker.Mock(name='parser'), 'pouet.conf')
def test_reload(mocker):
mocker.patch('temboardagent.cli.Application.read_file', autospec=True)
mocker.patch('temboardagent.cli.Application.apply_config', autospec=True)
from temboardagent.cli import Application
app = Application()
app.config = mocker.Mock(name='config')
app.reload()
def test_fetch_plugin(mocker):
iter_ep = mocker.patch('temboardagent.cli.iter_entry_points')
from temboardagent.cli import Application
app = Application()
ep = mocker.Mock(name='found')
ep.name = 'found'
ep.load.return_value = 'PLUGIN OBJECT'
iter_ep.return_value = [ep]
assert 'PLUGIN OBJECT' == app.fetch_plugin(['found'])
def test_fetch_failing(mocker):
iter_ep = mocker.patch('temboardagent.cli.iter_entry_points')
from temboardagent.cli import Application, UserError
app = Application()
ep = mocker.Mock(name='ep')
ep.load.side_effect = Exception('Pouet')
iter_ep.return_value = [ep]
with pytest.raises(UserError):
app.fetch_plugin('myplugin')
def test_fetch_missing(mocker):
iter_ep = mocker.patch('temboardagent.cli.iter_entry_points')
from temboardagent.cli import Application, UserError
app = Application()
iter_ep.return_value = []
with pytest.raises(UserError):
app.fetch_plugin('myplugin')
def test_create_plugins(mocker):
mocker.patch(
'temboardagent.cli.Application.fetch_plugin', autospec=True)
llp = mocker.patch('temboardagent.cli.load_legacy_plugins', autospec=True)
from temboardagent.cli import Application
app = Application()
app.config = mocker.Mock(name='config')
app.config.temboard.plugins = ['legacy', 'ng']
llp.return_value = dict(legacy=dict())
app.create_plugins()
assert 'legacy' not in app.plugins
assert 'legacy' in app.config.plugins
assert 'ng' in app.plugins
assert 'ng' not in app.config.plugins
def test_update_plugins(mocker):
from temboardagent.cli import Application
app = Application()
unloadme = mocker.Mock(name='unloadme')
old_plugins = dict(unloadme=unloadme)
loadme = mocker.Mock(name='loadme')
app.plugins = dict(loadme=loadme)
app.update_plugins(old_plugins=old_plugins)
assert loadme.load.called is True
assert unloadme.unload.called is True
def test_purge_plugins():
from temboardagent.cli import Application, MergedConfiguration
app = Application()
app.plugins = dict(destroyme=1, keepme=1)
app.config = MergedConfiguration()
app.config.update(dict(temboard=dict(plugins=['keepme'])))
app.purge_plugins()
assert 'destroyme' not in app.plugins
def test_debug_arg():
from argparse import ArgumentParser, SUPPRESS
from temboardagent.cli import define_core_arguments
parser = ArgumentParser(argument_default=SUPPRESS)
define_core_arguments(parser)
args = parser.parse_args([])
assert 'logging_debug' not in args
args = parser.parse_args(['--debug'])
assert args.logging_debug is True
args = parser.parse_args(['--debug', 'myplugin'])
assert 'myplugin' == args.logging_debug
def test_debug_var():
from temboardagent.cli import detect_debug_mode
assert not detect_debug_mode(dict())
assert not detect_debug_mode(dict(DEBUG=b'N'))
env = dict(DEBUG=b'1')
assert detect_debug_mode(env) is True
assert b'__debug__' == env['TEMBOARD_LOGGING_DEBUG']
env = dict(DEBUG=b'mymodule')
assert detect_debug_mode(env)
assert b'mymodule' == env['TEMBOARD_LOGGING_DEBUG']
| 2.109375
| 2
|
battleship/battleship.py
|
johncoleman83/codewars
| 0
|
12784309
|
<reponame>johncoleman83/codewars<gh_stars>0
#!/usr/bin/env python3
def ship_size(row, i):
size = 0
if 0 <= i < 10 and row[i] == 1:
row[i] = 0
size += 1 + ship_size(row, i + 1) + ship_size(row, i - 1)
return size
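# Illustrative sketch (not part of the kata solution): ship_size walks outward from column i,
# zeroing cells as it counts, so each horizontal run of 1s is measured exactly once.
def _ship_size_example():
    row = [0, 1, 1, 1, 0, 0, 1, 0, 0, 0]
    length = ship_size(row, 2)  # -> 3, the run spanning columns 1..3
    return length, row          # row[1:4] is now cleared to zeros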
def check_diagonals(f):
coords = {}
for r in range(10):
for c in range(10):
coords[(r, c)] = f[r][c]
for coord, val in coords.items():
if val == 1:
r, c = coord[0], coord[1]
diagonals = [
coords.get((r - 1, c - 1), 0), coords.get((r - 1, c + 1), 0),
coords.get((r + 1, c - 1), 0), coords.get((r + 1, c + 1), 0)
]
if any(diagonals): return False
return True
def validateBattlefield(f):
if check_diagonals(f) is False: return False
fr = list(zip(*f[::-1]))
for r in range(10): fr[r] = list(fr[r])
ships, subs = [4, 3, 3, 2, 2, 2], 4
while len(ships) > 0:
pre_ships = len(ships) + subs
r = 0
while r < 10:
c = 0
while c < 10:
if f[r][c] == 1:
c_r, c_c = c, 9 - r
count_r = ship_size(f[r].copy(), c)
count_c = ship_size(fr[c_r].copy(), c_c)
count = max(count_r, count_c)
if count_r > 1 and count_c > 1: return False
if count_r == 1 and count_c == 1:
subs -= 1
ship_size(f[r], c)
ship_size(fr[c_r], c_c)
                    if len(ships) > 0 and count == ships[0]:
                        del ships[0]
ship_size(f[r], c)
ship_size(fr[c_r], c_c)
c += 1
r += 1
if pre_ships == len(ships) + subs: return False
if len(ships) == 0 and subs == 0: return True
return False
battleField = [
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 1, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
battleField2 = [
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
battleField3 = [
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 1, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
valid = validateBattlefield(battleField3)
print(valid)
| 3.21875
| 3
|
corehq/warehouse/migrations/0003_userstagingtable.py
|
kkrampa/commcare-hq
| 1
|
12784310
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-29 08:39
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('warehouse', '0002_domainstagingtable_groupstagingtable'),
]
operations = [
migrations.CreateModel(
name='UserStagingTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(max_length=255)),
('username', models.CharField(max_length=150)),
('first_name', models.CharField(max_length=30, null=True)),
('last_name', models.CharField(max_length=30, null=True)),
('email', models.CharField(max_length=255, null=True)),
('doc_type', models.CharField(max_length=100)),
('base_doc', models.CharField(max_length=100)),
('is_active', models.BooleanField()),
('is_staff', models.BooleanField()),
('is_superuser', models.BooleanField()),
('last_login', models.DateTimeField(null=True)),
('date_joined', models.DateTimeField()),
('user_last_modified', models.DateTimeField(null=True)),
],
options={
'abstract': False,
},
),
]
| 1.75
| 2
|
setup.py
|
KyleKing/not-on-pypi
| 0
|
12784311
|
"""Setup File."""
from pathlib import Path
from setuptools import setup
from setuptools.command.install import install
# ✓ PACKAGE_NAME = 'common_app'
# ✓ PACKAGE_NAME = 'cn_smtp_sink_server'
# ✓ PACKAGE_NAME = 'common_bootstrap'
# ✓ PACKAGE_NAME = 'common_dash'
# ✓ PACKAGE_NAME = 'common_img'
# ✓ PACKAGE_NAME = 'common_inst'
# ✓ PACKAGE_NAME = 'common_notifier'
# ✓ PACKAGE_NAME = 'common_prstub'
# ✓ PACKAGE_NAME = 'common_tracker'
PACKAGE_NAME = None
"""Modify the package name here which is to be seen on PyPi."""
VERSION = '0.0.0a1'
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
package_init = Path(PACKAGE_NAME).resolve() / '__init__.py'
package_init.parent.mkdir(exist_ok=True)
package_init.write_text('"""Do nothing."""\n')
# --------------------------------------------------------------------------------------
class WrongPackageInstalledError(RuntimeError):
"""More specific error."""
pass
class RaiseErrorPreInstall(install):
"""Customized setuptools install command - prints a friendly greeting."""
def run(self):
raise WrongPackageInstalledError(f"""
\n\n
'{PACKAGE_NAME}' was downloaded from the public pypi.org repository, but is only available on an internal repository
Please update your installer's configuration and download from the proper index-url
\n\n
""")
if __name__ == '__main__':
setup(
name=PACKAGE_NAME,
version=VERSION,
packages=[PACKAGE_NAME],
        description='Reserved package name',
        long_description=Path('README.md').read_text(),
        long_description_content_type='text/markdown',
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        url='https://github.com/KyleKing/not-on-pypi',
        license='Unlicense',
        classifiers=['License :: Public Domain'],
cmdclass={
'install': RaiseErrorPreInstall,
},
# repository = 'https://github.com/KyleKing/not-on-pypi',
# documentation = 'https://github.com/KyleKing/not-on-pypi',
# 'Bug Tracker' = 'https://github.com/KyleKing/not-on-pypi/issues',
# include = [ 'LICENSE.md',],
# scripts = [],
)
| 2.546875
| 3
|
src/coalescenceml/cli/hidden.py
|
bayoumi17m/CoalescenceML
| 1
|
12784312
|
<gh_stars>1-10
import time
import click
from coalescenceml.cli.cli import cli
import coalescenceml.cli.utils as cli_utils
from coalescenceml.constants import console
from coalescenceml.logger import get_logger
@cli.command(name="over", help="Funny joke...hehe", hidden=True)
def print_deez_nutz_joke() -> None:
deez_nutz = r"""
- -
_/-\_ _/-\_
.-`-:-:-`-. _____ ______ ______ ______ _ _ _ _ _______ ______ .-`-:-:-`-.
/-:-:-:-:-:-\ | __ \ | ____| | ____| |___ / | \ | | | | | | |__ __| |___ / /-:-:-:-:-:-\
\:-:-:-:-:-:/ | | | | | |__ | |__ / / | \| | | | | | | | / / \:-:-:-:-:-:/
|` `| | | | | | __| | __| / / | . ` | | | | | | | / / |` `|
| | | |__| | | |____ | |____ / /__ | |\ | | |__| | | | / /__ | |
`\ /` |_____/ |______| |______| /_____| |_| \_| \____/ |_| /_____| `\ /`
`-._.-' `-._.-'"""
max_width = console.width
deez_nutz_width = console.measure(deez_nutz).maximum
styling = "bold cyan"
console.print("coml over...", style="cyan", justify="left")
with console.status("", spinner="monkey"):
time.sleep(5)
if deez_nutz_width >= max_width:
console.print(deez_nutz, style=styling, overflow="crop")
console.rule(f"")
else:
width_buffer = int((max_width - deez_nutz_width) / (2))
deez_nutz = "\n".join([
((" "* width_buffer) + line) for line in deez_nutz.split("\n")
])
console.print(deez_nutz, style=styling)
console.rule("")
| 1.945313
| 2
|
Domains/Python/04 - Sets/No Idea!/solution.py
|
abhinavgunwant/hackerrank-solutions
| 1
|
12784313
|
n,m = [int(i) for i in input().split()]
arr = [int(i) for i in input().split()]
A = set([int(i) for i in input().split()])
B = set([int(i) for i in input().split()])
happiness = 0
for i in arr:
if i in A:
happiness += 1
elif i in B:
happiness -= 1
print(happiness)
| 2.96875
| 3
|
preprocessing/hdf5/opflow_hdf5.py
|
sbrodeur/CREATE-dataset
| 0
|
12784314
|
<filename>preprocessing/hdf5/opflow_hdf5.py<gh_stars>0
#!/usr/bin/env python
# Copyright (c) 2018, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import os
import logging
import numpy as np
import cv2
import scipy.signal
import matplotlib.pyplot as plt
from h5utils import Hdf5Dataset
from optparse import OptionParser
logger = logging.getLogger(__name__)
def is_cv2():
import cv2 as lib
return lib.__version__.startswith("2.")
def is_cv3():
import cv2 as lib
return lib.__version__.startswith("3.")
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
class FlowPlotter(object):
def __init__(self, x, y, scale):
h, w = x.shape
self.x = x
self.y = y
# NOTE: the y-axis needs to be inverted to be in image-coordinates
fig = plt.figure(figsize=(20,10), facecolor='white')
ax = fig.add_subplot(121)
q = ax.quiver(x, y, np.zeros((h,w)), np.zeros((h,w)), edgecolor='k', scale=1, angles='xy', scale_units='xy')
ax.invert_yaxis()
plt.axis('off')
self.fig = fig
self.ax = ax
self.q = q
ax2 = fig.add_subplot(122)
m = ax2.imshow(np.zeros((int(h/scale), int(w/scale))), vmin = 0, vmax = 255, cmap = plt.get_cmap('gray'))
plt.axis('off')
self.ax2 = ax2
self.m = m
plt.ion()
def update(self, flow, img):
# NOTE: the y-axis needs to be negated to be in image-coordinates
self.q.set_UVC(flow[:,:,0], -flow[:,:,1])
self.m.set_data(img)
self.fig.canvas.draw()
def computeSparseFlow(img, imgPrev, gridShape, border):
# Define the fixed grid where optical flow is calculated
h, w = img.shape[:2]
y, x = np.meshgrid(np.linspace(border * h, (1.0 - border) * h, gridShape[0], dtype=np.int),
np.linspace(border * w, (1.0 - border) * w, gridShape[1], dtype=np.int),
indexing='ij')
p0 = np.stack((y,x), axis=-1).astype(np.float32)
# Calculate optical flow
    # the call signature is identical for OpenCV 2 and 3 here, so no version branching is needed
    p1, _, _ = cv2.calcOpticalFlowPyrLK(imgPrev, img, p0.reshape((-1,2)), None, winSize=(63,63), maxLevel=5)
flow = np.zeros((h, w, 2), dtype=np.float32)
flow[y, x] = (p1.reshape(p0.shape) - p0)
# TODO: test if all valid with p1[st==1] ?
f = flow[y, x]
return f, x, y
def computeCompactFlow(img, imgPrev, gridShape, border, filtering=False, filterSize=16):
# Calculate optical flow
if is_cv2():
flow = cv2.calcOpticalFlowFarneback(imgPrev, img, pyr_scale=0.5, levels=5, winsize=63, iterations=3, poly_n=7, poly_sigma=1.5, flags=cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
else:
flow = np.zeros((img.shape[0], img.shape[1], 2), dtype=np.float32)
flow = cv2.calcOpticalFlowFarneback(imgPrev, img, flow, pyr_scale=0.5, levels=5, winsize=63, iterations=3, poly_n=7, poly_sigma=1.5, flags=cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
flow = np.array(flow, np.float32)
h, w = flow.shape[:2]
y, x = np.meshgrid(np.linspace(border * h, (1.0 - border) * h, gridShape[0], dtype=np.int),
np.linspace(border * w, (1.0 - border) * w, gridShape[1], dtype=np.int),
indexing='ij')
if filtering:
kernel = np.ones((filterSize, filterSize)) / float(filterSize * filterSize)
flow = np.stack([scipy.signal.convolve2d(flow[:,:,k], kernel, mode='same', boundary='symm')
for k in range(flow.shape[-1])], axis=-1)
f = flow[y, x]
return f, x, y
def preprocessImage(img, scale=0.5):
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imggray = clahe.apply(imggray)
    imggray = np.asarray(imggray, dtype=np.uint8)  # CLAHE on a uint8 image already returns uint8 in [0, 255]
imggray = cv2.resize(imggray, None,fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
return imggray
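# Minimal usage sketch on a synthetic BGR frame (illustrative only; assumes a uint8 image,
# as produced by cv2.imdecode above):
def _preprocess_example():
    img = np.random.randint(0, 256, size=(240, 320, 3), dtype=np.uint8)
    small = preprocessImage(img, scale=0.5)
    return small.shape, small.dtype  # -> (120, 160), uint8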
def processImageSequence(raw, clock, shape, scale=0.25, gridShape=(12,16), border=0.1, filtering=True, filterSize=5, visualize=False, stepSize=1):
if visualize:
plotter = None
flows = []
for i in range(stepSize, raw.shape[0]):
logger.info('Processing image %d (%d total)' % (i+1, raw.shape[0]))
img = cv2.imdecode(raw[i, :shape[i,0]], flags=-1) #CV_LOAD_IMAGE_UNCHANGED
img = preprocessImage(img, scale)
        imgPrev = cv2.imdecode(raw[i-stepSize, :shape[i-stepSize,0]], flags=-1) # CV_LOAD_IMAGE_UNCHANGED
imgPrev = preprocessImage(imgPrev, scale)
flow, x, y = computeCompactFlow(img, imgPrev, gridShape, border, filtering, filterSize)
#flow, x, y = computeSparseFlow(img, imgPrev, gridShape, border)
# Divide by the time interval and scale to get flow in unit of pixels/sec relative to the full-size image
dt = clock[i] - clock[i-stepSize]
assert dt > 0
flow /= (dt * scale)
if visualize:
if plotter is None:
plotter = FlowPlotter(x, y, scale)
plotter.update(flow, img)
plt.show()
flows.append(flow)
flows = np.stack(flows, axis=0)
clock = clock[stepSize:]
# # Temporal filtering
# if filtering:
# kernel = np.ones((filterSize,)) / float(filterSize)
# for i in range(flow.shape[0]):
# for j in range(flow.shape[1]):
# flow[i,j,:] = scipy.signal.convolve(flow[i,j,:], kernel, mode='same')
return flows, clock
def main(args=None):
parser = OptionParser()
parser.add_option("-i", "--input", dest="input", type='string', default=None,
help='specify the path of the input HDF5 file')
parser.add_option("-o", "--output", dest="output", type='string', default=None,
help='specify the path of the output HDF5 file')
parser.add_option("-s", "--scale", dest="scale", type='float', default=0.5,
help='specify the rescaling of images')
parser.add_option("-b", "--border", dest="border", type='float', default=0.1,
help='specify the border to keep')
parser.add_option("-f", "--filter-size", dest="filterSize", type='int', default=16,
help='specify the size of the smoothing filter')
parser.add_option("-m", "--step-size", dest="stepSize", type='int', default=1,
help='specify the number of frame between optical flow calculation')
parser.add_option("-y", "--grid-shape-y", dest="gridShape_y", type='int', default=12,
help='specify the shape of the optical flow on the y-axis')
parser.add_option("-x", "--grid-shape-x", dest="gridShape_x", type='int', default=16,
help='specify the shape of the optical flow on the x-axis')
parser.add_option("-t", "--filtering", action="store_true", dest="filtering", default=False,
help='specify to visualize computed optical flow')
parser.add_option("-v", "--visualize", action="store_true", dest="visualize", default=False,
help='specify to visualize computed optical flow')
(options,args) = parser.parse_args(args=args)
inputDatasetPath = os.path.abspath(options.input)
logger.info('Using input HDF5 dataset file: %s' % (inputDatasetPath))
outputDatasetPath = os.path.abspath(options.output)
logger.info('Using output HDF5 dataset file: %s' % (outputDatasetPath))
with Hdf5Dataset(outputDatasetPath, mode='w') as outHdf5:
with Hdf5Dataset(inputDatasetPath, mode='r') as inHdf5:
# Process states
for state in inHdf5.getAllStates():
name, group, raw, clock, shape = state
# Write original data to output file
if group is not None:
ngroup = '/' + group
else:
ngroup = ''
fs = 1.0/np.mean(clock[1:] - clock[:-1])
logger.info('Writing to output HDF5 dataset file: %s, shape=%s, fs=%f Hz' % (ngroup + '/' + name, str(raw.shape), fs))
outHdf5.addStates(name, raw, clock, group, shape)
if group == 'video':
rawFlow, clockFlow = processImageSequence(raw, clock, shape,
scale=options.scale, gridShape=(options.gridShape_y, options.gridShape_x), border=options.border,
filtering=options.filtering, filterSize=options.filterSize,
visualize=options.visualize, stepSize=options.stepSize)
groupFlow = 'optical_flow'
# Write original data to output file
fsFlow = 1.0/np.mean(clockFlow[1:] - clockFlow[:-1])
logger.info('Writing to output HDF5 dataset file: %s, shape=%s, fs=%f Hz' % ('/' + groupFlow + '/' + name, str(rawFlow.shape), fsFlow))
outHdf5.addStates(name, rawFlow, clockFlow, groupFlow, shape=None)
logger.info('All done.')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
| 1.523438
| 2
|
gethouse/urls.py
|
Alvin-21/patanyumba
| 0
|
12784315
|
from django.urls import path, re_path
from . import views
urlpatterns = [
re_path(r'^$', views.index, name='homepage'),
re_path(r'^ajax/subscription/$', views.subscription, name='subscription'),
re_path(r'^search/', views.search_results, name='search_results'),
re_path(r'^accomodation/(\d+)', views.accomodation, name='accomodation'),
re_path(r'^new/accomodation$', views.new_accom, name='new_accomodation'),
re_path(r'^profile/(\d+)$', views.profile, name='profile'),
re_path(r'^edit/profile', views.edit_profile, name='edit_profile'),
re_path(r'^api/accomodations$', views.AccomodationList.as_view()),
re_path(r'api/accomodation/accom-id/(\d+)$',
views.AccomodationDetails.as_view()),
re_path(r'^api/profiles$', views.ProfileList.as_view()),
re_path(r'^api/profile/profile-id/(\d+)$', views.ProfileDetails.as_view()),
]
| 1.890625
| 2
|
pyble/const/characteristic/body_sensor_location.py
|
bgromov/PyBLEWrapper
| 14
|
12784316
|
NAME="Body Sensor Location"
UUID=0x2A38
| 1.070313
| 1
|
libs/gradiusnlp/old/gradius_nlp.py
|
gradiuscypher/internetmademe
| 0
|
12784317
|
import praw
import elasticsearch
import string
import random
from nltk import word_tokenize, pos_tag
class GradiusNlp:
def __init__(self):
self.elastic = elasticsearch.Elasticsearch()
    def get_reddit_data(self, subreddit, post_count):
        r = praw.Reddit(user_agent="https://github.com/gradiuscypher/internetmademe")
        sub = r.get_subreddit(subreddit)
        top = sub.get_top_from_day(limit=post_count)
        #process post_count top posts
        for post in top:
            comments = post.comments
            #process top comment
            for c in comments:
                if type(c) is praw.objects.Comment:
                    self.index_words(c.body)
                    #process comment replies one tree down
                    for r in c.replies:
                        if type(r) is praw.objects.Comment:
                            self.index_words(r.body)
    def index_words(self, text):
        """Tokenize and POS-tag text, then index each non-punctuation word under its POS tag, skipping duplicates in existing indices."""
        tokens = word_tokenize(text)
        tagged_tokens = pos_tag(tokens)
        for tag in tagged_tokens:
            print(tag)
            if not tag[1] in string.punctuation:
                es_index = tag[1].lower()
                q_txt = 'word: ' + '"' + tag[0].lower() + '"'
                if self.elastic.indices.exists(es_index):
                    if not (self.elastic.search(index=es_index, q=q_txt)['hits']['total'] > 0):
                        self.elastic.index(index=es_index, doc_type='word', body={'word': tag[0].lower()})
                else:
                    self.elastic.index(index=es_index, doc_type='word', body={'word': tag[0].lower()})
def reform_sentence(self, sentence, force_replace_count=1):
sentence_tokens = word_tokenize(sentence)
replace_count = random.randint(force_replace_count, len(sentence_tokens))
print(replace_count)
#Ensure at least force_replace_count words are being replaced
for x in range(0, replace_count):
tagged_tokens = pos_tag(sentence_tokens)
choice = random.choice(tagged_tokens)
while choice[0] in string.punctuation:
choice = random.choice(tagged_tokens)
new_word = self.replace_pos(choice)
sentence_tokens[sentence_tokens.index(choice[0])] = new_word
return ' '.join(sentence_tokens)
def replace_pos(self, pos_tuple):
es_index = pos_tuple[1].lower()
results = self.elastic.search(index=es_index, body={"query": {
"function_score": {
"query": {"wildcard": {"word": "*"}},
"random_score": {}
}}})
return random.choice(results['hits']['hits'])['_source']['word']
| 2.78125
| 3
|
reply.py
|
Asdvamp/CEH-python-Scripts
| 0
|
12784318
|
<reponame>Asdvamp/CEH-python-Scripts<filename>reply.py<gh_stars>0
while True:
message = input("Send -> ")
with open("log.txt", "w") as file:
file.write(message)
| 2.484375
| 2
|
insect/models/__init__.py
|
Kradukman/beesUlb
| 0
|
12784319
|
from . import super_family
from . import family
from . import sub_family
from . import tribe
from . import genus
from . import specie
from . import sub_specie
from . import wizard
| 1.28125
| 1
|
nanopores/py4gmsh/__init__.py
|
mitschabaude/nanopores
| 8
|
12784320
|
from basic import *
from extra import *
| 1.28125
| 1
|
examples/traveltime_straight_channel.py
|
wrightky/dorado
| 19
|
12784321
|
"""Example case for particle travel times in a straight channel."""
import numpy as np
import matplotlib.pyplot as plt
import dorado.particle_track as pt
# fix the random seed so it stays the same as weights change
np.random.seed(1)
# create synthetic domain and flow field
domain = np.zeros((100, 50))
depth = np.zeros_like(domain)
stage = np.zeros_like(domain)
u = np.zeros_like(domain)
v = np.zeros_like(domain)
dx = 50.
Np_tracer = 500
seed_xloc = [10]
seed_yloc = [25]
# set up straight channel
depth[:, 10:40] = 1.0
stage[:, 10:40] = 1.0
v[:, 10:40] = -10.0
# choose number of iterations for particle to route
num_iter = 100
# define your 'known' or 'expected' travel time for this simple geometry
# picking expected time from location x=10 to x=70
# (really the boundary of row 70, so 1/2 a cell)
# 59.5 cells * 50 m/cell / 10 m/s = 297.5 seconds
target_row = 70
expected_time = 297.5
# assign particle parameters
params = pt.modelParams()
params.depth = depth
params.stage = stage
params.u = u
params.v = v
params.dx = dx
# set-up figure
plt.figure()
plt.imshow(np.sqrt(u**2 + v**2))
plt.colorbar()
plt.scatter(seed_yloc, seed_xloc, c='k', marker='o', s=5)
# plot the target line where time is measured
plt.plot(np.linspace(0, 50, 100), np.ones(100)*target_row, c='red')
plt.title('Velocity Field')
plt.legend(labels=['Target Row to Measure Times',
'Particle Seeding Location'],
loc='best')
plt.tight_layout()
plt.show()
# do the routing twice, once without any diffusivity added to the travel times
# (diff_coeff==0) then a second time with significant diffusion (diff_coeff==1)
for dc in list(range(0, 2)):
# set diff_coeff
if dc == 0:
params.diff_coeff = 0.0
else:
params.diff_coeff = 1.0
# make particle
particle = pt.Particles(params)
# walk it
particle.generate_particles(Np_tracer, seed_xloc, seed_yloc)
for i in list(range(0, num_iter)):
walk_data = particle.run_iteration()
# get travel times associated with particles when they are at coord x=70
# use the exposure_time function to measure this
roi = np.zeros_like(depth, dtype='int')
roi[0:target_row, :] = 1
target_times = pt.exposure_time(walk_data, roi)
# plot histogram
plt.subplot(1, 2, dc+1)
n, bins, _ = plt.hist(target_times, bins=100, range=(200, 400),
histtype='bar', density=True,
color=[0.5, 0.5, 1, 0.5])
# plot expected travel time to row 70
plt.scatter(expected_time, np.max(n),
s=75, c='green', marker='x', linewidths=20)
plt.legend(['Expected Travel Time',
'Histogram of Final Travel Times'], ncol=2,
loc='upper left', bbox_to_anchor=(0.0, -0.06), fontsize=16)
plt.title('Travel Time Distribution at Target Row \n'
'Diffusion Coefficient : ' + str(params.diff_coeff), fontsize=20)
plt.xlabel('Travel Time at Target Row [s]', fontsize=16)
plt.ylabel('Probability Density', fontsize=16)
plt.show()
| 3.078125
| 3
|
descwl_shear_sims/randsphere.py
|
aguinot/descwl-shear-sims
| 5
|
12784322
|
import numpy as np
import esutil as eu
def randcap(*,
rng,
nrand,
ra,
dec,
radius,
get_radius=False,
dorot=False):
"""
    Generate random points in a spherical cap
    parameters
    ----------
    rng:
        A numpy random number generator used for the sampling
    nrand:
        The number of random points
ra,dec:
The center of the cap in degrees. The ra should be within [0,360) and
dec from [-90,90]
radius: float
radius of the cap, same units as ra,dec
get_radius: bool, optional
if true, return radius of each point in radians
dorot: bool
If dorot is True, generate the points on the equator and rotate them to
be centered at the desired location. This is the default when the dec
is within 0.1 degrees of the pole, to avoid calculation issues
"""
# generate uniformly in r**2
if dec >= 89.9 or dec <= -89.9:
dorot = True
if dorot:
tra, tdec = 90.0, 0.0
rand_ra, rand_dec, rand_r = randcap(
rng=rng,
nrand=nrand,
ra=90.0,
dec=0.0,
radius=radius,
get_radius=True,
)
rand_ra, rand_dec = eu.coords.rotate(
0.0,
dec-tdec,
0.0,
rand_ra,
rand_dec,
)
rand_ra, rand_dec = eu.coords.rotate(
ra-tra,
0.0,
0.0,
rand_ra,
rand_dec,
)
else:
rand_r = rng.uniform(size=nrand)
rand_r = np.sqrt(rand_r)*radius
        # convert to radians (the radius is given in degrees)
np.deg2rad(rand_r, rand_r)
# generate position angle uniformly 0, 2*PI
rand_posangle = rng.uniform(low=0, high=2*np.pi, size=nrand)
theta = np.array(dec, dtype='f8', ndmin=1, copy=True)
phi = np.array(ra, dtype='f8', ndmin=1, copy=True)
theta += 90
np.deg2rad(theta, theta)
np.deg2rad(phi, phi)
sintheta = np.sin(theta)
costheta = np.cos(theta)
sinr = np.sin(rand_r)
cosr = np.cos(rand_r)
cospsi = np.cos(rand_posangle)
costheta2 = costheta*cosr + sintheta*sinr*cospsi
np.clip(costheta2, -1, 1, costheta2)
# gives [0,pi)
theta2 = np.arccos(costheta2)
sintheta2 = np.sin(theta2)
cos_dphi = (cosr - costheta*costheta2)/(sintheta*sintheta2)
np.clip(cos_dphi, -1, 1, cos_dphi)
dphi = np.arccos(cos_dphi)
# note fancy usage of where
phi2 = np.where(rand_posangle > np.pi, phi+dphi, phi-dphi)
np.rad2deg(phi2, phi2)
np.rad2deg(theta2, theta2)
rand_ra = phi2
rand_dec = theta2-90.0
eu.coords.atbound(rand_ra, 0.0, 360.0)
if get_radius:
np.rad2deg(rand_r, rand_r)
return rand_ra, rand_dec, rand_r
else:
return rand_ra, rand_dec
def randsphere(rng, num, ra_range=None, dec_range=None):
"""Generate random points on the sphere, possibly on a subset of it.
Routine due to Erin Sheldon.
Parameters
----------
num: integer
The number of randoms to generate
ra_range: list, optional
Should be within range [0,360]. Default [0,360]
dec_range: list, optional
Should be within range [-90,90]. Default [-90,90]
Returns
-------
ra : array-like
ra values for the random points
dec : array-like
dec values for the random points
"""
ra_range = _check_range(ra_range, [0.0, 360.0])
dec_range = _check_range(dec_range, [-90.0, 90.0])
ra = rng.uniform(
size=num,
low=ra_range[0],
high=ra_range[1],
)
cosdec_min = np.cos(np.radians(90.0+dec_range[0]))
cosdec_max = np.cos(np.radians(90.0+dec_range[1]))
v = rng.uniform(
size=num,
low=cosdec_min,
high=cosdec_max,
)
np.clip(v, -1.0, 1.0, v)
# Now this generates on [0,pi)
dec = np.arccos(v)
# convert to degrees
np.degrees(dec, dec)
# now in range [-90,90.0)
dec -= 90.0
return ra, dec
def _check_range(rng, allowed):
if rng is None:
rng = allowed
else:
if not hasattr(rng, '__len__'):
raise ValueError("range input does not have len() method")
if rng[0] < allowed[0] or rng[1] > allowed[1]:
raise ValueError("%s should be within %s" % (rng, allowed))
return rng
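if __name__ == '__main__':
    # Minimal usage sketch, assuming numpy >= 1.17 for default_rng; not part of the library API.
    rng = np.random.default_rng(42)
    ra, dec = randsphere(rng, 1000, ra_range=[0, 90], dec_range=[-30, 30])
    cap_ra, cap_dec = randcap(rng=rng, nrand=1000, ra=200.0, dec=75.0, radius=1.0)
    print(ra.min(), ra.max(), dec.min(), dec.max())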
| 3.375
| 3
|
sandbox/advect_microbes_llc.py
|
ali-ramadhan/lagrangian-microbes
| 6
|
12784323
|
import os
from datetime import datetime, timedelta
import numpy as np
import xarray as xr
import parcels
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import cartopy
import cartopy.util
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
u_filename = '/home/alir/hawaii_npac/0000969408_U_10800.8150.1_1080.3720.90'
v_filename = '/home/alir/hawaii_npac/0000969408_V_10800.8150.1_1080.3720.90'
level = 0
with open(u_filename, 'rb') as f:
nx, ny = 1080, 3720 # parse; advance file-pointer to data segment
record_length = 4 # [bytes]
f.seek(level * record_length * nx*ny, os.SEEK_SET)
u_data = np.fromfile(f, dtype='>f4', count=nx*ny)
u_array = np.reshape(u_data, [ny, nx], order='F')
with open(v_filename, 'rb') as f:
nx, ny = 1080, 3720 # parse; advance file-pointer to data segment
record_length = 4 # [bytes]
f.seek(level * record_length * nx*ny, os.SEEK_SET)
v_data = np.fromfile(f, dtype='>f4', count=nx*ny)
v_array = np.reshape(v_data, [ny, nx], order='F')
u_data = u_array
v_data = v_array
# u_data = np.ma.masked_where(u_array == 0, u_array)
# v_data = np.ma.masked_where(v_array == 0, v_array)
lats = np.arange(ny)/48
lons = np.arange(nx)/48
depth = np.array([0.0])
u_field = parcels.field.Field(name='U', data=u_data,
lon=lons, lat=lats, depth=depth, mesh='spherical')
v_field = parcels.field.Field(name='V', data=v_data,
lon=lons, lat=lats, depth=depth, mesh='spherical')
u_magnitude = np.sqrt(u_data*u_data + v_data*v_data)
fieldset = parcels.fieldset.FieldSet(u_field, v_field)
# fieldset.U.show()
lats_pset = np.tile(np.linspace(5, 70, 11), 11)
lons_pset = np.repeat(np.linspace(5, 15, 11), 11)
# species_field = -1 * np.ones((11,11), dtype=np.int32)
# for i, lat in enumerate(np.linspace(10, 50, 11)):
# for j, lon in enumerate(np.linspace(-170, -130, 11)):
# pass
# species_pfield = parcels.field.Field(name='species', data=species_field,
# lat=np.linspace(10, 50, 11), lon=np.linspace(-170, -130, 11), depth=depth, mesh='spherical')
class MicrobeParticle(parcels.JITParticle):
species = parcels.Variable('species', dtype=np.int32, initial=-1)
pset = parcels.ParticleSet.from_list(fieldset=fieldset, pclass=MicrobeParticle,
lon=lons_pset, lat=lats_pset)
for i, particle in enumerate(pset):
if 37.5 <= particle.lat <= 52.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 1
elif 37.5 <= particle.lat <= 52.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 2
elif 37.5 <= particle.lat <= 52.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 3
elif 22.5 <= particle.lat <= 37.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 3
elif 22.5 <= particle.lat <= 37.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 1
elif 22.5 <= particle.lat <= 37.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 2
elif 7.5 <= particle.lat <= 22.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 2
elif 7.5 <= particle.lat <= 22.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 3
elif 7.5 <= particle.lat <= 22.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 1
particle.species = 1
print("Particle {:03d} @({:.2f},{:.2f}) [species={:d}]".format(i, particle.lat, particle.lon, particle.species))
def rock_paper_scissors_type(n):
if n == 1:
return "rock"
elif n == 2:
return "paper"
elif n == 3:
return "scissors"
return None
vector_crs = ccrs.PlateCarree()
land_50m = cartopy.feature.NaturalEarthFeature('physical', 'land', '50m',
edgecolor='face',facecolor='dimgray', linewidth=0)
t = datetime(2017, 1, 1)
dt = timedelta(hours=1)
for n in range(1):
print("Advecting: {:} -> {:}".format(t, t+dt))
nc_filename = "advected_microbes_" + str(n).zfill(4) + ".nc"
pset.execute(parcels.AdvectionRK4, runtime=dt, dt=dt, verbose_progress=True,
output_file=pset.ParticleFile(name=nc_filename, outputdt=dt))
# print("Computing microbe interactions...")
# N = len(pset)
# for i, p1 in enumerate(pset):
# for j, p2 in enumerate(pset[i+1:]):
# if np.abs(p1.lat - p2.lat) < 1 and np.abs(p1.lon - p2.lon) < 1:
# p1_type = rock_paper_scissors_type(p1.species)
# p2_type = rock_paper_scissors_type(p2.species)
# winner = None
# if p1_type == "rock" and p2_type == "scissors":
# winner = p1
# elif p1_type == "rock" and p2_type == "paper":
# winner = p2
# elif p1_type == "paper" and p2_type == "rock":
# winner = p1
# elif p1_type == "paper" and p2_type == "scissors":
# winner = p2
# elif p1_type == "scissors" and p2_type == "rock":
# winner = p2
# elif p1_type == "scissors" and p2_type == "paper":
# winner = p1
# else:
# winner = None
# if winner == p1:
# p2.species = p1.species
# print("[{:s}#{:d}] @({:.2f}, {:.2f}) vs. [{:s}#{:d}] @({:.2f}, {:.2f}): #{:d} wins!"
# .format(p1_type, i, p1.lat, p1.lon, p2_type, j+i, p2.lat, p2.lon, i))
# elif winner == p2:
# p1.species = p2.species
# print("[{:s}#{:d}] @({:.2f}, {:.2f}) vs. [{:s}#{:d}] @({:.2f}, {:.2f}): #{:d} wins!"
# .format(p1_type, i, p1.lat, p1.lon, p2_type, j+i, p2.lat, p2.lon, j+i))
# for i, p in enumerate(pset):
# if p.lat >= 59 or p.lat <= 1 or p.lon <= -179 or p.lon >= -121:
# print("Removing particle #{:d} @({:.2f},{:.2f}). Too close to boundary"
# .format(i, p.lat, p.lon))
# pset.remove(i)
t = t+dt
print("Plotting figure...")
fig = plt.figure(figsize=(16, 9))
matplotlib.rcParams.update({'font.size': 10})
crs_sps = ccrs.PlateCarree(central_longitude=-150)
crs_sps._threshold = 1000.0 # This solves https://github.com/SciTools/cartopy/issues/363
ax = plt.subplot(111, projection=crs_sps)
ax.add_feature(land_50m)
ax.set_extent([0, 22.5, 0, 77.5], ccrs.PlateCarree())
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color='black',
alpha=0.8, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlocator = mticker.FixedLocator([0, 7.5, 15, 22.5])
gl.ylocator = mticker.FixedLocator([0, 15.5, 31, 46.5, 62, 77.5])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
im = ax.pcolormesh(lons, lats, u_magnitude, transform=vector_crs, vmin=0, vmax=1, cmap='Blues_r')
clb = fig.colorbar(im, ax=ax, extend='max', fraction=0.046, pad=0.1)
clb.ax.set_title(r'm/s')
rock_lats, rock_lons = [], []
paper_lats, paper_lons = [], []
scissors_lats, scissors_lons = [], []
for microbe in pset:
if microbe.species == 1:
rock_lats.append(microbe.lat)
rock_lons.append(microbe.lon)
elif microbe.species == 2:
paper_lats.append(microbe.lat)
paper_lons.append(microbe.lon)
elif microbe.species == 3:
scissors_lats.append(microbe.lat)
scissors_lons.append(microbe.lon)
# ax.plot(rock_lons, rock_lats, marker='o', linestyle='', color='red', ms=4, label='Rocks', transform=vector_crs)
# ax.plot(paper_lons, paper_lats, marker='o', linestyle='', color='lime', ms=4, label='Papers', transform=vector_crs)
# ax.plot(scissors_lons, scissors_lats, marker='o', linestyle='', color='cyan', ms=4, label='Scissors', transform=vector_crs)
plt.title(str(t))
ax.legend()
# plt.show()
png_filename = "advected_microbes_" + str(n).zfill(4) + ".png"
print("Saving figure: {:s}".format(png_filename))
plt.savefig(png_filename, dpi=300, format='png', transparent=False)
plt.close('all')
| 1.929688
| 2
|
project_euler/051-100/75.py
|
floppp/programming_challenges
| 0
|
12784324
|
'''
It turns out that 12 cm is the smallest length of wire that can be bent to
form an integer sided right angle triangle in exactly one way, but there are
many more examples.
12 cm: (3,4,5)
24 cm: (6,8,10)
30 cm: (5,12,13)
36 cm: (9,12,15)
40 cm: (8,15,17)
48 cm: (12,16,20)
In contrast, some lengths of wire, like 20 cm, cannot be bent to form an
integer sided right angle triangle, and other lengths allow more than one
solution to be found; for example, using 120 cm it is possible to form
exactly three different
integer sided right angle triangles.
120 cm: (30,40,50), (20,48,52), (24,45,51)
Given that L is the length of the wire, for how many values of L ≤ 1,500,000
can exactly one integer sided right angle triangle be formed?
'''
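# Background sketch (standard identity, stated here for context): every
# Pythagorean triple can be written via Euclid's formula as
#   a = k*(m*m - n*n), b = 2*k*m*n, c = k*(m*m + n*n)   with m > n >= 1,
# so the wire length is L = a + b + c = 2*k*m*(m + n).  pytagoric_triplet()
# below follows the same idea, with j and i playing the roles of m and n.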
from time import time
from math import sqrt
def find_sides(l):
o = c = h = 0
hay_resultado = False
for i in range(l//2 - 1, l//3, -1):
h = i
hs = i ** 2
limit = (l - h) // 2
for j in range(i - 1, limit, -1):
o = j
os = j ** 2
c = l - h - o
cs = c ** 2
if cs + os == hs:
if hay_resultado:
return False
hay_resultado = True
if hay_resultado:
return 1
return 0
def pytagoric_triplet(l):
a = b = c = 0
n = int(sqrt(l)) + 1
m = 2*n / 3
for i in range(1, m):
for j in range(i+1, n):
if a + b + c != l:
a = j*j - i*i
b = 2*j*i
c = j*j + i*i
else:
return 1
return 0
if __name__ == "__main__":
start = time()
res = 0
N = 150000
pytagoric_triplet(36)
for i in range(2, N+1):
res += pytagoric_triplet(i)
print res, 'primitive triples'
# for i in range(2, N+1, 2):
# res += find_sides(i)
# print res
print 'total time =', (time() - start), 's'
| 4.28125
| 4
|
src/pyasys/__init__.py
|
heewoonkim2020/pyasys
| 0
|
12784325
|
<filename>src/pyasys/__init__.py
"""
The official Pyasys Python library!
Please check README.md for more information on how to set up Pyasys for your next project.
"""
class Pyasys:
def __init__(self, import_name, author):
"""
Sets up a basic project class where project data can be stored.
:param import_name: The __name__ of the import structure.
:param author: The author of the project. Used for project directory handling and project attribute handling.
"""
# Setup
self.setup_project(project_automation=True, author=author, import_dir=import_name)
def setup_project(self, project_automation, author, import_dir):
# Prepare for project
self.project_automation = project_automation
self.author = author
self.import_dir = import_dir
self.data = {}
def get_attribute(self, keyname):
"""
:param keyname: The key name of the attribute to find.
"""
if keyname in self.data:
return self.data[keyname]
else:
return None
| 3
| 3
|
quilldelta/types.py
|
mariocesar/quill-delta-py
| 2
|
12784326
|
<filename>quilldelta/types.py
from collections import namedtuple
from functools import partial
from typing import Any, Dict, Union
from quilldelta import utils as _
__all__ = ['Insert', 'Retain', 'Delete', 'OperationType',
'is_retain', 'is_insert', 'is_delete',
'it_insert_text', 'load_operation']
def _sum_operation(instance, other):
type_op = type(instance)
type_other = type(other)
if type_other != type_op:
raise ValueError(f'Operations are not the same type '
f'{type_op.__name__} != {type_other.__name__}')
if hasattr(instance, 'attributes'):
instance_attr = instance.attributes if instance.attributes else None
other_attr = other.attributes if other.attributes else None
if instance_attr != other_attr:
raise ValueError("Can't sum operations with different attributes")
return type_op(instance.value + other.value, other_attr)
else:
return type_op(instance.value + other.value)
class Insert(namedtuple('Insert', 'value, attributes')):
__slots__ = ()
__str__ = _.instance_as_json
__add__ = _sum_operation
as_data = _.instance_as_dict
as_json = _.instance_as_json
@classmethod
def fromdict(cls, data):
data.setdefault('attributes', None)
return _.dict_to_class(cls, data)
@property
def length(self):
if isinstance(self.value, str):
return len(self.value)
return 1
class Retain(namedtuple('Retain', 'value, attributes')):
__slots__ = ()
__str__ = _.instance_as_json
__add__ = _sum_operation
as_data = _.instance_as_dict
as_json = _.instance_as_json
@classmethod
def fromdict(cls, data: dict):
data.setdefault('attributes', None)
return _.dict_to_class(cls, data)
@property
def length(self):
return self.value
@length.setter
def length(self, val: int):
assert isinstance(val, int)
self.value = val
class Delete(namedtuple('Delete', 'value')):
__slots__ = ()
__str__ = _.instance_as_json
__add__ = _sum_operation
as_data = _.instance_as_dict
as_json = _.instance_as_json
@classmethod
def fromdict(cls, data: dict):
return _.dict_to_class(cls, data)
@property
def length(self):
return self.value
@length.setter
def length(self, val: int):
assert isinstance(val, int)
self.value = val
OperationType = Union[Insert, Retain, Delete, Dict]
def load_operation(data: OperationType):
if isinstance(data, (Insert, Retain, Delete)):
return data
elif isinstance(data, Dict):
if 'insert' in data:
return Insert.fromdict(data)
elif 'retain' in data:
return Retain.fromdict(data)
elif 'delete' in data:
return Delete.fromdict(data)
raise ValueError('Unknown operation for %s' % data)
def _isinstance(op: Any, class_or_tuple):
return isinstance(op, class_or_tuple)
is_insert = partial(_isinstance, class_or_tuple=Insert)
is_retain = partial(_isinstance, class_or_tuple=Retain)
is_delete = partial(_isinstance, class_or_tuple=Delete)
def it_insert_text(op: Any):
return is_insert(op) and isinstance(op.value, str)
| 2.3125
| 2
|
03_ThreeWayAndTkinter/main.py
|
faizovboris/PythonDevelopment2021
| 0
|
12784327
|
<reponame>faizovboris/PythonDevelopment2021
import random
import tkinter as tk
import tkinter.messagebox
class Game():
def __init__(self):
self.root = tk.Tk()
self.root.title("Game 15")
self.root.rowconfigure(0, weight=1)
self.root.columnconfigure(0, weight=1)
self.frame = tk.Frame(self.root)
self.frame.grid(column=0, row=0, sticky=tk.NSEW)
self.new_button = tk.Button(self.frame, text="New", command=self.new_game)
self.new_button.grid(column=0, row=0, columnspan=2, sticky="")
self.exit_button = tk.Button(self.frame, text="Exit", command=self.stop_game)
self.exit_button.grid(column=2, row=0, columnspan=2, sticky="")
self.game_buttons = [None]
for i in range(1, 16):
self.game_buttons.append(tk.Button(self.frame, text=str(i), command=self.make_move(i)))
for i in range(4):
self.frame.grid_rowconfigure(i + 1, weight=1, pad=1, uniform="row_gr")
self.frame.grid_columnconfigure(i, weight=1, pad=1, uniform="col_gr")
self.new_game()
def new_game(self):
self.buttons_order = []
for i in range(1, 16):
self.buttons_order.append(i)
random.shuffle(self.buttons_order)
inv_cnt = 0
for i in range(15):
for j in range(i + 1, 15):
inv_cnt += (self.buttons_order[i] > self.buttons_order[j])
# With the blank in the last cell, a 15-puzzle layout is solvable only if the
# permutation of tiles 1-15 is even; an odd inversion count is fixed by
# swapping tiles 14 and 15.
if inv_cnt % 2 != 0:
self.swap_buttons(self.buttons_order.index(14), self.buttons_order.index(15))
self.buttons_order.append('Blank')
self.place_buttons()
def swap_buttons(self, i, j):
self.buttons_order[i], self.buttons_order[j] = self.buttons_order[j], self.buttons_order[i]
def place_buttons(self):
for i, button_id in enumerate(self.buttons_order):
if button_id == 'Blank':
continue
self.game_buttons[button_id].grid(column=i % 4, row=1 + i // 4, columnspan=1, sticky=tk.NSEW)
self.game_buttons[button_id]['background'] = '#00bbbb'
def make_move(self, button_id):
def move_fn():
button_position = self.buttons_order.index(button_id)
button_x = button_position % 4
button_y = button_position // 4
if button_x > 0 and self.buttons_order[button_position - 1] == 'Blank':
self.swap_buttons(button_position, button_position - 1)
elif button_x < 3 and self.buttons_order[button_position + 1] == 'Blank':
self.swap_buttons(button_position, button_position + 1)
elif button_y > 0 and self.buttons_order[button_position - 4] == 'Blank':
self.swap_buttons(button_position, button_position - 4)
elif button_y < 3 and self.buttons_order[button_position + 4] == 'Blank':
self.swap_buttons(button_position, button_position + 4)
self.place_buttons()
self.check_winning()
return move_fn
def check_winning(self):
for i in range(15):
if self.buttons_order[i] != i + 1:
return
tk.messagebox.showinfo(message="You win!")
self.new_game()
return
def stop_game(self):
self.root.destroy()
def start_game(self):
self.root.mainloop()
if __name__ == '__main__':
game = Game()
game.start_game()
| 3.4375
| 3
|
gunicorn.conf.py
|
jience/flask_gunicorn_nginx
| 1
|
12784328
|
<reponame>jience/flask_gunicorn_nginx
"""
gunicorn WSGI server configuration.
https://docs.gunicorn.org/en/latest/index.html
"""
import multiprocessing
loglevel = "debug"
bind = "unix:/tmp/gunicorn.sock"
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = "gevent"
max_requests = 1000
pidfile = "gunicorn.pid"
accesslog = "gunicorn.access.log"
errorlog = "gunicorn.error.log"
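# A typical way to launch a Flask app with this config (sketch; "app:app" is a
# placeholder for the actual WSGI module and callable):
#
# gunicorn -c gunicorn.conf.py app:app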
| 1.9375
| 2
|
gists/process_data.py
|
snafis/Utils
| 3
|
12784329
|
<gh_stars>1-10
# coding: utf-8
import os
import warnings
import pandas as pd
import numpy as np
import click
import pathlib
from src.utils.io.get_absolute_fpath import get_absolute_fpath
from src.utils.io.python_config_dict import config_dict_from_python_fpath
# Silence C dtype mapping warnings
warnings.filterwarnings("ignore", category=pd.errors.PerformanceWarning)
# Silence Deprecation Warning for click using importlib
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Change to root dir
os.chdir(get_absolute_fpath())
# Script Purpose
#################
# 1) Load in data from a source file and table (e.g. SQL, HDF, cloud etc)
# 2) Load in an sklearn pipeline for ETL transformation via the config file
# specified at --config_path which should be a python file holding necessary
# variables, specifically the ETL pipeline in variable PIPELINE
# 3) Export data to a db and table. If none is specified, use the import variable ones
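# A minimal sketch of what the module at --config_path could contain (names
# below are illustrative, not part of this repo):
#
# from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import StandardScaler
# PIPELINE = Pipeline([("scale", StandardScaler())])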
@click.command()
@click.option('--config_path', default=None)
@click.option('--db_import_path', default=None)
@click.option('--db_input_table', default=None)
@click.option('--db_export_path', default=None)
@click.option('--db_export_table', default=None)
@click.option('--verbose', default=1)
def process_data(config_path: str = None,
db_import_path: str = None,
db_input_table: str = None,
db_export_path: str = None,
db_export_table: str = None,
verbose: int = None
):
"""
Process the data
"""
# Set export variables if none specified to be the same as import (e.g. inplace)
if verbose:
if (not db_export_path) or (not db_export_table):
click.echo('No export variables set. Data ETL will be done in place at the '
f'db at "{db_import_path}" and table "{db_input_table}"')
if not db_export_path:
db_export_path = db_import_path
if not db_export_table:
db_export_table = db_input_table
# Turn string paths into pathlib Paths
db_import_path = pathlib.Path(db_import_path)
db_export_path = pathlib.Path(db_export_path)
config = config_dict_from_python_fpath(config_path)
# Establish database connection
# conn = sqlite3.connect(db_import_path.as_posix())
# Read Data
# df = pd.read_sql(sql=f'SELECT * FROM {db_input_table}', con=conn)
df = pd.read_hdf(db_import_path.as_posix(), db_input_table)
if verbose:
click.echo(f'Reading in data from table {db_input_table} at {db_import_path.as_posix()}')
# Process data with pipeline
pipeline = config['PIPELINE']
transformed_df = pipeline.fit_transform(df)
# Write to DB
# Write each DF into the database, replacing the table if it previously existed
if verbose:
click.echo(f'Final datashape for transformed data is {transformed_df.shape}')
click.echo(f'Compare to original shapes of: {df.shape}')
if verbose:
click.echo(f'Placing data into table "{db_export_table}" in the db at {db_export_path.as_posix()}')
# transformed_df.to_sql(db_export_table, con=conn, if_exists='replace', index=False)
transformed_df.to_hdf(db_export_path.as_posix(), db_export_table)
if __name__ == '__main__':
process_data()
| 2.609375
| 3
|
search.py
|
wifijanitor/Search
| 0
|
12784330
|
<reponame>wifijanitor/Search<filename>search.py<gh_stars>0
#!/usr/bin/env python3
import os
import sys
import re
version = 2.0
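# Usage (sketch): ./search.py <part-regex>
# The first CLI argument is treated as a case-insensitive regular expression
# and matched against every line of ~/Cisco/price.txt.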
part = sys.argv[1]
file = open(os.path.expanduser('~/Cisco/price.txt'))
rgx = re.compile(part, re.I)
for line in file:
if re.search(rgx, line):
print(line)
file.close()
| 2.875
| 3
|
terrainbento/boundary_handlers/not_core_node_baselevel_handler.py
|
alexmitchell/terrainbento
| 18
|
12784331
|
<gh_stars>10-100
# coding: utf8
# !/usr/env/python
"""**NotCoreNodeBaselevelHandler** modifies elevation for not-core nodes."""
import os
import numpy as np
from scipy.interpolate import interp1d
class NotCoreNodeBaselevelHandler(object):
"""Control the elevation of all nodes that are not core nodes.
The **NotCoreNodeBaselevelHandler** controls the elevation of all nodes on
the model grid with ``status != 0`` (i.e., all not-core nodes). The
elevation change at these nodes is specified either as a constant rate, or
through a text file that specifies the elevation change through time.
Through the parameter ``modify_core_nodes`` the user can determine if the
core nodes should be moved in the direction (up or down) specified by the
elevation change directive, or if the non-core nodes should be moved in
the opposite direction.
The **NotCoreNodeBaselevelHandler** expects that ``topographic__elevation``
is an at-node model grid field. It will modify this field as well as
the field ``bedrock__elevation``, if it exists.
Note that **NotCoreNodeBaselevelHandler** increments time at the end of the
**run_one_step** method.
"""
def __init__(
self,
grid,
modify_core_nodes=False,
lowering_rate=None,
lowering_file_path=None,
model_end_elevation=None,
**kwargs
):
"""
Parameters
----------
grid : landlab model grid
modify_core_nodes : boolean, optional
Flag to indicate if the core nodes or the non-core nodes will
be modified. Default is False, indicating that the boundary nodes
will be modified.
lowering_rate : float, optional
Lowering rate of the outlet node. One of ``lowering_rate`` and
``lowering_file_path`` is required. Units are implied by the
model grid's spatial scale and the time units of ``step``. Negative
values mean that the outlet lowers.
lowering_file_path : str, optional
Lowering history file path. One of ``lowering_rate``
and ``lowering_file_path`` is required. Units are implied by
the model grid's spatial scale and the time units of ``step``.
This file should be readable with
``np.loadtxt(filename, skiprows=1, delimiter=",")``.
Its first column is time and its second column is the elevation
change at the outlet since the onset of the model run. Negative
values mean the outlet lowers.
model_end_elevation : float, optional
Average elevation of the nodes_to_lower at the end of the model
run duration. When the outlet is lowered based on a
``lowering_file_path``, a ``model_end_elevation`` can be set such that
lowering is scaled based on the starting and ending outlet
elevation. Default behavior is to not scale the lowering pattern.
Examples
--------
Start by creating a landlab model grid and set its boundary conditions.
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((5, 5))
>>> z = mg.add_zeros("node", "topographic__elevation")
>>> mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> mg.set_watershed_boundary_condition_outlet_id(
... 0, mg.at_node["topographic__elevation"], -9999.)
>>> print(z.reshape(mg.shape))
[[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
Now import the **NotCoreNodeBaselevelHandler** and instantiate.
>>> from terrainbento.boundary_handlers import (
... NotCoreNodeBaselevelHandler)
>>> bh = NotCoreNodeBaselevelHandler(mg,
... modify_core_nodes = False,
... lowering_rate = -0.1)
>>> bh.run_one_step(10.0)
We should expect that the boundary nodes (except for node 0) will all
have lowered by -1.
>>> print(z.reshape(mg.shape))
[[-1. -1. -1. -1. -1.]
[-1. 0. 0. 0. -1.]
[-1. 0. 0. 0. -1.]
[-1. 0. 0. 0. -1.]
[-1. -1. -1. -1. -1.]]
If we wanted instead for all of the non core nodes to change their
elevation, we would set ``modify_core_nodes = True``.
>>> mg = RasterModelGrid((5, 5))
>>> z = mg.add_zeros("node", "topographic__elevation")
>>> mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> mg.set_watershed_boundary_condition_outlet_id(
... 0, mg.at_node["topographic__elevation"], -9999.)
>>> from terrainbento.boundary_handlers import (
... NotCoreNodeBaselevelHandler)
>>> bh = NotCoreNodeBaselevelHandler(mg,
... modify_core_nodes = True,
... lowering_rate = -0.1)
>>> bh.run_one_step(10.0)
>>> print(z.reshape(mg.shape))
[[ 0. 0. 0. 0. 0.]
[ 0. 1. 1. 1. 0.]
[ 0. 1. 1. 1. 0.]
[ 0. 1. 1. 1. 0.]
[ 0. 0. 0. 0. 0.]]
More complex baselevel histories can be provided with a
``lowering_file_path``.
"""
self.model_time = 0.0
self.grid = grid
self.modify_core_nodes = modify_core_nodes
self.z = self.grid.at_node["topographic__elevation"]
# determine which nodes to lower
# based on which are lowering, set the prefactor correctly.
if self.modify_core_nodes:
self.nodes_to_lower = self.grid.status_at_node == 0
self.prefactor = -1.0
else:
self.nodes_to_lower = self.grid.status_at_node != 0
self.prefactor = 1.0
if (lowering_file_path is None) and (lowering_rate is None):
raise ValueError(
(
"NotCoreNodeBaselevelHandler requires one of "
"lowering_rate and lowering_file_path"
)
)
else:
if lowering_rate is None:
# initialize outlet elevation object
if os.path.exists(lowering_file_path):
elev_change_df = np.loadtxt(
lowering_file_path, skiprows=1, delimiter=","
)
time = elev_change_df[:, 0]
elev_change = elev_change_df[:, 1]
model_start_elevation = np.mean(
self.z[self.nodes_to_lower]
)
if model_end_elevation is None:
self.scaling_factor = 1.0
else:
self.scaling_factor = np.abs(
model_start_elevation - model_end_elevation
) / np.abs(elev_change[0] - elev_change[-1])
outlet_elevation = (
self.scaling_factor
* self.prefactor
* elev_change_df[:, 1]
) + model_start_elevation
self.outlet_elevation_obj = interp1d(
time, outlet_elevation
)
self.lowering_rate = None
else:
raise ValueError(
(
"The lowering_file_path provided "
"to NotCoreNodeBaselevelHandler does not "
"exist."
)
)
elif lowering_file_path is None:
self.lowering_rate = lowering_rate
self.outlet_elevation_obj = None
else:
raise ValueError(
(
"Both an lowering_rate and a "
"lowering_file_path have been provided "
"to NotCoreNodeBaselevelHandler. Please provide "
"only one."
)
)
def run_one_step(self, step):
"""Run **NotCoreNodeBaselevelHandler** forward and update elevations.
The **run_one_step** method provides a consistent interface to update
the terrainbento boundary condition handlers.
In the **run_one_step** routine, the **NotCoreNodeBaselevelHandler**
will either lower the closed or raise the non-closed nodes based on
inputs specified at instantiation.
Note that **NotCoreNodeBaselevelHandler** increments time at the end of
the **run_one_step** method.
Parameters
----------
step : float
Duration of model time to advance forward.
"""
# next, lower the correct nodes the desired amount
# first, if we do not have an outlet elevation object
if self.outlet_elevation_obj is None:
# calculate lowering amount and subtract
self.z[self.nodes_to_lower] += (
self.prefactor * self.lowering_rate * step
)
# if bedrock__elevation exists as a field, lower it also
other_fields = [
"bedrock__elevation",
"lithology_contact__elevation",
]
for of in other_fields:
if of in self.grid.at_node:
self.grid.at_node[of][self.nodes_to_lower] += (
self.prefactor * self.lowering_rate * step
)
# if there is an outlet elevation object
else:
# if bedrock__elevation exists as a field, lower it also
# calculate the topographic change required to match the current
# time's value for outlet elevation. This must be done in case
# bedrock elevation exists, and must be done before the topography
# is lowered
mean_z = np.mean(self.z[self.nodes_to_lower])
self.topo_change = mean_z - self.outlet_elevation_obj(
self.model_time
)
other_fields = [
"bedrock__elevation",
"lithology_contact__elevation",
]
for of in other_fields:
if of in self.grid.at_node:
self.grid.at_node[of][
self.nodes_to_lower
] -= self.topo_change
# lower topography
self.z[self.nodes_to_lower] -= self.topo_change
# increment model time
self.model_time += step
| 2.515625
| 3
|
new_network/spw_network_new_brian2.py
|
andrisecker/KOKI_sharp_waves
| 0
|
12784332
|
<filename>new_network/spw_network_new_brian2.py
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
creates PC (adExp IF) and BC (exp IF) population in Brian2, loads in recurrent connection matrix for PC population
runs simulation and checks the dynamics
(updated network, parameters are/should be closer to the experimental data!)
authors: <NAME>, <NAME> last update: 11.2017
"""
import os
import sys
from brian2 import *
import numpy as np
import random as pyrandom
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore") # ignore scipy 0.18 sparse matrix warning...
SWBasePath = os.path.sep.join(os.path.abspath('__file__').split(os.path.sep)[:-2])
sys.path.insert(0, os.path.sep.join([SWBasePath, 'scripts']))
from detect_oscillations import *
from plots import *
# size of populations
NE = 4000
NI = 1000
# sparseness
eps_pyr = 0.16
eps_bas = 0.4
# synaptic time constants:
# rise time constants
PyrExc_rise = 1.3 * ms # Guzman 2016 (only from Fig.1 H - 20-80%)
PyrExcMF_rise = 0.65 * ms # Vyleta ... Jonas 2016 (20-80%)
PyrInh_rise = 0.3 * ms # Bartos 2002 (20-80%)
BasExc_rise = 1. * ms # Lee 2014 (data from CA1)
BasInh_rise = 0.25 * ms # Bartos 2002 (20-80%)
# decay time constants
PyrExc_decay = 9.5 * ms # Guzman 2016 ("needed for temporal summation of EPSPs")
PyrExcMF_decay = 5.4 * ms # Vyleta ... Jonas 2016
PyrInh_decay = 3.3 * ms # Bartos 2002
BasExc_decay = 4.1 * ms # Lee 2014 (data from CA1)
BasInh_decay = 1.2 * ms # Bartos 2002
# Normalization factors (normalize the peak of the PSC curve to 1)
invpeak_PyrExc = (PyrExc_decay / PyrExc_rise) ** (PyrExc_rise / (PyrExc_decay - PyrExc_rise))
invpeak_PyrExcMF = (PyrExcMF_decay / PyrExcMF_rise) ** (PyrExcMF_rise / (PyrExcMF_decay - PyrExcMF_rise))
invpeak_PyrInh = (PyrInh_decay / PyrInh_rise) ** (PyrInh_rise / (PyrInh_decay - PyrInh_rise))
invpeak_BasExc = (BasExc_decay / BasExc_rise) ** (BasExc_rise / (BasExc_decay - BasExc_rise))
invpeak_BasInh = (BasInh_decay / BasInh_rise) ** (BasInh_rise / (BasInh_decay - BasInh_rise))
# synaptic delays:
delay_PyrExc = 2.2 * ms # Guzman 2016
delay_PyrInh = 1.1 * ms # Bartos 2002
delay_BasExc = 0.9 * ms # Geiger 1997 (data from DG)
delay_BasInh = 0.6 * ms # Bartos 2002
# synaptic reversal potentials
E_Exc = 0.0 * mV
E_Inh = -70.0 * mV
# mossy fiber input
rate_MF = 20 * Hz
z = 1 * nS
# parameters for pyr cells (optimized by Bence)
gL_Pyr = 4.49581428461e-3 * uS
tauMem_Pyr = 37.97630516 * ms
Cm_Pyr = tauMem_Pyr * gL_Pyr
Vrest_Pyr = -59.710040237 * mV
reset_Pyr = -24.8988661181 * mV
theta_Pyr = -13.3139788756 * mV
tref_Pyr = 3.79313737057 * ms
a_Pyr = -0.255945300382 * nS
b_Pyr = 0.22030375858 * nA
delta_T_Pyr = 3.31719795927 * mV
tau_w_Pyr = 80.1747780694 * ms
v_spike_Pyr = theta_Pyr + 10 * delta_T_Pyr
# parameters for bas cells (optimized by Bence)
gL_Bas = 7.0102757369e-3 * uS
tauMem_Bas = 37.7598232668 * ms
Cm_Bas = tauMem_Bas * gL_Bas
Vrest_Bas = -58.9682231705 * mV
reset_Bas = -39.1229822301 * mV
theta_Bas = -39.5972788689 * mV
tref_Bas = 1.06976577195 * ms
delta_T_Bas = 2.21103724225 * mV
v_spike_Bas = theta_Bas + 10 * delta_T_Bas
eqs_Pyr = '''
dvm/dt = (-gL_Pyr*(vm-Vrest_Pyr) + gL_Pyr*delta_T_Pyr*exp((vm- theta_Pyr)/delta_T_Pyr) - w - ((g_ampa+g_ampaMF)*z*(vm-E_Exc) + g_gaba*z*(vm-E_Inh)))/Cm_Pyr : volt (unless refractory)
dw/dt = (a_Pyr*(vm- Vrest_Pyr )-w)/tau_w_Pyr : amp
dg_ampa/dt = (invpeak_PyrExc * x_ampa - g_ampa) / PyrExc_rise : 1
dx_ampa/dt = -x_ampa / PyrExc_decay : 1
dg_ampaMF/dt = (invpeak_PyrExcMF * x_ampaMF - g_ampaMF) / PyrExcMF_rise : 1
dx_ampaMF/dt = -x_ampaMF / PyrExcMF_decay : 1
dg_gaba/dt = (invpeak_PyrInh * x_gaba - g_gaba) / PyrInh_rise : 1
dx_gaba/dt = -x_gaba/PyrInh_decay : 1
'''
eqs_Bas = '''
dvm/dt = (-gL_Bas*(vm-Vrest_Bas) + gL_Bas*delta_T_Bas*exp((vm- theta_Bas)/delta_T_Bas) - (g_ampa*z*(vm-E_Exc) + g_gaba*z*(vm-E_Inh)))/Cm_Bas : volt (unless refractory)
dg_ampa/dt = (invpeak_BasExc * x_ampa - g_ampa) / BasExc_rise : 1
dx_ampa/dt = -x_ampa/BasExc_decay : 1
dg_gaba/dt = (invpeak_BasInh * x_gaba - g_gaba) / BasInh_rise : 1
dx_gaba/dt = -x_gaba/BasInh_decay : 1
'''
def run_simulation(Wee, STDP_mode="asym", detailed=True, verbose=True):
"""
Sets up the network and runs simulation
:param Wee: np.array representing synaptic weight matrix
:param STDP_mode: symmetric or asymmetric weight matrix flag (used for synapse parameters)
:param detailed: bool - usage of multi state monitor (for membrane pot and inh. and exc. inputs of some single cells)
:param verbose: bool - report status of simulation
:return sme, smi, popre, popri, selection, mSME, sMI: brian2 monitors (+ array of selected cells used by multi state monitor)
"""
np.random.seed(12345)
pyrandom.seed(12345)
# synaptic weights
J_PyrInh = 0.02
if STDP_mode == "asym":
J_BasExc = 5
J_BasInh = 0.4
J_PyrMF = 24.25
elif STDP_mode == "sym":
J_BasExc = 5.5
J_BasInh = 0.8
J_PyrMF = 30
# wmx scale factor already introduced in the stdp* script!
PE = NeuronGroup(NE, model=eqs_Pyr, threshold="vm>v_spike_Pyr",
reset="vm=reset_Pyr; w+=b_Pyr", refractory=tref_Pyr, method="exponential_euler")
PI = NeuronGroup(NI, model=eqs_Bas, threshold="vm>v_spike_Bas",
reset="vm=reset_Bas", refractory=tref_Bas, method="exponential_euler")
PE.vm = Vrest_Pyr
PE.g_ampa = 0
PE.g_ampaMF = 0
PE.g_gaba = 0
PI.vm = Vrest_Bas
PI.g_ampa = 0
PI.g_gaba = 0
MF = PoissonGroup(NE, rate_MF)
Cext = Synapses(MF, PE, on_pre="x_ampaMF+=J_PyrMF")
Cext.connect(j='i')
# weight matrix used here:
Cee = Synapses(PE, PE, 'w_exc:1', on_pre='x_ampa+=w_exc')
Cee.connect()
Cee.w_exc = Wee.flatten()
Cee.delay = delay_PyrExc
del Wee # clear memory
Cei = Synapses(PE, PI, on_pre='x_ampa+=J_BasExc')
Cei.connect(p=eps_pyr)
Cei.delay = delay_BasExc
Cie = Synapses(PI, PE, on_pre='x_gaba+=J_PyrInh')
Cie.connect(p=eps_bas)
Cie.delay = delay_PyrInh
Cii = Synapses(PI, PI, on_pre='x_gaba+=J_BasInh')
Cii.connect(p=eps_bas)
Cii.delay = delay_BasInh
# Monitors
sme = SpikeMonitor(PE)
smi = SpikeMonitor(PI)
popre = PopulationRateMonitor(PE)
popri = PopulationRateMonitor(PI)
if detailed:
selection = np.arange(0, 4000, 50) # subset of neurons for recording variables
mSME = StateMonitor(PE, ["vm", "w", "g_ampa", "g_ampaMF","g_gaba"], record=selection.tolist()) # comment this out later (takes memory!)
sMI = StateMonitor(PI, "vm", record=[500])
if verbose:
run(10000*ms, report="text")
else:
run(10000*ms)
if detailed:
return sme, smi, popre, popri, selection, mSME, sMI
else:
return sme, smi, popre, popri
if __name__ == "__main__":
try:
STDP_mode = sys.argv[1]
except:
STDP_mode = "asym"
assert(STDP_mode in ["sym", "asym"])
fIn = "wmxR_%s.txt"%STDP_mode
detailed = True
TFR = True
# load in weight matrix
fName = os.path.join(SWBasePath, "files", fIn)
Wee = load_Wee(fName)
# run simulation
if detailed:
sme, smi, popre, popri, selection, mSME, sMI = run_simulation(Wee, STDP_mode)
else:
sme, smi, popre, popri = run_simulation(Wee, STDP_mode, detailed=False)
# analyse results
if sme.num_spikes > 0 and smi.num_spikes > 0: # check if there is any activity
# analyse spikes
spikeTimesE, spikingNeuronsE, poprE, ISIhist, bin_edges = preprocess_monitors(sme, popre)
spikeTimesI, spikingNeuronsI, poprI = preprocess_monitors(smi, popri, calc_ISI=False)
# detect replay
avgReplayInterval = replay(ISIhist[3:16]) # bins from 150 to 850 (range of interest)
# analyse rates
if TFR:
meanEr, rEAC, maxEAC, tMaxEAC, fE, PxxE, trfE, tE, freqsE = analyse_rate(poprE, TFR=True)
meanIr, rIAC, maxIAC, tMaxIAC, fI, PxxI, trfI, tI, freqsI = analyse_rate(poprI, TFR=True)
else:
meanEr, rEAC, maxEAC, tMaxEAC, fE, PxxE = analyse_rate(poprE)
meanIr, rIAC, maxIAC, tMaxIAC, fI, PxxI = analyse_rate(poprI)
maxEACR, tMaxEACR, avgRippleFE, ripplePE = ripple(rEAC, fE, PxxE)
maxIACR, tMaxIACR, avgRippleFI, ripplePI = ripple(rIAC, fI, PxxI)
avgGammaFE, gammaPE = gamma(fE, PxxE)
avgGammaFI, gammaPI = gamma(fI, PxxI)
# print out some info
print "Mean excitatory rate: %.3f"%meanEr
print "Mean inhibitory rate: %.3f"%meanIr
print "Average exc. ripple freq: %.3f"%avgRippleFE
print "Exc. ripple power: %.3f"%ripplePE
print "Average exc. gamma freq: %.3f"%avgGammaFE
print "Exc. gamma power: %.3f"%gammaPE
print "Average inh. ripple freq: %.3f"%avgRippleFI
print "Inh. ripple power: %.3f"%ripplePI
print "Average inh. gamma freq: %.3f"%avgGammaFI
print "Inh. gamma power: %.3f"%gammaPI
print "--------------------------------------------------"
# Plots
plot_raster_ISI(spikeTimesE, spikingNeuronsE, poprE, [ISIhist, bin_edges], "blue", multiplier_=1)
if TFR:
plot_PSD(poprE, rEAC, fE, PxxE, "Pyr_population", "blue", multiplier_=1,
TFR=True, tfr=trfE, t=tE, freqs=freqsE, fs=1000)
plot_PSD(poprI, rIAC, fI, PxxI, "Bas_population", "green", multiplier_=1,
TFR=True, tfr=trfI, t=tI, freqs=freqsI, fs=1000)
else:
plot_PSD(poprE, rEAC, fE, PxxE, "Pyr_population", "blue", multiplier_=1)
plot_PSD(poprI, rIAC, fI, PxxI, "Bas_population", "green", multiplier_=1)
if detailed:
subset = plot_zoomed(spikeTimesE, spikingNeuronsE, poprE, "Pyr_population", "blue", multiplier_=1,
sm=mSME, selection=selection)
plot_zoomed(spikeTimesI, spikingNeuronsI, poprI, "Bas_population", "green", multiplier_=1,
Pyr_pop=False, sm=sMI)
plot_detailed(mSME, subset, multiplier_=1, new_network=True)
else:
plot_zoomed(spikeTimesE, spikingNeuronsE, poprE, "Pyr_population", "blue", multiplier_=1)
plot_zoomed(spikeTimesI, spikingNeuronsI, poprI, "Bas_population", "green", multiplier_=1, Pyr_pop=False)
else: # if there is no activity the auto-correlation function will throw an error!
print "No activity !"
print "--------------------------------------------------"
plt.show()
| 2.15625
| 2
|
a01_gui_main.py
|
CyabbageRoll/Schedule_Manager
| 0
|
12784333
|
# %% =======================================================================
# import libraries
#===========================================================================
# default
import os
import datetime
import hashlib
# conda
import pandas as pd
import PySimpleGUI as sg
# user
from b01_schedule_class_base import ScheduleManageBase
from b01_schedule_class import ScheduleManage
# %% =======================================================================
# PySimpleGUI settings
#===========================================================================
# window layout is defined in b01_schedule_class_base
# button or other functions are difined in b01_schedule_class
sch_m = ScheduleManage()
sch_m.create_window()
sch_m.window.read(timeout=1)
# %% =======================================================================
# simple gui event loop
#===========================================================================
# Event Loop to process "events" and get the "values" of the inputs
while True:
event, pos, item, eid = sch_m.parse_event()
# if event and "MV" not in event:
# print(event, pos, item, eid)
if event == sg.WIN_CLOSED: # if user closes window or clicks cancel
break
# %% =======================================================================
# header
#===========================================================================
if pos == "hd": # header
if item == "btn": # click button
if eid == 0: # all
sch_m.header_all_button_pressed()
sch_m.header_checkbox_changed()
if eid == 1: # clear
sch_m.header_clear_button_pressed()
sch_m.header_checkbox_changed()
if eid == 2: # drawchart
sch_m.header_refresh_button_pressed()
if eid == 3: # upload
sch_m.header_upload_button_pressed()
if eid == 4: # download
sch_m.header_reload_button_pressed()
if item == "cbx": # check box was updated
sch_m.header_checkbox_changed([eid])
if item == "rdi": # radio button
sch_m.header_member_raido_button_changed()
continue
# %% =======================================================================
# left tab
#===========================================================================
if pos == "lt":
if item == "grp":
sch_m.l1_chart_draw()
continue
if pos == "l1": # left tab1
if item[:3] == "grp":
if len(item) == 3:
sch_m.l1_graph_area_clicked()
if "MV" in item:
sch_m.l1_graphs_capture_mouse_motion(eid)
if "RC" in item:
sch_m.l1_graph_right_click_menu_selected(event, eid)
continue
if pos == "l2": # left tab2
continue
if pos == "l3": # left tab2
if item == "btn":
if eid < 4:
sch_m.l3_priority_updown_button_pressed(eid)
if eid == 4:
sch_m.l3_priority_auto_button_pressed()
if "tbl" in item:
sch_m.l3_table_selected_ticket_changed()
continue
if pos == "l0":
if item == "btn":
if eid == 0:
sch_m.l0_settings_save_and_restart_button_pressed()
# %% =======================================================================
# right tab
#===========================================================================
if pos == "r1": # right tab1
if item[:3] == "inp":
if len(item) == 6:
if item[4:6] == "LC":
sch_m.r1_input_date_box_selected(eid)
sch_m.r1_input_check()
# sch_m._r1_pre_next_ticket_table_update()
if item[:3] == "btn":
if eid == 0:
sch_m.r1_apply_button_pressed()
if eid == 1:
sch_m.r1_delete_button_pressed()
if item == "right_menu":
sch_m.r1_right_click_menu_clicked(event)
continue
if pos == "r2": # right tab2
if item == "btn":
# if eid == 0:
# sch_m.r2_save_plan_button_pressed()
if eid == 1:
sch_m.r2_save_record_button_pressed()
if eid == 4:
sch_m.r2_delete_button_pressed()
if item == "txt":
if eid == 2:
sch_m.r2_date_txt_pressed()
if item == "inp":
sch_m.r2_information_box_inputed()
continue
if pos == "r3": # right tab3
if item == "btn":
if eid == 0:
sch_m.r3_mail_button_pressed()
if eid == 1:
sch_m.r3_folder_button_pressed()
if eid == 2:
sch_m.r3_memo_button_pressed()
continue
if pos == "r4": # right tab4
continue
if pos == "r5": # right tab5
if item == "btn":
sch_m.r5_arrow_button_pressed(eid)
if item == "mul":
sch_m.r5_df_from_multiline(eid)
sch_m.window.close()
| 1.882813
| 2
|
OOP-PythonBackend/sensor_db.py
|
mrmareksvk/Project3
| 0
|
12784334
|
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class Sensor_DB:
def __init__(self, host="127.0.0.1", database="postgres", user="postgres", password=""):
self.__DBconnection = psycopg2.connect(
host=host, database="postgres", user=user, password=password
)
self.__DBconnection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.__DBcursor = self.__DBconnection.cursor()
self.__DBcursor.execute("SELECT 1 FROM pg_catalog.pg_database WHERE datname = %s", (database,),)
self.__DBexists = self.__DBcursor.fetchone()
if not self.__DBexists:
self.__DBcursor.execute("CREATE DATABASE " + database)
self.__DBcursor.close()
self.__DBconnection.close()
###############################################
self.__DBconnection = psycopg2.connect(
host=host, database=database, user=user, password=password
)
self.__DBcursor = self.__DBconnection.cursor()
self.__DBtables = set()
def __createTable(self, tableID):
self.__DBcursor.execute(
"CREATE TABLE IF NOT EXISTS sensor%s ("
" id_entry SERIAL PRIMARY KEY,"
" temperature DOUBLE PRECISION,"
" humidity INT,"
" lux INT,"
" latitude DOUBLE PRECISION,"
" longitude DOUBLE PRECISION,"
" dt TIMESTAMP"
")",
(int(tableID),),
)
self.__DBconnection.commit()
self.__DBtables.add(tableID)
def saveData(self, sensorID, temperature, humidity, lux, latitude, longitude, date, time):
if sensorID not in self.__DBtables:
self.__createTable(sensorID)
self.__DBcursor.execute(
"INSERT INTO sensor%s (temperature, humidity, lux, latitude, longitude, dt) VALUES(%s, %s, %s, %s, %s, %s)",
(
int(sensorID),
temperature,
humidity,
lux,
latitude,
longitude,
date + " " + time,
),
)
self.__DBconnection.commit()
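# Example usage (sketch; assumes a reachable PostgreSQL server whose user may
# create the target database on first run):
#
# db = Sensor_DB(host="127.0.0.1", database="sensors", user="postgres", password="secret")
# db.saveData(sensorID=1, temperature=21.4, humidity=55, lux=300,
# latitude=48.15, longitude=17.11, date="2021-05-01", time="12:00:00")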
| 3.25
| 3
|
tests/test_log.py
|
Yusadolat/bottery
| 0
|
12784335
|
<reponame>Yusadolat/bottery
import logging
from testfixtures import LogCapture
from bottery.log import DEFAULT_LOGGING, ColoredFormatter
def test_ColoredFormatter():
'''Test if logs are being colored'''
logging.config.dictConfig(DEFAULT_LOGGING)
with LogCapture(names='bottery') as logs:
logger = logging.getLogger('bottery')
logger.debug('DEBUG')
logger.info('INFO')
logger.warning('WARN')
logger.error('ERROR')
logger.critical('CRITICAL')
records = [record for record in logs.records]
# Create a list of all records formated with ColoredFormatter
colored_formatter = ColoredFormatter()
formatted_records = [colored_formatter.format(record)
for record in records]
expected_records = [
'DEBUG',
'INFO',
'\x1b[33mWARN\x1b[0m',
'\x1b[31mERROR\x1b[0m',
'\x1b[30m\x1b[41mCRITICAL\x1b[0m'
]
assert formatted_records == expected_records
| 2.765625
| 3
|
tests/guinea-pigs/unittest/subtest_failure.py
|
djeebus/teamcity-python
| 0
|
12784336
|
<gh_stars>0
import unittest
from teamcity.unittestpy import TeamcityTestRunner
class TestXXX(unittest.TestCase):
def testSubtestFailure(self):
with self.subTest(i=0):
pass
with self.subTest(i="abc.xxx"):
assert 1 == 0
unittest.main(testRunner=TeamcityTestRunner)
| 2.328125
| 2
|
refex/python/syntactic_template.py
|
ssbr/refex
| 11
|
12784337
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`refex.python.syntactic_template`
--------------------------------------
Syntax-aware Python substitution templates, as described in
:doc:`/guide/patterns_templates`.
The core problem with lexical or textual substitution of Python code, as with
e.g. :class:`refex.formatting.ShTemplate`, is that the substitution can be
unintentionally wrong. For example:
If you replace ``f($x)`` with ``$x``, what if ``$x`` contains a newline?
If you replace ``$a`` with ``$a * 2``, what if ``$a`` is ``1 + 2``?
The template classes here aim to make replacements that match the intended
syntax -- i.e. the structure of the template -- and will parenthesize as
necessary.
.. autoclass:: PythonExprTemplate
.. autoclass:: PythonStmtTemplate
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import tokenize
from typing import Text
from absl import logging
import attr
import cached_property
import six
from refex import formatting
from refex.python import matcher
from refex.python import python_pattern
from refex.python.matchers import ast_matchers
from refex.python.matchers import base_matchers
from refex.python.matchers import syntax_matchers
@attr.s(frozen=True)
class _LexicalTemplate(object):
"""Lexically-aware Python templates.
$variables are only used for replacements if they occur inside Python, not
inside of e.g. a string literal or a comment. So "a = $x" has an x variable
in the template, but "a = '$x'" does not.
"""
template = attr.ib()
_tokens = attr.ib(init=False, repr=False)
_var_to_i = attr.ib(init=False, repr=False)
variables = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
# Because we have frozen=True, creating values for _tokens and _var_to_i
# is a little complex, and requires invoking object.__setattr__.
tokenized, metavar_indices = python_pattern.token_pattern(self.template)
var_to_i = {}
for i in metavar_indices:
var = tokenized[i][1]
var_to_i.setdefault(var, []).append(i)
object.__setattr__(self, '_tokens', tokenized)
object.__setattr__(self, '_var_to_i', var_to_i)
object.__setattr__(self, 'variables', six.viewkeys(var_to_i))
def substitute(self, replacements):
"""Lexically-aware substitution.
Args:
replacements: A map from metavariable name to new content.
Returns:
Substituted Python code.
Raises:
KeyError: A variable in the template was not specified in the
replacements. This is to be compatible with string.Template.
"""
if not self.template:
# Work around: if u'' is untokenized and then retokenized,
# it comes back out as b'' on Python 2!
return self.template
# Take a copy of tokens to modify the target slots.
tokens = list(self._tokens)
free_vars = set(self._var_to_i)
logging.debug('Applying %r to tokens %r for substitution of %r', free_vars,
tokens, replacements)
for var, new in six.iteritems(replacements):
try:
all_i = self._var_to_i[var]
except KeyError:
# overspecified: a replacement for a variable not in the template.
continue
free_vars.remove(var)
for i in all_i:
tok = list(tokens[i])
tok[1] = new
tokens[i] = tuple(tok)
if free_vars:
raise KeyError(next(iter(free_vars)))
return tokenize.untokenize(tokens)
@attr.s(frozen=True)
class _BasePythonTemplate(formatting.Template):
"""Base class for syntax-aware templates.
The templates use ShTemplate/string.Template style templates. e.g. "$x + $y".
Subclasses must override _pattern_factory to provide a matcher for the
template.
Attributes:
template: The source template
"""
template = attr.ib(type=Text)
_lexical_template = attr.ib(repr=False, init=False, type=_LexicalTemplate)
_ast_matcher = attr.ib(repr=False, init=False, type=matcher.Matcher)
def __attrs_post_init__(self):
if not isinstance(self.template, six.text_type):
raise TypeError('Expected text, got: {}'.format(
type(self.template).__name__))
@_lexical_template.default
def _lexical_template_default(self):
return _LexicalTemplate(self.template)
@_ast_matcher.default
def _ast_matcher_default(self):
return self._pattern_factory(self.template)
def substitute_match(self, parsed, match, matches):
"""Syntax-aware substitution, parsing the template using _pattern_factory.
Args:
parsed: The ParsedFile being substituted into.
match: The match being replaced.
matches: The matches used for formatting.
Returns:
Substituted Python code.
"""
replacement, _ = self._parenthesized(
_matchers_for_matches(matches),
formatting.stringify_matches(matches),
)
if not isinstance(parsed, matcher.PythonParsedFile):
# This file is not (known to be) a Python file, so we can't (efficiently)
# check the replacement into the parent AST node.
# TODO: It would help if we could automatically obtain a
# PythonParsedFile for any given ParsedFile. This also lets us compose
# arbitrary searchers together, some of which take a PythonParsedFile, and
# others which don't.
return replacement
# replacement is now a safely-interpolated string: all of the substitutions
# are parenthesized where needed. The only thing left is to ensure that when
# replacement itself is substituted in, it itself is parenthesized
# correctly.
#
# To do this, we essentially repeat the same safe substitution algorithm,
# except using the context of the replacement as a fake pattern.
#
# Note that this whole process is only valid if we are matching an actual
# AST node, and an expr node at that.
# For example, it is _not_ valid to replace b'foo' with b('foo'), or 'foo'
# with 'f(o)o'.
if not isinstance(match, matcher.LexicalASTMatch):
# The match wasn't even for an AST node. We'll assume they know what
# they're doing and pass thru verbatim. Non-AST matches can't be
# automatically parenthesized -- for example, b'foo' vs b('foo')
return replacement
if not isinstance(match.matched, ast.expr):
# Only expressions need to be parenthesized, so this is fine as-is.
return replacement
parent = parsed.nav.get_parent(match.matched)
if isinstance(parent, ast.Expr):
# the matched object is already a top-level expression, and will never
# need extra parentheses.
# We are assuming here that the template does not contain comments,
# which is not enforced. We also assume that it doesn't contain raw
# newlines, but this is enforced by the template and substitution both
# needing to parse on their own.
return replacement
if isinstance(parent, ast.stmt) and hasattr(parent, 'body'):
# TODO(b/139758169): re-enable reparsing of statements in templating.
# Multi-line statements can't be reparsed due to issues with
# indentation. We can usually safely assume that it doesn't need parens,
# although exceptions exist. (Including, in an ironic twist, "except".)
return replacement
# keep navigating up until we reach a lexical AST node.
# e.g. skip over lists.
while True:
parent_span = matcher.create_match(parsed, parent).span
if parent_span is not None and isinstance(parent, (ast.expr, ast.stmt)):
# We need a parent node which has a known source span,
# and which is by itself parseable (i.e. is an expr or stmt).
break
next_parent = parsed.nav.get_parent(parent)
if isinstance(next_parent, ast.stmt) and hasattr(next_parent, 'body'):
# TODO(b/139758169): re-enable reparsing of statements in templating.
# We encountered no reparseable parents between here and a
# non-reparseable statement, so, as before, we must fall back to
# returning the replacement verbatim and hoping it isn't broken.
# (For example, replacing T with T1, T2 in "class A(T)" is incorrect.)
return replacement
parent = next_parent
else:
raise formatting.RewriteError(
"Bug in Python formatter? Couldn't find parent of %r" % match)
# Re-apply the safe substitution, but on the parent.
context_start, context_end = parent_span
match_start, match_end = match.span
prefix = parsed.text[context_start:match_start]
suffix = parsed.text[match_end:context_end]
if isinstance(parent, ast.expr):
# expressions can occur in a multiline context, but now that we've
# removed it from its context for reparsing, we're in a single-line
# unparenthesized context. We need to add parens to make sure this
# parses correctly.
prefix = '(' + prefix
suffix += ')'
parent_pseudotemplate = PythonTemplate(prefix + u'$current_expr' + suffix)
parsed_replacement = ast.parse(replacement)
if len(parsed_replacement.body) != 1 or not isinstance(
parsed_replacement.body[0], ast.Expr):
raise formatting.RewriteError(
"Non-expression template can't be used in expression context: %s" %
self.template)
current_matcher = syntax_matchers.ast_matchers_matcher(
parsed_replacement.body[0].value)
_, safe_mapping = parent_pseudotemplate._parenthesized( # pylint: disable=protected-access
{u'current_expr': current_matcher}, {u'current_expr': replacement})
return safe_mapping[u'current_expr']
def _parenthesized(self, matchers, stringified_matches):
"""Parenthesizes a substitution for a template.
Args:
matchers: Dict mapping {var: Matcher that must match this variable}
stringified_matches: Dict mapping variable -> string for match.
Returns:
A tuple of two elements:
0: The full safely parenthesized substitution.
1: A dict mapping var -> parenthesized string, for each match.
"""
safe_mapping = {}
unparenthesized_mapping = {}
for k, v in stringified_matches.items():
raw_string = stringified_matches[k]
if k in matchers:
safe_mapping[k] = '(%s)' % raw_string
unparenthesized_mapping[k] = raw_string
else:
# Non-expressions cannot be parenthesized and must be inserted verbatim.
safe_mapping[k] = raw_string
# We start parenthesized and try dropping parentheses to see if things
# still match the same way.
# First, build up the canonical replacement:
replacement = self._lexical_template.substitute(safe_mapping)
# Now let's parse the produced canonical replacement and make sure that it
# looks "correct" -- it is structurally the same as our template is,
# and the substituted in values are identical.
try:
parsed_template = matcher.parse_ast(replacement,
'<_BasePythonTemplate pattern>')
except matcher.ParseError as e:
raise formatting.RewriteError(
'Bug in Python formatter? Failed to parse formatted Python string as '
'Python: template=%r, substitute(matches for %r) -> %r: %s' %
(self.template, stringified_matches, replacement, e))
m = self._ast_matcher.match(
matcher.MatchContext(parsed_template), parsed_template.tree)
if m is None:
raise formatting.RewriteError(
'Bug in Python formatter? Even "safe" formatting of Python template '
'produced an incorrect and different AST, so it must be discarded: '
' template=%r, substitute(matches for %r) -> %r' %
(self.template, stringified_matches, replacement))
for k, bound in m.bindings.items():
v = bound.value
if not matchers[k].match(
matcher.MatchContext(parsed_template), v.matched):
raise formatting.RewriteError(
'Bug in Python formatter? Even "safe" formatting of Python template'
' produced an incorrect and different AST, so it must be discarded '
'[variable %s=%r was corrupted -- %r]: '
'template=%r, substitute(matches for %r) -> %r' %
(k, matchers[k], v, self.template, stringified_matches,
replacement))
# The preliminaries are done: safe templating worked, and all that's left is
# to try to make the substitutions less over-parenthesized.
candidate_matcher = syntax_matchers.ast_matchers_matcher(
parsed_template.tree)
for k, unparenthesized in unparenthesized_mapping.items():
parenthesized = safe_mapping[k]
safe_mapping[k] = unparenthesized
try:
alt_replacement = self._lexical_template.substitute(safe_mapping)
alt_parsed = matcher.parse_ast(
alt_replacement, '<_BasePythonTemplate alternate proposal>')
except matcher.ParseError as e:
pass
else:
if candidate_matcher.match(
matcher.MatchContext(alt_parsed), alt_parsed.tree):
replacement = alt_replacement
continue
# if we made it this far, the replacement was not a success.
safe_mapping[k] = parenthesized
return replacement, safe_mapping
@cached_property.cached_property
def variables(self):
return self._lexical_template.variables
def _matchers_for_matches(matches):
"""Returns AST matchers for all expressions in `matches`.
Args:
matches: A mapping of variable name -> match
Returns:
A mapping of <variable name> -> <matcher that must match>.
Only variables that can be parenthesized are in this mapping, and the
matcher must match where those variables are substituted in.
"""
matchers = {}
for k, v in matches.items():
if (isinstance(v, matcher.LexicalASTMatch) and
isinstance(v.matched, ast.expr)):
matchers[k] = syntax_matchers.ast_matchers_matcher(v.matched)
else:
# as a fallback, treat it as a black box, and assume that the rest of the
# expression will catch things.
matchers[k] = base_matchers.Anything()
return matchers
class PythonTemplate(_BasePythonTemplate):
_pattern_factory = syntax_matchers.ModulePattern
class PythonExprTemplate(_BasePythonTemplate):
"""A template for a Python expression."""
@staticmethod
def _pattern_factory(pattern):
return ast_matchers.Module(
body=base_matchers.ItemsAre(
[ast_matchers.Expr(value=syntax_matchers.ExprPattern(pattern))]))
class PythonStmtTemplate(_BasePythonTemplate):
"""A template for a single Python statement."""
@staticmethod
def _pattern_factory(pattern):
return ast_matchers.Module(
body=base_matchers.ItemsAre([syntax_matchers.StmtPattern(pattern)]))
| 2.21875
| 2
|
core/polyaxon/polyboard/processors/logs_processor.py
|
admariner/polyaxon
| 3,200
|
12784338
|
<filename>core/polyaxon/polyboard/processors/logs_processor.py
# !/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from polyaxon.polyboard.logging.handler import PolyaxonHandler
EXCLUDE_DEFAULT_LOGGERS = ("polyaxon.client", "polyaxon.cli")
def setup_logging(add_logs, exclude=EXCLUDE_DEFAULT_LOGGERS):
plx_logger = logging.getLogger()
plx_logger.setLevel(logging.INFO)
if logging.StreamHandler not in map(type, plx_logger.handlers):
plx_logger.addHandler(logging.StreamHandler())
plx_logger.propagate = False
if PolyaxonHandler in map(type, plx_logger.handlers):
for handler in plx_logger.handlers:
if isinstance(handler, PolyaxonHandler):
handler.set_add_logs(add_logs=add_logs)
else:
handler = PolyaxonHandler(add_logs=add_logs)
plx_logger.addHandler(handler)
for logger_name in exclude:
plx_logger = logging.getLogger(logger_name)
if logging.StreamHandler not in map(type, plx_logger.handlers):
plx_logger.addHandler(logging.StreamHandler())
plx_logger.propagate = False
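# Example wiring (sketch; ``collector`` is a placeholder for whatever callable
# PolyaxonHandler should forward captured log lines to):
#
# setup_logging(add_logs=collector)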
| 1.914063
| 2
|
opt/lib/local_lib_color.py
|
MeF0504/basic_setup
| 4
|
12784339
|
<gh_stars>1-10
#! /usr/bin/env python3
from __future__ import print_function
BG = {'k':'\033[40m','w':'\033[47m','r':'\033[41m','g':'\033[42m','b':'\033[44m','m':'\033[45m','c':'\033[46m','y':'\033[43m'}
FG = {'k':'\033[30m','w':'\033[37m','r':'\033[31m','g':'\033[32m','b':'\033[34m','m':'\033[35m','c':'\033[36m','y':'\033[33m'}
END = '\033[0m'
def BG256(n):
if (0 <= n < 256):
return '\033[48;5;%dm' % n
else:
return ''
def FG256(n):
if (0 <= n < 256):
return '\033[38;5;%dm' % n
else:
return ''
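# Example usage (sketch; 196 is bright red in the xterm-256 palette):
#
# print(FG256(196) + 'alert' + END)
# print(BG['k'] + FG['y'] + 'warning' + END)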
# for vim color test
def isdark(r,g,b):
# cond = (r+g+b<7) and (max([r,g,b])<4)
# cond = (r**2+g**2+b**2 < 5**2)
# if r < 4:
# cond = (g==0 or g*g+b*b < 3**2)
# cond = (g<3 and g+b < 6)
# else:
# cond = g*g+b*b < (7-r)**2
w_r, w_g, w_b = (0.299,0.587,0.114)
cond = (r*w_r+g*w_g+b*w_b)/(w_r+w_g+w_b) < 2.1
w_r_old, w_g_old, w_b_old = (0.299,0.587,0.114)
cond_old = (r*w_r+g*w_g+b*w_b)/(w_r+w_g+w_b) < 2.1
return cond, cond_old
col_list = None
def convert_color_name(color_name, color_type, verbose=False):
if color_type not in ['256', 'full']:
if verbose:
print('incorrect color type ({}).'.format(color_type))
print('selectable type: "256" or "full". return None.')
return None
global col_list
if col_list is None:
col_list = { \
"black" : {'256':0 , 'full': '#000000'}, \
"maroon" : {'256':1 , 'full': '#800000'}, \
"green" : {'256':2 , 'full': '#008000'}, \
"olive" : {'256':3 , 'full': '#808000'}, \
"navy" : {'256':4 , 'full': '#000080'}, \
"purple" : {'256':5 , 'full': '#800080'}, \
"teal" : {'256':6 , 'full': '#008080'}, \
"silver" : {'256':7 , 'full': '#c0c0c0'}, \
"gray" : {'256':8 , 'full': '#808080'}, \
"grey" : {'256':8 , 'full': '#808080'}, \
"red" : {'256':9 , 'full': '#ff0000'}, \
"lime" : {'256':10 , 'full': '#00ff00'}, \
"yellow" : {'256':11 , 'full': '#ffff00'}, \
"blue" : {'256':12 , 'full': '#0000ff'}, \
"fuchsia" : {'256':13 , 'full': '#ff00ff'}, \
"aqua" : {'256':14 , 'full': '#00ffff'}, \
"white" : {'256':15 , 'full': '#ffffff'}, \
"gray0" : {'256':16 , 'full': '#000000'}, \
"grey0" : {'256':16 , 'full': '#000000'}, \
"navyblue" : {'256':17 , 'full': '#00005f'}, \
"darkblue" : {'256':18 , 'full': '#000087'}, \
"blue3" : {'256':19 , 'full': '#0000af'}, \
"blue3" : {'256':20 , 'full': '#0000d7'}, \
"blue1" : {'256':21 , 'full': '#0000ff'}, \
"darkgreen" : {'256':22 , 'full': '#005f00'}, \
"deepskyblue4" : {'256':23 , 'full': '#005f5f'}, \
"deepskyblue4" : {'256':24 , 'full': '#005f87'}, \
"deepskyblue4" : {'256':25 , 'full': '#005faf'}, \
"dodgerblue3" : {'256':26 , 'full': '#005fd7'}, \
"dodgerblue2" : {'256':27 , 'full': '#005fff'}, \
"green4" : {'256':28 , 'full': '#008700'}, \
"springgreen4" : {'256':29 , 'full': '#00875f'}, \
"turquoise4" : {'256':30 , 'full': '#008787'}, \
"deepskyblue3" : {'256':31 , 'full': '#0087af'}, \
"deepskyblue3" : {'256':32 , 'full': '#0087d7'}, \
"dodgerblue1" : {'256':33 , 'full': '#0087ff'}, \
"green3" : {'256':34 , 'full': '#00af00'}, \
"springgreen3" : {'256':35 , 'full': '#00af5f'}, \
"darkcyan" : {'256':36 , 'full': '#00af87'}, \
"lightseagreen" : {'256':37 , 'full': '#00afaf'}, \
"deepskyblue2" : {'256':38 , 'full': '#00afd7'}, \
"deepskyblue1" : {'256':39 , 'full': '#00afff'}, \
"green3" : {'256':40 , 'full': '#00d700'}, \
"springgreen3" : {'256':41 , 'full': '#00d75f'}, \
"springgreen2" : {'256':42 , 'full': '#00d787'}, \
"cyan3" : {'256':43 , 'full': '#00d7af'}, \
"darkturquoise" : {'256':44 , 'full': '#00d7d7'}, \
"turquoise2" : {'256':45 , 'full': '#00d7ff'}, \
"green1" : {'256':46 , 'full': '#00ff00'}, \
"springgreen2" : {'256':47 , 'full': '#00ff5f'}, \
"springgreen1" : {'256':48 , 'full': '#00ff87'}, \
"mediumspringgreen" : {'256':49 , 'full': '#00ffaf'}, \
"cyan2" : {'256':50 , 'full': '#00ffd7'}, \
"cyan1" : {'256':51 , 'full': '#00ffff'}, \
"darkred" : {'256':52 , 'full': '#5f0000'}, \
"deeppink4" : {'256':53 , 'full': '#5f005f'}, \
"purple4" : {'256':54 , 'full': '#5f0087'}, \
"purple4" : {'256':55 , 'full': '#5f00af'}, \
"purple3" : {'256':56 , 'full': '#5f00d7'}, \
"blueviolet" : {'256':57 , 'full': '#5f00ff'}, \
"orange4" : {'256':58 , 'full': '#5f5f00'}, \
"gray37" : {'256':59 , 'full': '#5f5f5f'}, \
"grey37" : {'256':59 , 'full': '#5f5f5f'}, \
"mediumpurple4" : {'256':60 , 'full': '#5f5f87'}, \
"slateblue3" : {'256':61 , 'full': '#5f5faf'}, \
"slateblue3" : {'256':62 , 'full': '#5f5fd7'}, \
"royalblue1" : {'256':63 , 'full': '#5f5fff'}, \
"chartreuse4" : {'256':64 , 'full': '#5f8700'}, \
"darkseagreen4" : {'256':65 , 'full': '#5f875f'}, \
"paleturquoise4" : {'256':66 , 'full': '#5f8787'}, \
"steelblue" : {'256':67 , 'full': '#5f87af'}, \
"steelblue3" : {'256':68 , 'full': '#5f87d7'}, \
"cornflowerblue" : {'256':69 , 'full': '#5f87ff'}, \
"chartreuse3" : {'256':70 , 'full': '#5faf00'}, \
"darkseagreen4" : {'256':71 , 'full': '#5faf5f'}, \
"cadetblue" : {'256':72 , 'full': '#5faf87'}, \
"cadetblue" : {'256':73 , 'full': '#5fafaf'}, \
"skyblue3" : {'256':74 , 'full': '#5fafd7'}, \
"steelblue1" : {'256':75 , 'full': '#5fafff'}, \
"chartreuse3" : {'256':76 , 'full': '#5fd700'}, \
"palegreen3" : {'256':77 , 'full': '#5fd75f'}, \
"seagreen3" : {'256':78 , 'full': '#5fd787'}, \
"aquamarine3" : {'256':79 , 'full': '#5fd7af'}, \
"mediumturquoise" : {'256':80 , 'full': '#5fd7d7'}, \
"steelblue1" : {'256':81 , 'full': '#5fd7ff'}, \
"chartreuse2" : {'256':82 , 'full': '#5fff00'}, \
"seagreen2" : {'256':83 , 'full': '#5fff5f'}, \
"seagreen1" : {'256':84 , 'full': '#5fff87'}, \
"seagreen1" : {'256':85 , 'full': '#5fffaf'}, \
"aquamarine1" : {'256':86 , 'full': '#5fffd7'}, \
"darkslategray2" : {'256':87 , 'full': '#5fffff'}, \
"darkslategrey2" : {'256':87 , 'full': '#5fffff'}, \
"darkred" : {'256':88 , 'full': '#870000'}, \
"deeppink4" : {'256':89 , 'full': '#87005f'}, \
"darkmagenta" : {'256':90 , 'full': '#870087'}, \
"darkmagenta" : {'256':91 , 'full': '#8700af'}, \
"darkviolet" : {'256':92 , 'full': '#8700d7'}, \
"purple" : {'256':93 , 'full': '#8700ff'}, \
"orange4" : {'256':94 , 'full': '#875f00'}, \
"lightpink4" : {'256':95 , 'full': '#875f5f'}, \
"plum4" : {'256':96 , 'full': '#875f87'}, \
"mediumpurple3" : {'256':97 , 'full': '#875faf'}, \
"mediumpurple3" : {'256':98 , 'full': '#875fd7'}, \
"slateblue1" : {'256':99 , 'full': '#875fff'}, \
"yellow4" : {'256':100 , 'full': '#878700'}, \
"wheat4" : {'256':101 , 'full': '#87875f'}, \
"gray53" : {'256':102 , 'full': '#878787'}, \
"grey53" : {'256':102 , 'full': '#878787'}, \
"lightslategray" : {'256':103 , 'full': '#8787af'}, \
"lightslategrey" : {'256':103 , 'full': '#8787af'}, \
"mediumpurple" : {'256':104 , 'full': '#8787d7'}, \
"lightslateblue" : {'256':105 , 'full': '#8787ff'}, \
"yellow4" : {'256':106 , 'full': '#87af00'}, \
"darkolivegreen3" : {'256':107 , 'full': '#87af5f'}, \
"darkseagreen" : {'256':108 , 'full': '#87af87'}, \
"lightskyblue3" : {'256':109 , 'full': '#87afaf'}, \
"lightskyblue3" : {'256':110 , 'full': '#87afd7'}, \
"skyblue2" : {'256':111 , 'full': '#87afff'}, \
"chartreuse2" : {'256':112 , 'full': '#87d700'}, \
"darkolivegreen3" : {'256':113 , 'full': '#87d75f'}, \
"palegreen3" : {'256':114 , 'full': '#87d787'}, \
"darkseagreen3" : {'256':115 , 'full': '#87d7af'}, \
"darkslategray3" : {'256':116 , 'full': '#87d7d7'}, \
"darkslategrey3" : {'256':116 , 'full': '#87d7d7'}, \
"skyblue1" : {'256':117 , 'full': '#87d7ff'}, \
"chartreuse1" : {'256':118 , 'full': '#87ff00'}, \
"lightgreen" : {'256':119 , 'full': '#87ff5f'}, \
"lightgreen" : {'256':120 , 'full': '#87ff87'}, \
"palegreen1" : {'256':121 , 'full': '#87ffaf'}, \
"aquamarine1" : {'256':122 , 'full': '#87ffd7'}, \
"darkslategray1" : {'256':123 , 'full': '#87ffff'}, \
"darkslategrey1" : {'256':123 , 'full': '#87ffff'}, \
"red3" : {'256':124 , 'full': '#af0000'}, \
"deeppink4" : {'256':125 , 'full': '#af005f'}, \
"mediumvioletred" : {'256':126 , 'full': '#af0087'}, \
"magenta3" : {'256':127 , 'full': '#af00af'}, \
"darkviolet" : {'256':128 , 'full': '#af00d7'}, \
"purple" : {'256':129 , 'full': '#af00ff'}, \
"darkorange3" : {'256':130 , 'full': '#af5f00'}, \
"indianred" : {'256':131 , 'full': '#af5f5f'}, \
"hotpink3" : {'256':132 , 'full': '#af5f87'}, \
"mediumorchid3" : {'256':133 , 'full': '#af5faf'}, \
"mediumorchid" : {'256':134 , 'full': '#af5fd7'}, \
"mediumpurple2" : {'256':135 , 'full': '#af5fff'}, \
"darkgoldenrod" : {'256':136 , 'full': '#af8700'}, \
"lightsalmon3" : {'256':137 , 'full': '#af875f'}, \
"rosybrown" : {'256':138 , 'full': '#af8787'}, \
"gray63" : {'256':139 , 'full': '#af87af'}, \
"grey63" : {'256':139 , 'full': '#af87af'}, \
"mediumpurple2" : {'256':140 , 'full': '#af87d7'}, \
"mediumpurple1" : {'256':141 , 'full': '#af87ff'}, \
"gold3" : {'256':142 , 'full': '#afaf00'}, \
"darkkhaki" : {'256':143 , 'full': '#afaf5f'}, \
"navajowhite3" : {'256':144 , 'full': '#afaf87'}, \
"gray69" : {'256':145 , 'full': '#afafaf'}, \
"grey69" : {'256':145 , 'full': '#afafaf'}, \
"lightsteelblue3" : {'256':146 , 'full': '#afafd7'}, \
"lightsteelblue" : {'256':147 , 'full': '#afafff'}, \
"yellow3" : {'256':148 , 'full': '#afd700'}, \
"darkolivegreen3" : {'256':149 , 'full': '#afd75f'}, \
"darkseagreen3" : {'256':150 , 'full': '#afd787'}, \
"darkseagreen2" : {'256':151 , 'full': '#afd7af'}, \
"lightcyan3" : {'256':152 , 'full': '#afd7d7'}, \
"lightskyblue1" : {'256':153 , 'full': '#afd7ff'}, \
"greenyellow" : {'256':154 , 'full': '#afff00'}, \
"darkolivegreen2" : {'256':155 , 'full': '#afff5f'}, \
"palegreen1" : {'256':156 , 'full': '#afff87'}, \
"darkseagreen2" : {'256':157 , 'full': '#afffaf'}, \
"darkseagreen1" : {'256':158 , 'full': '#afffd7'}, \
"paleturquoise1" : {'256':159 , 'full': '#afffff'}, \
"red3" : {'256':160 , 'full': '#d70000'}, \
"deeppink3" : {'256':161 , 'full': '#d7005f'}, \
"deeppink3" : {'256':162 , 'full': '#d70087'}, \
"magenta3" : {'256':163 , 'full': '#d700af'}, \
"magenta3" : {'256':164 , 'full': '#d700d7'}, \
"magenta2" : {'256':165 , 'full': '#d700ff'}, \
"darkorange3" : {'256':166 , 'full': '#d75f00'}, \
"indianred" : {'256':167 , 'full': '#d75f5f'}, \
"hotpink3" : {'256':168 , 'full': '#d75f87'}, \
"hotpink2" : {'256':169 , 'full': '#d75faf'}, \
"orchid" : {'256':170 , 'full': '#d75fd7'}, \
"mediumorchid1" : {'256':171 , 'full': '#d75fff'}, \
"orange3" : {'256':172 , 'full': '#d78700'}, \
"lightsalmon3" : {'256':173 , 'full': '#d7875f'}, \
"lightpink3" : {'256':174 , 'full': '#d78787'}, \
"pink3" : {'256':175 , 'full': '#d787af'}, \
"plum3" : {'256':176 , 'full': '#d787d7'}, \
"violet" : {'256':177 , 'full': '#d787ff'}, \
"gold3" : {'256':178 , 'full': '#d7af00'}, \
"lightgoldenrod3" : {'256':179 , 'full': '#d7af5f'}, \
"tan" : {'256':180 , 'full': '#d7af87'}, \
"mistyrose3" : {'256':181 , 'full': '#d7afaf'}, \
"thistle3" : {'256':182 , 'full': '#d7afd7'}, \
"plum2" : {'256':183 , 'full': '#d7afff'}, \
"yellow3" : {'256':184 , 'full': '#d7d700'}, \
"khaki3" : {'256':185 , 'full': '#d7d75f'}, \
"lightgoldenrod2" : {'256':186 , 'full': '#d7d787'}, \
"lightyellow3" : {'256':187 , 'full': '#d7d7af'}, \
"gray84" : {'256':188 , 'full': '#d7d7d7'}, \
"grey84" : {'256':188 , 'full': '#d7d7d7'}, \
"lightsteelblue1" : {'256':189 , 'full': '#d7d7ff'}, \
"yellow2" : {'256':190 , 'full': '#d7ff00'}, \
"darkolivegreen1" : {'256':191 , 'full': '#d7ff5f'}, \
"darkolivegreen1" : {'256':192 , 'full': '#d7ff87'}, \
"darkseagreen1" : {'256':193 , 'full': '#d7ffaf'}, \
"honeydew2" : {'256':194 , 'full': '#d7ffd7'}, \
"lightcyan1" : {'256':195 , 'full': '#d7ffff'}, \
"red1" : {'256':196 , 'full': '#ff0000'}, \
"deeppink2" : {'256':197 , 'full': '#ff005f'}, \
"deeppink1" : {'256':198 , 'full': '#ff0087'}, \
"deeppink1" : {'256':199 , 'full': '#ff00af'}, \
"magenta2" : {'256':200 , 'full': '#ff00d7'}, \
"magenta1" : {'256':201 , 'full': '#ff00ff'}, \
"orangered1" : {'256':202 , 'full': '#ff5f00'}, \
"indianred1" : {'256':203 , 'full': '#ff5f5f'}, \
"indianred1" : {'256':204 , 'full': '#ff5f87'}, \
"hotpink" : {'256':205 , 'full': '#ff5faf'}, \
"hotpink" : {'256':206 , 'full': '#ff5fd7'}, \
"mediumorchid1" : {'256':207 , 'full': '#ff5fff'}, \
"darkorange" : {'256':208 , 'full': '#ff8700'}, \
"salmon1" : {'256':209 , 'full': '#ff875f'}, \
"lightcoral" : {'256':210 , 'full': '#ff8787'}, \
"palevioletred1" : {'256':211 , 'full': '#ff87af'}, \
"orchid2" : {'256':212 , 'full': '#ff87d7'}, \
"orchid1" : {'256':213 , 'full': '#ff87ff'}, \
"orange1" : {'256':214 , 'full': '#ffaf00'}, \
"sandybrown" : {'256':215 , 'full': '#ffaf5f'}, \
"lightsalmon1" : {'256':216 , 'full': '#ffaf87'}, \
"lightpink1" : {'256':217 , 'full': '#ffafaf'}, \
"pink1" : {'256':218 , 'full': '#ffafd7'}, \
"plum1" : {'256':219 , 'full': '#ffafff'}, \
"gold1" : {'256':220 , 'full': '#ffd700'}, \
"lightgoldenrod2" : {'256':221 , 'full': '#ffd75f'}, \
"lightgoldenrod2" : {'256':222 , 'full': '#ffd787'}, \
"navajowhite1" : {'256':223 , 'full': '#ffd7af'}, \
"mistyrose1" : {'256':224 , 'full': '#ffd7d7'}, \
"thistle1" : {'256':225 , 'full': '#ffd7ff'}, \
"yellow1" : {'256':226 , 'full': '#ffff00'}, \
"lightgoldenrod1" : {'256':227 , 'full': '#ffff5f'}, \
"khaki1" : {'256':228 , 'full': '#ffff87'}, \
"wheat1" : {'256':229 , 'full': '#ffffaf'}, \
"cornsilk1" : {'256':230 , 'full': '#ffffd7'}, \
"gray100" : {'256':231 , 'full': '#ffffff'}, \
"grey100" : {'256':231 , 'full': '#ffffff'}, \
"gray3" : {'256':232 , 'full': '#080808'}, \
"grey3" : {'256':232 , 'full': '#080808'}, \
"gray7" : {'256':233 , 'full': '#121212'}, \
"grey7" : {'256':233 , 'full': '#121212'}, \
"gray11" : {'256':234 , 'full': '#1c1c1c'}, \
"grey11" : {'256':234 , 'full': '#1c1c1c'}, \
"gray15" : {'256':235 , 'full': '#262626'}, \
"grey15" : {'256':235 , 'full': '#262626'}, \
"gray19" : {'256':236 , 'full': '#303030'}, \
"grey19" : {'256':236 , 'full': '#303030'}, \
"gray23" : {'256':237 , 'full': '#3a3a3a'}, \
"grey23" : {'256':237 , 'full': '#3a3a3a'}, \
"gray27" : {'256':238 , 'full': '#444444'}, \
"grey27" : {'256':238 , 'full': '#444444'}, \
"gray30" : {'256':239 , 'full': '#4e4e4e'}, \
"grey30" : {'256':239 , 'full': '#4e4e4e'}, \
"gray35" : {'256':240 , 'full': '#585858'}, \
"grey35" : {'256':240 , 'full': '#585858'}, \
"gray39" : {'256':241 , 'full': '#626262'}, \
"grey39" : {'256':241 , 'full': '#626262'}, \
"gray42" : {'256':242 , 'full': '#6c6c6c'}, \
"grey42" : {'256':242 , 'full': '#6c6c6c'}, \
"gray46" : {'256':243 , 'full': '#767676'}, \
"grey46" : {'256':243 , 'full': '#767676'}, \
"gray50" : {'256':244 , 'full': '#808080'}, \
"grey50" : {'256':244 , 'full': '#808080'}, \
"gray54" : {'256':245 , 'full': '#8a8a8a'}, \
"grey54" : {'256':245 , 'full': '#8a8a8a'}, \
"gray58" : {'256':246 , 'full': '#949494'}, \
"grey58" : {'256':246 , 'full': '#949494'}, \
"gray62" : {'256':247 , 'full': '#9e9e9e'}, \
"grey62" : {'256':247 , 'full': '#9e9e9e'}, \
"gray66" : {'256':248 , 'full': '#a8a8a8'}, \
"grey66" : {'256':248 , 'full': '#a8a8a8'}, \
"gray70" : {'256':249 , 'full': '#b2b2b2'}, \
"grey70" : {'256':249 , 'full': '#b2b2b2'}, \
"gray74" : {'256':250 , 'full': '#bcbcbc'}, \
"grey74" : {'256':250 , 'full': '#bcbcbc'}, \
"gray78" : {'256':251 , 'full': '#c6c6c6'}, \
"grey78" : {'256':251 , 'full': '#c6c6c6'}, \
"gray82" : {'256':252 , 'full': '#d0d0d0'}, \
"grey82" : {'256':252 , 'full': '#d0d0d0'}, \
"gray85" : {'256':253 , 'full': '#dadada'}, \
"grey85" : {'256':253 , 'full': '#dadada'}, \
"gray89" : {'256':254 , 'full': '#e4e4e4'}, \
"grey89" : {'256':254 , 'full': '#e4e4e4'}, \
"gray93" : {'256':255 , 'full': '#eeeeee'}, \
"grey93" : {'256':255 , 'full': '#eeeeee'}, \
}
try:
import matplotlib.colors as mcolors
named_colors = mcolors.get_named_colors_mapping()
col_list.update(named_colors)
except ImportError as e:
if verbose:
print('matplotlib is not imported.')
for i in range(101):
if 'gray{:d}'.format(i) in col_list:
continue
gray_level = int(255*i/100+0.5)
col_list['gray{:d}'.format(i)] = {'256':None, 'full': '#{:02x}{:02x}{:02x}'.format(gray_level, gray_level, gray_level)}
col_list['grey{:d}'.format(i)] = {'256':None, 'full': '#{:02x}{:02x}{:02x}'.format(gray_level, gray_level, gray_level)}
# print(col_list)
    if color_name not in col_list:
        if verbose:
            print('no matching color name {} found. return None.'.format(color_name))
        return None
    else:
        col = col_list[color_name]
        if isinstance(col, dict):
            return col[color_type]
        elif isinstance(col, str):
if color_type == 'full':
return col_list[color_name]
elif color_type == '256':
r = int(col[1:3], 16)
g = int(col[3:5], 16)
b = int(col[5:7], 16)
return convert_fullcolor_to_256(r, g, b)
else:
r, g, b = col
if color_type == 'full':
return '#{:02x}{:02x}{:02x}'.format(int(255*r), int(255*g), int(255*b))
elif color_type == '256':
r = int(r*255)
g = int(g*255)
b = int(b*255)
return convert_fullcolor_to_256(r, g, b)
def convert_256_to_fullcolor(color_index):
if color_index < 16:
color_list = [ \
'#000000', \
'#800000', \
'#008000', \
'#808000', \
'#000080', \
'#800080', \
'#008080', \
'#c0c0c0', \
'#808080', \
'#ff0000', \
'#00ff00', \
'#ffff00', \
'#0000ff', \
'#ff00ff', \
'#00ffff', \
'#ffffff', \
]
return color_list[color_index]
    elif color_index < 232:
        r_index = int((color_index-16)/36)
        g_index = int((color_index-16-36*r_index)/6)
        b_index = int( color_index-16-36*r_index-6*g_index)
        # print('r:{}, g:{}, b:{}'.format(r_index, g_index, b_index))
        # 6x6x6 cube levels: 0 for index 0, otherwise 55+40*index
        rgb = [0 if idx == 0 else 55+40*idx for idx in (r_index, g_index, b_index)]
        return '#{:02x}{:02x}{:02x}'.format(rgb[0], rgb[1], rgb[2])
    elif color_index < 256:
        gray_level = 8+10*(color_index-232)
        return '#{:02x}{:02x}{:02x}'.format(gray_level, gray_level, gray_level)
def convert_fullcolor_to_256(r, g, b):
r_index = int((r-55)/40+0.5)
if r_index < 0:
r_index = 0
g_index = int((g-55)/40+0.5)
if g_index < 0:
g_index = 0
b_index = int((b-55)/40+0.5)
if b_index < 0:
b_index = 0
return 36*r_index+6*g_index+b_index+16
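# illustrative round-trip sketch (added; not part of the original file):
# shows how the two converters above relate; a 6x6x6-cube index maps to a hex
# string and approximately back. This demo function is hypothetical and unused.
def _demo_color_roundtrip(color_index=110):
    full = convert_256_to_fullcolor(color_index)
    r = int(full[1:3], 16)
    g = int(full[3:5], 16)
    b = int(full[5:7], 16)
    back = convert_fullcolor_to_256(r, g, b)
    print('{} -> {} -> {}'.format(color_index, full, back))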
def main_test(num):
print('system colors')
for i in range(8):
if num == 1:
if i%2 == 0: # even
tmp_st = '{}{:02x}{}'.format(FG['w'], i, END)
else: # odd
tmp_st = '{}{:02x}{}'.format(FG['k'], i, END)
else:
tmp_st = ' '
print('{}{}{}'.format(BG256(i), tmp_st, END), end='')
print()
for i in range(8,16):
if num == 1:
if i%2 == 0: # even
tmp_st = '{}{:02x}{}'.format(FG['w'], i, END)
else: # odd
tmp_st = '{}{:02x}{}'.format(FG['k'], i, END)
else:
tmp_st = ' '
print('{}{}{}'.format(BG256(i), tmp_st, END), end='')
print('\n')
print('6x6x6 color blocks')
for g in range(6):
for r in range(6):
for b in range(6):
i = 36*r+6*g+b+16
if num == 0:
tmp_st = ' '
elif num == 1:
if i%2 == 0: # even
tmp_st = '{}{:02x}{}'.format(FG['w'], i, END)
else: # odd
tmp_st = '{}{:02x}{}'.format(FG['k'], i, END)
else:
# tmp_st = '{}{:02x}{}'.format(FG256(36*((r+3)%6)+6*((g+3)%6)+(b+3)%6+16), i, END)
dark_new, dark_old = isdark(r, g, b)
if dark_new:
tmp_st = '{}{:02x}{}'.format(FG256(255), i, END)
else:
tmp_st = '{}{:02x}{}'.format(FG256(234), i, END)
print('{}{}{}'.format(BG256(i), tmp_st, END), end='')
print(' ', end='')
print()
print()
print('gray scales')
st = 6*6*6+16
for i in range(st, 256):
if num == 1:
tmp_st = '{}{:02x}{}'.format(FG256(255+st-i), i, END)
else:
tmp_st = ' '
print('{}{}{}'.format(BG256(i), tmp_st, END), end='')
print('\n')
if num == 2:
for r in range(6):
for g in range(6):
for b in range(6):
i = 36*r+6*g+b+16
dark_new, dark_old = isdark(r, g, b)
if dark_new != dark_old:
if dark_new:
fg1 = FG256(255)
fg2 = FG256(234)
else:
fg1 = FG256(234)
fg2 = FG256(255)
print('{}{}{:02x}{} -> {}{}{:03d}{}'.format(BG256(i), fg2, i, END, BG256(i), fg1, i, END), end=' ')
print()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--num', help='0... no fg, 1... show number, 2... is_dark', choices=[0,1,2], type=int)
args = parser.parse_args()
if hasattr(args, 'num') and (args.num is not None):
num = args.num
else:
num = 0
main_test(num)
| 2.46875
| 2
|
wp/setup_posts.py
|
positiondev/offset
| 3
|
12784340
|
from subprocess import check_output
from os import system
system ("wp post delete 1")
system("wp post create --post_title='A first post' --post_status=publish --post_date='2014-10-01 07:00:00' --post_content=\"This is the content\" --post_author=1")
system("wp post create --post_title='A second post' --post_status=publish --post_date='2014-10-02 07:00:00' --post_content=\"This is the second post content\" --post_author=1 ")
system("wp post create --post_title='A third post' --post_status=publish --post_date='2014-10-10 07:00:00' --post_content=\"This is the third post content\" --post_author=1 ")
system("wp post create --post_title='A fourth post' --post_status=publish --post_date='2014-10-15 07:00:00' --post_content=\"This is the fourth post content\" --post_author=1 ")
system("wp post create --post_title='A sports post' --post_status=publish --post_date='2014-10-20 07:00:00' --post_content=\"This is the sports post content\" --post_author=1")
system("wp post create --post_type=page --post_title='A first page' --post_status=publish --post_date='2014-10-15 07:00:00' --post_content=\"This is the first page content\" --post_author=1")
system("wp option update permalink_structure '/%year%/%monthnum%/%postname%/' ")
system("wp plugin activate --all")
system("wp user update 1 --display_name='<NAME>' --first_name='Ira' --last_name='Rubel' ")
system("wp eval-file create_password.php")
(p5, p4, p3, p2, p1) = check_output(["wp","post","list","--field=ID"]).split()
system("wp post term add %s post_tag tag1" % p1)
system("wp post term add %s post_tag tag1" % p2)
system("wp post term add %s post_tag tag2" % p2)
system("wp post term add %s post_tag tag1" % p3)
system("wp post term add %s category cat1" % p1)
system("wp post term add %s departments sports" % p5)
| 2.375
| 2
|
xmeos/models/old/master_orig.py
|
aswolf/xmeos
| 1
|
12784341
|
import numpy as np
import scipy as sp
import copy
from abc import ABCMeta, abstractmethod
from scipy import integrate
import scipy.interpolate as interpolate
import matplotlib.pyplot as plt
#====================================================================
# xmeos: Xtal-Melt Equation of State package
# models - library of equation of state models
#====================================================================
#====================================================================
# SECT 0: Admin functions
#====================================================================
class Control(object):
@classmethod
def init_consts( cls, eos_d ):
eos_d['const_d'] = cls.default_consts()
pass
@classmethod
def default_consts(cls):
const_d = {}
const_d['eVperHa'] = 27.211 # eV/Ha
const_d['JperHa'] = 4.35974434e-18 # J/Ha
const_d['JperCal'] = 4.184 # J/Cal
const_d['Nmol'] = 6.0221413e+23 # atoms/mol
const_d['kJ_molpereV'] = 96.49 # kJ/mol/eV
const_d['R'] = 8.314462 # J/K/mol
const_d['kboltz'] = 8.617332e-5 # eV/K
const_d['ang3percc'] = 1e24 # ang^3/cm^3
const_d['PV_ratio'] = 160.2176487 # (GPa*ang^3)/eV
const_d['TS_ratio'] = const_d['R']/const_d['kboltz'] # (J/mol)/eV
return const_d
@classmethod
def set_consts( cls, name_l, val_l, eos_d ):
if 'const_d' in eos_d.keys():
const_d = eos_d['const_d']
else:
cls.init_consts( eos_d )
for name, val in zip( name_l, val_l ):
const_d[name] = val
pass
@classmethod
def get_consts( cls, name_l, eos_d ):
"""
Retrieve list of desired consts stored in eos_d['const_d']
"""
const_d = eos_d['const_d']
const_l = []
for name in name_l:
const_l.append( const_d[name] )
return tuple( const_l )
@classmethod
def set_params( cls, name_l, val_l, eos_d ):
if 'param_d' in eos_d.keys():
param_d = eos_d['param_d']
else:
param_d = {}
eos_d['param_d'] = param_d
for name, val in zip( name_l, val_l ):
param_d[name] = val
pass
@classmethod
def get_params( cls, name_l, eos_d ):
"""
Retrieve list of desired params stored in eos_d['param_d']
"""
param_d = eos_d['param_d']
param_l = []
for name in name_l:
param_l.append( param_d[name] )
return tuple( param_l )
@classmethod
    def swap_params( cls, name_l, val_l, eos_d ):
        """
        Return a copy of eos_d with the named params replaced by new values,
        leaving the original eos_d unmodified
        """
        # Use shallow copy to avoid unneeded duplication
        eos_swap_d = copy.copy( eos_d )
        # Use deep copy on params to ensure swap without affecting original
        param_swap_d = copy.deepcopy(eos_d['param_d'])
        eos_swap_d['param_d'] = param_swap_d
        cls.set_params( name_l, val_l, eos_swap_d )
return eos_swap_d
@classmethod
def set_array_params( cls, basename, param_arr_a, eos_d ):
name_l = []
for i in range(len(param_arr_a)):
            iname = basename+'_'+str(i)
name_l.append(iname)
cls.set_params(name_l, param_arr_a, eos_d)
@classmethod
def get_array_params( cls, basename, eos_d ):
param_d = eos_d['param_d']
        paramkeys_a = np.array(list(param_d.keys()))
baselen = len(basename+'_')
mask = np.array([key.startswith(basename+'_') for key in paramkeys_a])
arrindlist = []
vallist = []
for key in paramkeys_a[mask]:
idstr = key[baselen:]
try:
                idnum = np.array(idstr).astype(float)
                assert np.equal(np.mod(idnum,1),0), \
                    'Parameter keys that are part of a parameter array must '+\
                    'have form "basename_???" where ??? are integers.'
                idnum = idnum.astype(int)
except:
assert False, 'That basename does not correspond to any valid parameter arrays stored in eos_d'
arrindlist.append(idnum)
vallist.append(param_d[key])
arrind_a = np.array(arrindlist)
val_a = np.array(vallist)
if arrind_a.size==0:
return np.array([])
else:
indmax = np.max(arrind_a)
param_arr = np.zeros(indmax+1)
for arrind, val in zip(arrind_a,val_a):
param_arr[arrind] = val
return param_arr
@classmethod
def set_modtypes( cls, name_l, val_l, eos_d ):
if 'modtype_d' in eos_d.keys():
modtype_d = eos_d['modtype_d']
else:
modtype_d = {}
eos_d['modtype_d'] = modtype_d
# Should we verify match?
for name, val in zip( name_l, val_l ):
            if name in globals():
                # modtype = globals()[name]
                # modtype_d[name] = modtype()
                modtype_d[name] = val
            else:
                print(name + " is not a valid modtype object")
pass
@classmethod
def get_modtypes( cls, name_l, eos_d ):
"""
Retrieve list of desired model types stored in eos_d['modtype_d']
"""
modtype_d = eos_d['modtype_d']
modtype_l = []
for name in name_l:
modtype_l.append( modtype_d[name] )
return tuple( modtype_l )
@classmethod
def set_args( cls, name_l, val_l, eos_d ):
if 'arg_d' in eos_d.keys():
arg_d = eos_d['arg_d']
else:
arg_d = {}
eos_d['arg_d'] = arg_d
for name, val in zip( name_l, val_l ):
arg_d[name] = val
pass
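#====================================================================
# Illustrative sketch (added; not part of the original code): how the Control
# helpers store an array-valued parameter under 'basename_i' keys and rebuild
# it. The coefficient values below are hypothetical placeholders.
def _demo_array_params():
    eos_d = {}
    Control.set_array_params( 'blogcoef', np.array([0.1, -0.3, 2.0]), eos_d )
    # rebuilt from the 'blogcoef_0'..'blogcoef_2' entries in eos_d['param_d']
    return Control.get_array_params( 'blogcoef', eos_d )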
#====================================================================
#====================================================================
# SECT 1: Fitting Routines
#====================================================================
class ModFit(object):
def __init__( self ):
pass
def get_resid_fun( self, eos_fun, eos_d, param_key_a, sys_state_tup,
val_a, err_a=1.0):
# def resid_fun( param_in_a, eos_fun=eos_fun, eos_d=eos_d,
# param_key_a=param_key_a, sys_state_tup=sys_state_tup,
# val_a=val_a, err_a=err_a ):
# # Don't worry about transformations right now
# param_a = param_in_a
# # set param value in eos_d dict
# globals()['set_param']( param_key_a, param_a, eos_d )
# # Take advantage of eos model function input format
# # uses tuple expansion for input arguments
# mod_val_a = eos_fun( *(sys_state_tup+(eos_d,)) )
# resid_a = (mod_val_a - val_a)/err_a
# return resid_a
wrap_eos_fun = self.get_wrap_eos_fun( eos_fun, eos_d, param_key_a )
def resid_fun( param_in_a, wrap_eos_fun=wrap_eos_fun,
sys_state_tup=sys_state_tup,
val_a=val_a, err_a=err_a ):
mod_val_a = wrap_eos_fun( param_in_a, sys_state_tup )
resid_a = (mod_val_a - val_a)/err_a
return resid_a
return resid_fun
def get_wrap_eos_fun( self, eos_fun, eos_d, param_key_a ):
def wrap_eos_fun(param_in_a, sys_state_tup, eos_fun=eos_fun,
eos_d=eos_d, param_key_a=param_key_a ):
# Don't worry about transformations right now
param_a = param_in_a
# set param value in eos_d dict
Control.set_params( param_key_a, param_a, eos_d )
# Take advantage of eos model function input format
# uses tuple expansion for input arguments
mod_val_a = eos_fun( *(sys_state_tup+(eos_d,)) )
return mod_val_a
return wrap_eos_fun
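#====================================================================
# Illustrative fitting sketch (added; not part of the original code): one way
# the residual function from ModFit.get_resid_fun might be handed to scipy's
# least-squares optimizer. The compression model, parameter names, and data
# arrays are hypothetical placeholders, not a prescribed workflow.
def _example_compress_fit( V_data_a, P_data_a, eos_d ):
    from scipy import optimize
    compress_path_mod = eos_d['modtype_d']['CompressPathMod']
    param_key_a = ['V0','K0','KP0']
    param0_a = np.array( Control.get_params( param_key_a, eos_d ) )
    resid_fun = ModFit().get_resid_fun( compress_path_mod.press, eos_d,
                                        param_key_a, (V_data_a,), P_data_a )
    paramfit_a, ier = optimize.leastsq( resid_fun, param0_a )
    return paramfit_a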
#====================================================================
# SECT N: Code Utility Functions
#====================================================================
def fill_array( var1, var2 ):
"""
    Broadcast var1 and var2 against one another and return two numpy arrays
    of equal shape (scalars are expanded using numpy.full_like)
"""
var1_a = np.asarray( var1 )
var2_a = np.asarray( var2 )
if var1_a.shape==():
var1_a = np.asarray( [var1] )
if var2_a.shape==():
var2_a = np.asarray( [var2] )
# Begin try/except block to handle all cases for filling an array
while True:
try:
assert var1_a.shape == var2_a.shape
break
except: pass
try:
var1_a = np.full_like( var2_a, var1_a )
break
except: pass
try:
var2_a = np.full_like( var1_a, var2_a )
break
except: pass
# If none of the cases properly handle it, throw error
assert False, 'var1 and var2 must both be equal shape or size=1'
return var1_a, var2_a
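# illustrative usage (added; not part of the original code): a scalar is
# broadcast against an array so both returned arrays share a shape.
def _demo_fill_array():
    V_a, T_a = fill_array( 10.0, np.array([1.0, 2.0, 3.0]) )
    return V_a, T_a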
#====================================================================
#====================================================================
# SECT 3: EOS Objects
#====================================================================
# 3.1: Base Classes
#====================================================================
class EosMod(object):
"""
Abstract Equation of State Parent Base class
"""
__metaclass__ = ABCMeta
def __init__( self ):
pass
def get_param_scale( self, eos_d, apply_expand_adj=True):
"""Return scale values for each parameter"""
return self.get_param_scale_sub( eos_d )
def get_param_scale_sub( self, eos_d ):
raise NotImplementedError("'get_param_scale_sub' function not implimented for this model")
#====================================================================
class CompressMod(EosMod):
"""
Abstract Equation of State class to describe Compression Behavior
generally depends on both vol and temp
"""
__metaclass__ = ABCMeta
def press( self, V_a, T_a, eos_d ):
"""Returns Press behavior due to compression."""
return self.calc_press(V_a, T_a, eos_d)
def vol( self, P_a, T_a, eos_d ):
"""Returns Vol behavior due to compression."""
        return self.calc_vol(P_a, T_a, eos_d)
def energy( self, V_a, T_a, eos_d ):
"""Returns Energy behavior due to compression."""
return self.calc_energy(V_a, T_a, eos_d)
def energy_perturb( self, V_a, T_a, eos_d ):
"""Returns Energy pertubation basis functions resulting from fractional changes to EOS params."""
return self.calc_energy_perturb(V_a, T_a, eos_d)
def bulk_mod( self, V_a, T_a, eos_d ):
"""Returns Bulk Modulus behavior due to compression."""
return self.calc_bulk_mod(V_a, T_a, eos_d)
def bulk_mod_deriv( self, V_a, T_a, eos_d ):
"""Returns Bulk Modulus Deriv (K') behavior due to compression."""
return self.calc_bulk_mod_deriv(V_a, T_a, eos_d)
    # Standard methods must be overridden (as needed) by implementation model
    def calc_press( self, V_a, T_a, eos_d ):
        """Calculates Press behavior due to compression."""
        raise NotImplementedError("'calc_press' function not implemented for this model")
    def calc_vol( self, P_a, T_a, eos_d ):
        """Calculates Vol behavior due to compression."""
        raise NotImplementedError("'calc_vol' function not implemented for this model")
    def calc_energy( self, V_a, T_a, eos_d ):
        """Calculates Energy behavior due to compression."""
        raise NotImplementedError("'calc_energy' function not implemented for this model")
    def calc_energy_perturb( self, V_a, T_a, eos_d ):
        """Calculates Energy perturbation basis functions resulting from fractional changes to EOS params."""
        raise NotImplementedError("'calc_energy_perturb' function not implemented for this model")
    def calc_bulk_mod( self, V_a, T_a, eos_d ):
        """Calculates Bulk Modulus behavior due to compression."""
        raise NotImplementedError("'calc_bulk_mod' function not implemented for this model")
    def calc_bulk_mod_deriv( self, V_a, T_a, eos_d ):
        """Calculates Bulk Modulus Deriv (K') behavior due to compression."""
        raise NotImplementedError("'calc_bulk_mod_deriv' function not implemented for this model")
#====================================================================
class CompressPathMod(CompressMod):
"""
Abstract Equation of State class for a reference Compression Path
Path can either be isothermal (T=const) or adiabatic (S=const)
For this restricted path, thermodyn properties depend only on volume
"""
__metaclass__ = ABCMeta
path_opts = ['T','S']
supress_energy = False
supress_press = False
def __init__( self, path_const='T', level_const=300, expand_adj_mod=None,
expand_adj=None, supress_energy=False, supress_press=False ):
assert path_const in self.path_opts, path_const + ' is not a valid ' + \
            'path const. You must select one of: ' + str(self.path_opts)
self.path_const = path_const
self.level_const = level_const
self.supress_energy = supress_energy
self.supress_press = supress_press
# Use Expansion Adjustment for negative pressure region?
if expand_adj is None:
self.expand_adj = False
else:
self.expand_adj = expand_adj
if expand_adj_mod is None:
self.expand_adj = False
self.expand_adj_mod = None
else:
self.expand_adj = True
self.expand_adj_mod = expand_adj_mod
pass
def validate_shared_param_scale( self, scale_pos_a, paramkey_pos_a,
scale_neg_a, paramkey_neg_a ):
TOL = 1e-4
assert np.all(np.in1d(paramkey_pos_a,paramkey_neg_a)),\
'paramkey_neg_a must be a superset of paramkey_pos_a'
assert len(paramkey_neg_a) <= len(paramkey_pos_a)+1,\
'paramkey_neg_a must have at most one more parameter than paramkey_neg_a'
# shared_mask = np.in1d(paramkey_neg_a,paramkey_pos_a)
# paramkey_shared_a = paramkey_neg_a[shared_mask]
# scale_shared_a = scale_neg_a[shared_mask]
ind_pos_a = np.array([np.where(paramkey_neg_a==paramkey)[0][0] \
for paramkey in paramkey_pos_a])
# scale_a[ind_pos_a] = scale_pos_a
assert np.all(np.log(scale_neg_a[ind_pos_a]/scale_pos_a)<TOL),\
'Shared param scales must match to within TOL.'
return ind_pos_a
def get_param_scale( self, eos_d, apply_expand_adj=False , output_ind=False):
if not self.expand_adj :
return self.get_param_scale_sub( eos_d )
else:
scale_a, paramkey_a = self.get_param_scale_sub( eos_d )
scale_a = np.append(scale_a,0.01)
paramkey_a = np.append(paramkey_a,'logPmin')
return scale_a, paramkey_a
# paramkey_pos_a = np.append(paramkey_pos_a,1.0)
# scale_neg_a, paramkey_neg_a = self.expand_adj_mod.get_param_scale_sub( eos_d )
# ind_pos_a = self.validate_shared_param_scale(scale_pos_a,paramkey_pos_a,
# scale_neg_a,paramkey_neg_a)
# # Since negative expansion EOS model params are a superset of those
# # required for the positive compression model, we can simply return the
# # scale and paramkey values from the negative expansion model
# scale_a = scale_neg_a
# paramkey_a = paramkey_neg_a
# if output_ind:
# return scale_a, paramkey_a, ind_pos_a
# else:
# return scale_a, paramkey_a
def get_ind_exp( self, V_a, eos_d ):
V0 = Control.get_params( ['V0'], eos_d )
ind_exp = np.where( V_a > V0 )[0]
return ind_exp
def get_path_const( self ):
return self.path_const
def get_level_const( self ):
return self.level_const
# EOS property functions
def param_deriv( self, fname, paramname, V_a, eos_d, dxfrac=1e-6):
scale_a, paramkey_a = self.get_param_scale( eos_d, apply_expand_adj=True )
scale = scale_a[paramkey_a==paramname][0]
# print 'scale: ' + np.str(scale)
#if (paramname is 'E0') and (fname is 'energy'):
# return np.ones(V_a.shape)
try:
fun = getattr(self, fname)
# Note that self is implicitly included
val0_a = fun( V_a, eos_d)
except:
assert False, 'That is not a valid function name ' + \
'(e.g. it should be press or energy)'
try:
param = Control.get_params( [paramname], eos_d )[0]
dparam = scale*dxfrac
# print 'param: ' + np.str(param)
# print 'dparam: ' + np.str(dparam)
except:
assert False, 'This is not a valid parameter name'
# set param value in eos_d dict
Control.set_params( [paramname,], [param+dparam,], eos_d )
# Note that self is implicitly included
dval_a = fun(V_a, eos_d) - val0_a
# reset param to original value
Control.set_params( [paramname], [param], eos_d )
deriv_a = dval_a/dxfrac
return deriv_a
def press( self, V_a, eos_d, apply_expand_adj=True):
if self.supress_press:
zero_a = 0.0*V_a
return zero_a
else:
press_a = self.calc_press(V_a, eos_d)
if self.expand_adj and apply_expand_adj:
ind_exp = self.get_ind_exp(V_a, eos_d)
if (ind_exp.size>0):
press_a[ind_exp] = self.expand_adj_mod.calc_press( V_a[ind_exp], eos_d )
return press_a
pass
def energy( self, V_a, eos_d, apply_expand_adj=True ):
if self.supress_energy:
zero_a = 0.0*V_a
return zero_a
else:
energy_a = self.calc_energy(V_a, eos_d)
if self.expand_adj and apply_expand_adj:
ind_exp = self.get_ind_exp(V_a, eos_d)
if apply_expand_adj and (ind_exp.size>0):
energy_a[ind_exp] = self.expand_adj_mod.calc_energy( V_a[ind_exp], eos_d )
return energy_a
def bulk_mod( self, V_a, eos_d, apply_expand_adj=True ):
bulk_mod_a = self.calc_bulk_mod(V_a, eos_d)
if self.expand_adj and apply_expand_adj:
ind_exp = self.get_ind_exp(V_a, eos_d)
if apply_expand_adj and (ind_exp.size>0):
bulk_mod_a[ind_exp] = self.expand_adj_mod.calc_bulk_mod( V_a[ind_exp], eos_d )
return bulk_mod_a
def bulk_mod_deriv( self,V_a, eos_d, apply_expand_adj=True ):
bulk_mod_deriv_a = self.calc_bulk_mod_deriv(V_a, eos_d)
if self.expand_adj and apply_expand_adj:
ind_exp = self.get_ind_exp(V_a, eos_d)
if apply_expand_adj and (ind_exp.size>0):
bulk_mod_deriv_a[ind_exp] = self.expand_adj_mod_deriv.calc_bulk_mod_deriv( V_a[ind_exp], eos_d )
return bulk_mod_deriv_a
def energy_perturb( self, V_a, eos_d, apply_expand_adj=True ):
# Eval positive press values
Eperturb_pos_a, scale_a, paramkey_a = self.calc_energy_perturb( V_a, eos_d )
if (self.expand_adj==False) or (apply_expand_adj==False):
return Eperturb_pos_a, scale_a, paramkey_a
else:
Nparam_pos = Eperturb_pos_a.shape[0]
scale_a, paramkey_a, ind_pos = \
self.get_param_scale( eos_d, apply_expand_adj=True,
output_ind=True )
Eperturb_a = np.zeros((paramkey_a.size, V_a.size))
Eperturb_a[ind_pos,:] = Eperturb_pos_a
# Overwrite negative pressure Expansion regions
ind_exp = self.get_ind_exp(V_a, eos_d)
if ind_exp.size>0:
Eperturb_adj_a = \
self.expand_adj_mod.calc_energy_perturb( V_a[ind_exp],
eos_d )[0]
Eperturb_a[:,ind_exp] = Eperturb_adj_a
return Eperturb_a, scale_a, paramkey_a
    # Standard methods must be overridden (as needed) by implementation model
    def get_param_scale_sub( self, eos_d):
        raise NotImplementedError("'get_param_scale_sub' function not implemented for this model")
    def calc_press( self, V_a, eos_d ):
        """Returns Press variation along compression curve."""
        raise NotImplementedError("'press' function not implemented for this model")
    def calc_energy( self, V_a, eos_d ):
        """Returns Energy along compression curve."""
        raise NotImplementedError("'energy' function not implemented for this model")
    def calc_energy_perturb( self, V_a, eos_d ):
        """Returns Energy perturbation basis functions resulting from fractional changes to EOS params."""
fname = 'energy'
scale_a, paramkey_a = self.get_param_scale\
( eos_d, apply_expand_adj=self.expand_adj )
Eperturb_a = []
for paramname in paramkey_a:
iEperturb_a = self.param_deriv( fname, paramname, V_a, eos_d)
Eperturb_a.append(iEperturb_a)
Eperturb_a = np.array(Eperturb_a)
return Eperturb_a, scale_a, paramkey_a
    def calc_bulk_mod( self, V_a, eos_d ):
        """Returns Bulk Modulus variation along compression curve."""
        raise NotImplementedError("'bulk_mod' function not implemented for this model")
    def calc_bulk_mod_deriv( self, V_a, eos_d ):
        """Returns Bulk Modulus Deriv (K') variation along compression curve."""
        raise NotImplementedError("'bulk_mod_deriv' function not implemented for this model")
#====================================================================
class ThermalMod(EosMod):
"""
Abstract Equation of State class to describe Thermal Behavior
generally depends on both vol and temp
"""
__metaclass__ = ABCMeta
# EOS property functions
def energy( self, V_a, T_a, eos_d ):
return self.calc_energy( V_a, T_a, eos_d )
def heat_capacity( self, V_a, T_a, eos_d ):
return self.calc_heat_capacity( V_a, T_a, eos_d )
def press( self, V_a, T_a, eos_d ):
return self.calc_press( V_a, T_a, eos_d )
def entropy( self, V_a, T_a, eos_d ):
return self.calc_entropy( V_a, T_a, eos_d )
def vol( self, P_a, T_a, eos_d ):
return self.calc_vol( P_a, T_a, eos_d )
    # Standard methods must be overridden (as needed) by implementation model
    def calc_energy( self, V_a, T_a, eos_d ):
        """Returns Thermal Component of Energy."""
        raise NotImplementedError("'energy' function not implemented for this model")
    def calc_heat_capacity( self, V_a, T_a, eos_d ):
        """Returns Heat Capacity."""
        raise NotImplementedError("'heat_capacity' function not implemented for this model")
    def calc_entropy( self, V_a, T_a, eos_d ):
        """Returns Entropy."""
        raise NotImplementedError("'entropy' function not implemented for this model")
    def calc_press( self, V_a, T_a, eos_d ):
        """Returns thermal contribution to pressure."""
        raise NotImplementedError("'press' function not implemented for this model")
    def calc_vol( self, P_a, T_a, eos_d ):
        """Returns thermally expanded volume."""
        raise NotImplementedError("'vol' function not implemented for this model")
#====================================================================
class ThermalPathMod(ThermalMod):
"""
Abstract Equation of State class for a reference Thermal Path
Path can either be isobaric (P=const) or isochoric (V=const)
For this restricted path, thermodyn properties depend only on temperature.
"""
__metaclass__ = ABCMeta
path_opts = ['P','V']
def __init__( self, path_const='V', level_const=np.nan ):
assert path_const in self.path_opts, path_const + ' is not a valid ' + \
            'path const. You must select one of: ' + str(self.path_opts)
self.path_const = path_const
self.level_const = level_const
pass
def get_path_const( self ):
return self.path_const
def get_level_const( self ):
return self.level_const
# EOS property functions
def energy( self, T_a, eos_d ):
return self.calc_energy( T_a, eos_d )
def heat_capacity( self, T_a, eos_d ):
return self.calc_heat_capacity( T_a, eos_d )
def press( self, T_a, eos_d ):
return self.calc_press( T_a, eos_d )
def entropy( self, T_a, eos_d ):
return self.calc_entropy( T_a, eos_d )
def vol( self, T_a, eos_d ):
return self.calc_vol( T_a, eos_d )
    # Standard methods must be overridden (as needed) by implementation model
    def calc_energy( self, T_a, eos_d ):
        """Returns Thermal Component of Energy along heating path."""
        raise NotImplementedError("'energy' function not implemented for this model")
    def calc_heat_capacity( self, T_a, eos_d ):
        """Returns Heat Capacity along heating path."""
        raise NotImplementedError("'heat_capacity' function not implemented for this model")
    def calc_entropy( self, T_a, eos_d ):
        """Returns Entropy along heating path."""
        raise NotImplementedError("'entropy' function not implemented for this model")
    def calc_press( self, T_a, eos_d ):
        """Returns thermal contribution to pressure along heating path."""
        raise NotImplementedError("'press' function not implemented for this model")
    def calc_vol( self, T_a, eos_d ):
        """Returns thermally expanded volume along heating path."""
        raise NotImplementedError("'vol' function not implemented for this model")
#====================================================================
class GammaMod(EosMod):
"""
Abstract Equation of State class for Gruneisen Parameter curves
"""
__metaclass__ = ABCMeta
def __init__( self, V0ref=True ):
self.V0ref = V0ref
pass
@abstractmethod
def gamma( self, V_a, eos_d ):
"""Returns Gruneisen Param (gamma) variation due to compression."""
# @abstractmethod
# def temp( self, V_a, T0, eos_d ):
# """Returns Gruneisen Param (gamma) variation due to compression."""
def temp( self, V_a, TR, eos_d ):
"""
Return temperature for debye model
V_a: sample volume array
TR: temperature at V=VR
"""
if np.isscalar(V_a):
V_a = np.array([V_a])
TOL = 1e-8
Nsamp = 81
# Nsamp = 281
# Nsamp = 581
if self.V0ref:
VR, = Control.get_params( ['V0'], eos_d )
else:
VR, = Control.get_params( ['VR'], eos_d )
Vmin = np.min(V_a)
Vmax = np.max(V_a)
dVmax = np.log(Vmax/VR)
dVmin = np.log(Vmin/VR)
T_a = TR*np.ones(V_a.size)
if np.abs(dVmax) < TOL:
dVmax = 0.0
if np.abs(dVmin) < TOL:
dVmin = 0.0
if dVmax > TOL:
indhi_a = np.where(np.log(V_a/VR) > TOL)[0]
# indhi_a = np.where(V_a > VR)[0]
# ensure numerical stability by shifting
# if (Vmax-VR)<=TOL:
# T_a[indhi_a] = TR
# else:
Vhi_a = np.linspace(VR,Vmax,Nsamp)
gammahi_a = self.gamma( Vhi_a, eos_d )
logThi_a = integrate.cumtrapz(-gammahi_a/Vhi_a,x=Vhi_a)
logThi_a = np.append([0],logThi_a)
logtemphi_f = interpolate.interp1d(Vhi_a,logThi_a,kind='cubic')
T_a[indhi_a] = TR*np.exp(logtemphi_f(V_a[indhi_a]))
if dVmin < -TOL:
indlo_a = np.where(np.log(V_a/VR) < -TOL)[0]
# indlo_a = np.where(V_a <= VR)[0]
# # ensure numerical stability by shifting
# if (VR-Vmin)<TOL:
# T_a[indlo_a] = TR
# else:
Vlo_a = np.linspace(VR,Vmin,Nsamp)
gammalo_a = self.gamma( Vlo_a, eos_d )
logTlo_a = integrate.cumtrapz(-gammalo_a/Vlo_a,x=Vlo_a)
logTlo_a = np.append([0],logTlo_a)
logtemplo_f = interpolate.interp1d(Vlo_a,logTlo_a,kind='cubic')
T_a[indlo_a] = TR*np.exp(logtemplo_f(V_a[indlo_a]))
return T_a
#====================================================================
class FullMod(EosMod):
"""
Abstract Equation of State class for Full Model (combines all EOS terms)
"""
__metaclass__ = ABCMeta
    # Standard methods must be overridden (as needed) by implementation model
    def press( self, V_a, T_a, eos_d ):
        """Returns Total Press."""
        raise NotImplementedError("'press' function not implemented for this model")
    def energy( self, V_a, T_a, eos_d ):
        """Returns Total Energy."""
        raise NotImplementedError("'energy' function not implemented for this model")
def therm_exp( self, V_a, T_a, eos_d ):
TOL = 1e-4
V_a, T_a = fill_array( V_a, T_a )
dlogV = 1e-4
S_a = self.entropy( V_a, T_a, eos_d )
KT_a = self.bulk_modulus( V_a, T_a, eos_d )
S_hi_a = self.entropy( np.exp(dlogV)*V_a, T_a, eos_d )
S_lo_a = self.entropy( np.exp(-dlogV)*V_a, T_a, eos_d )
dSdlogV_a = (S_hi_a-S_lo_a) / (2*dlogV)
alpha_a = dSdlogV_a/(KT_a*V_a*eos_d['const_d']['PV_ratio'])
return alpha_a
    def bulk_mod( self, V_a, T_a, eos_d ):
        """Returns Total Bulk Modulus."""
        raise NotImplementedError("'bulk_mod' function not implemented for this model")
#====================================================================
# 3.2: Model Implementations
#====================================================================
# 3.2.1: CompressPathMod Implementations
#====================================================================
class BirchMurn3(CompressPathMod):
def calc_press( self, V_a, eos_d ):
V0, K0, KP0 = Control.get_params( ['V0','K0','KP0'], eos_d )
vratio_a = 1.0*V_a/V0
press_a = 3.0/2*K0 * (vratio_a**(-7.0/3) - vratio_a**(-5.0/3)) * \
(1 + 3.0/4*(KP0-4)*(vratio_a**(-2.0/3)-1))
return press_a
def calc_energy( self, V_a, eos_d ):
V0, K0, KP0, E0 = Control.get_params( ['V0','K0','KP0','E0'], eos_d )
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
vratio_a = 1.0*V_a/V0
fstrain_a = 0.5*(vratio_a**(-2.0/3) - 1)
energy_a = E0 + 9.0/2*(V0*K0/PV_ratio)*\
( KP0*fstrain_a**3 + fstrain_a**2*(1-4*fstrain_a) )
return energy_a
#====================================================================
class BirchMurn4(CompressPathMod):
def get_param_scale_sub( self, eos_d):
"""Return scale values for each parameter"""
V0, K0, KP0, KP20 = Control.get_params( ['V0','K0','KP0','KP20'], eos_d )
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
paramkey_a = np.array(['V0','K0','KP0','KP20','E0'])
scale_a = np.array([V0,K0,KP0,KP0/K0,K0*V0/PV_ratio])
return scale_a, paramkey_a
def calc_strain_energy_coeffs(self, nexp, K0, KP0, KP20 ):
a1 = 3./2*(KP0-nexp-2)
a2 = 3./2*(K0*KP20 + KP0*(KP0-2*nexp-3)+3+4*nexp+11./9*nexp**2)
return a1,a2
def calc_press( self, V_a, eos_d ):
# globals()['set_param']( ['nexp'], [self.nexp], eos_d )
# press_a = self.gen_finite_strain_mod.press( V_a, eos_d )
V0, K0, KP0, KP20 = Control.get_params( ['V0','K0','KP0','KP20'], eos_d )
nexp = +2.0
vratio_a = 1.0*V_a/V0
fstrain_a = 1./nexp*(vratio_a**(-nexp/3) - 1)
a1,a2 = self.calc_strain_energy_coeffs(nexp,K0,KP0,KP20)
press_a = 3.0*K0*(1+a1*fstrain_a + a2*fstrain_a**2)*\
fstrain_a*(nexp*fstrain_a+1)**((nexp+3)/nexp)
return press_a
def calc_energy( self, V_a, eos_d ):
# globals()['set_param']( ['nexp'], [self.nexp], eos_d )
# energy_a = self.gen_finite_strain_mod.energy( V_a, eos_d )
V0, K0, KP0, KP20, E0 = Control.get_params( ['V0','K0','KP0','KP20','E0'], eos_d )
nexp = +2.0
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
vratio_a = 1.0*V_a/V0
fstrain_a = 1./nexp*(vratio_a**(-nexp/3) - 1)
a1,a2 = self.calc_strain_energy_coeffs(nexp,K0,KP0,KP20)
energy_a = E0 + 9.0*(V0*K0/PV_ratio)*\
( 0.5*fstrain_a**2 + a1/3*fstrain_a**3 + a2/4*fstrain_a**4)
return energy_a
#====================================================================
class GenFiniteStrain(CompressPathMod):
"""
Generalized Finite Strain EOS from Jeanloz1989b
Note: nexp=2 yields Birch Murnaghan (eulerian strain) EOS
          nexp=-2 yields lagrangian strain EOS
"""
def calc_strain_energy_coeffs(self, nexp, K0, KP0, KP20=None, KP30=None):
a1 = 3./2*(KP0-nexp-2)
if KP20 is None:
return a1
else:
a2 = 3./2*(K0*KP20 + KP0*(KP0-2*nexp-3)+3+4*nexp+11./9*nexp**2)
if KP30 is None:
return a1,a2
else:
a3 = 1./8*(9*K0**2*KP30 + 6*(6*KP0-5*nexp-6)*K0*KP20
+((3*KP0-5*nexp-6)**2 +10*nexp**2 + 30*nexp + 18)*KP0
-(50./3*nexp**3 + 70*nexp**2 + 90*nexp + 36))
return a1,a2,a3
def calc_press( self, V_a, eos_d ):
V0, K0, KP0, KP20, nexp = Control.get_params( ['V0','K0','KP0','KP20','nexp'], eos_d )
vratio_a = 1.0*V_a/V0
fstrain_a = 1./nexp*(vratio_a**(-nexp/3) - 1)
a1,a2 = self.calc_strain_energy_coeffs(nexp,K0,KP0,KP20=KP20)
press_a = 3.0*K0*(1+a1*fstrain_a + a2*fstrain_a**2)*\
fstrain_a*(nexp*fstrain_a+1)**((nexp+3)/nexp)
return press_a
def calc_energy( self, V_a, eos_d ):
V0, K0, KP0, KP20, E0, nexp = Control.get_params( ['V0','K0','KP0','KP20','E0','nexp'], eos_d )
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
vratio_a = 1.0*V_a/V0
fstrain_a = 1./nexp*(vratio_a**(-nexp/3) - 1)
a1,a2 = self.calc_strain_energy_coeffs(nexp,K0,KP0,KP20=KP20)
energy_a = E0 + 9.0*(V0*K0/PV_ratio)*\
( 0.5*fstrain_a**2 + a1/3*fstrain_a**3 + a2/4*fstrain_a**4)
return energy_a
#====================================================================
class Vinet(CompressPathMod):
def get_param_scale_sub( self, eos_d):
"""Return scale values for each parameter"""
V0, K0, KP0 = Control.get_params( ['V0','K0','KP0'], eos_d )
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
paramkey_a = np.array(['V0','K0','KP0','E0'])
scale_a = np.array([V0,K0,KP0,K0*V0/PV_ratio])
return scale_a, paramkey_a
def calc_press( self, V_a, eos_d ):
V0, K0, KP0 = Control.get_params( ['V0','K0','KP0'], eos_d )
eta = 3./2*(KP0-1)
vratio_a = 1.0*V_a/V0
x_a = vratio_a**(1./3)
press_a = 3*K0*(1-x_a)*x_a**(-2)*np.exp(eta*(1-x_a))
return press_a
def calc_energy( self, V_a, eos_d ):
V0, K0, KP0, E0 = Control.get_params( ['V0','K0','KP0','E0'], eos_d )
# print V0
# print K0
# print KP0
# print E0
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
eta = 3./2*(KP0-1)
vratio_a = 1.0*V_a/V0
x_a = vratio_a**(1.0/3)
energy_a = E0 + 9*K0*V0/PV_ratio/eta**2*\
(1 + (eta*(1-x_a)-1)*np.exp(eta*(1-x_a)))
return energy_a
def calc_energy_perturb( self, V_a, eos_d ):
"""Returns Energy pertubation basis functions resulting from fractional changes to EOS params."""
V0, K0, KP0, E0 = Control.get_params( ['V0','K0','KP0','E0'], eos_d )
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
eta = 3./2*(KP0-1)
vratio_a = 1.0*V_a/V0
x = vratio_a**(1./3)
scale_a, paramkey_a = self.get_param_scale_sub( eos_d )
# NOTE: CHECK UNITS (PV_RATIO) here
dEdp_a = 1.0/PV_ratio*np.vstack\
([-3*K0*(eta**2*x*(x-1) + 3*eta*(x-1) - 3*np.exp(eta*(x-1)) + 3)\
*np.exp(-eta*(x-1))/eta**2,
-9*V0*(eta*(x-1) - np.exp(eta*(x-1)) + 1)*np.exp(-eta*(x-1))/eta**2,
27*K0*V0*(2*eta*(x-1) + eta*(-x + (x-1)*(eta*(x-1) + 1) + 1)
-2*np.exp(eta*(x-1)) + 2)*np.exp(-eta*(x-1))/(2*eta**3),
PV_ratio*np.ones(V_a.shape)])
Eperturb_a = np.expand_dims(scale_a,1)*dEdp_a
#Eperturb_a = np.expand_dims(scale_a)*dEdp_a
return Eperturb_a, scale_a, paramkey_a
#====================================================================
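# Illustrative usage sketch (added; not part of the original code): minimal
# setup for evaluating the Vinet curve defined above. The parameter values
# below are hypothetical placeholders, not fitted constants.
def _demo_vinet_curve():
    eos_d = {}
    Control.init_consts( eos_d )
    Control.set_params( ['V0','K0','KP0','E0'], [11.0, 150.0, 4.5, 0.0], eos_d )
    vinet_mod = Vinet( path_const='T', level_const=300 )
    V_a = np.linspace(0.7, 1.0, 31)*11.0
    press_a = vinet_mod.press( V_a, eos_d )
    energy_a = vinet_mod.energy( V_a, eos_d )
    return press_a, energy_a
#====================================================================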
class Tait(CompressPathMod):
def __init__( self, setlogPmin=False,
path_const='T', level_const=300, expand_adj_mod=None,
expand_adj=None, supress_energy=False, supress_press=False ):
        super(Tait, self).__init__( path_const=path_const, level_const=level_const,
                                    expand_adj_mod=expand_adj_mod, expand_adj=expand_adj,
                                    supress_energy=supress_energy, supress_press=supress_press )
self.setlogPmin = setlogPmin
pass
# def __init__( self, setlogPmin=False, expand_adj=False ):
# self.setlogPmin = setlogPmin
# self.expand_adj = expand_adj
# pass
def get_eos_params(self, eos_d):
V0, K0, KP0 = Control.get_params( ['V0','K0','KP0'], eos_d )
if self.setlogPmin:
logPmin, = Control.get_params( ['logPmin'], eos_d )
Pmin = np.exp(logPmin)
# assert Pmin>0, 'Pmin must be positive.'
KP20 = (KP0+1)*(KP0/K0 - 1.0/Pmin)
else:
KP20, = Control.get_params( ['KP20'], eos_d )
return V0,K0,KP0,KP20
def get_param_scale_sub( self, eos_d ):
"""Return scale values for each parameter"""
V0, K0, KP0, KP20 = self.get_eos_params(eos_d)
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
if self.setlogPmin:
# [V0,K0,KP0,E0]
paramkey_a = np.array(['V0','K0','KP0','E0'])
scale_a = np.array([V0,K0,KP0,K0*V0/PV_ratio])
else:
# [V0,K0,KP0,KP20,E0]
paramkey_a = np.array(['V0','K0','KP0','KP20','E0'])
scale_a = np.array([V0,K0,KP0,KP0/K0,K0*V0/PV_ratio])
return scale_a, paramkey_a
def eos_to_abc_params(self, K0, KP0, KP20):
a = (KP0 + 1.0)/(K0*KP20 + KP0 + 1.0)
b = -KP20/(KP0+1.0) + KP0/K0
c = (K0*KP20 + KP0 + 1.0)/(-K0*KP20 + KP0**2 + KP0)
return a,b,c
def calc_press( self, V_a, eos_d ):
V0, K0, KP0, KP20 = self.get_eos_params(eos_d)
a,b,c = self.eos_to_abc_params(K0,KP0,KP20)
vratio_a = 1.0*V_a/V0
press_a = 1.0/b*(((vratio_a + a - 1.0)/a)**(-1.0/c) - 1.0)
return press_a
def calc_energy( self, V_a, eos_d ):
V0, K0, KP0, KP20 = self.get_eos_params(eos_d)
E0, = Control.get_params( ['E0'], eos_d )
a,b,c = self.eos_to_abc_params(K0,KP0,KP20)
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
vratio_a = 1.0*V_a/V0
press_a = self.calc_press( V_a, eos_d )
eta_a = b*press_a + 1.0
eta_pow_a = eta_a**(-c)
# NOTE: Need to simplify energy expression here
energy_a = E0 + (V0/b)/PV_ratio*(a*c/(c-1)-1)\
- (V0/b)/PV_ratio*( a*c/(c-1)*eta_a*eta_pow_a - a*eta_pow_a + a - 1)
return energy_a
def calc_energy_perturb_deprecate( self, V_a, eos_d ):
"""Returns Energy pertubation basis functions resulting from fractional changes to EOS params."""
V0, K0, KP0, KP20 = self.get_eos_params(eos_d)
E0, = Control.get_params( ['E0'], eos_d )
a,b,c = self.eos_to_abc_params(K0,KP0,KP20)
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
vratio_a = V_a/V0
press_a = self.calc_press( V_a, eos_d )
eta_a = b*press_a + 1.0
eta_pow_a = eta_a**(-c)
scale_a, paramkey_a = self.get_param_scale_sub( eos_d )
# [V0,K0,KP0,KP20,E0]
        dEdp_a = np.ones((5, V_a.size))
# dEdp_a[0,:] = 1.0/(PV_ratio*b*(c-1))*eta_a*(-a*eta_pow_a -1 + (1-a)*(a+c))
dEdp_a[0,:] = 1.0/(PV_ratio*b*(c-1))*eta_a*(-a*eta_pow_a +a -1 -a*c+c) \
+ 1.0/(PV_ratio*b)*(a*c/(c-1)-1)
dEdp_a[-1,:] = 1.0
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# 1x3
dEdabc_a = np.vstack\
([V0*eta_a/(a*b*(c-1))*(-a*eta_pow_a + a*(1-c))+c*V0/(b*(c-1)),
V0/(b**2*(c-1))*((-a*eta_pow_a+a-1)*(c-1) + c*a*eta_a*eta_pow_a) \
- V0/b**2*(a*c/(c-1) - 1),
-a*V0/(b*(c-1)**2)*eta_a*eta_pow_a*(-c+(c-1)*(1-np.log(eta_a)))\
+a*V0/(b*(c-1))*(1-c/(c-1))])
# 3x3
abc_jac = np.array([[-KP20*(KP0+1)/(K0*KP20+KP0+1)**2,
K0*KP20/(K0*KP20+KP0+1)**2,
-K0*(KP0+1)/(K0*KP20+KP0+1)**2],
[-KP0/K0**2, KP20/(KP0+1)**2 + 1./K0, -1.0/(KP0+1)],
[KP20*(KP0**2+2.*KP0+1)/(-K0*KP20+KP0**2+KP0)**2,
(-K0*KP20+KP0**2+KP0-(2*KP0+1)*(K0*KP20+KP0+1))/\
(-K0*KP20+KP0**2+KP0)**2,
K0*(KP0**2+2*KP0+1)/(-K0*KP20+KP0**2+KP0)**2]])
dEdp_a[1:4,:] = 1.0/PV_ratio*np.dot(abc_jac.T,dEdabc_a)
        print(dEdp_a.shape)
if self.setlogPmin:
# [V0,K0,KP0,E0]
            print(dEdp_a.shape)
dEdp_a = dEdp_a[[0,1,2,4],:]
Eperturb_a = np.expand_dims(scale_a,1)*dEdp_a
#Eperturb_a = np.expand_dims(scale_a)*dEdp_a
return Eperturb_a, scale_a, paramkey_a
#====================================================================
class RosenfeldTaranzonaShiftedAdiabat(CompressPathMod):
def get_param_scale_sub( self, eos_d):
"""Return scale values for each parameter"""
V0, K0, KP0 = Control.get_params( ['V0','K0','KP0'], eos_d )
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
paramkey_a = np.array(['V0','K0','KP0','E0'])
scale_a = np.array([V0,K0,KP0,K0*V0/PV_ratio])
return scale_a, paramkey_a
def calc_press( self, V_a, eos_d ):
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
fac = 1e-3
Vhi_a = V_a*(1.0 + 0.5*fac)
Vlo_a = V_a*(1.0 - 0.5*fac)
dV_a = Vhi_a-Vlo_a
E0S_hi_a = self.calc_energy(Vhi_a, eos_d)
E0S_lo_a = self.calc_energy(Vlo_a, eos_d)
P0S_a = -PV_ratio*(E0S_hi_a - E0S_lo_a)/dV_a
return P0S_a
def calc_energy( self, V_a, eos_d ):
V0, T0, mexp = Control.get_params( ['V0','T0','mexp'], eos_d )
kB, = Control.get_consts( ['kboltz'], eos_d )
poly_blogcoef_a = Control.get_array_params( 'blogcoef', eos_d )
compress_path_mod, thermal_mod, gamma_mod = \
Control.get_modtypes( ['CompressPathMod', 'ThermalMod', 'GammaMod'],
eos_d )
free_energy_isotherm_a = compress_path_mod.energy(V_a,eos_d)
T0S_a = gamma_mod.temp(V_a,T0,eos_d)
bV_a = np.polyval(poly_blogcoef_a,np.log(V_a/V0))
dS_a = -mexp/(mexp-1)*bV_a/T0*((T0S_a/T0)**(mexp-1)-1)\
-3./2*kB*np.log(T0S_a/T0)
energy_isotherm_a = free_energy_isotherm_a + T0*dS_a
E0S_a = energy_isotherm_a + bV_a*((T0S_a/T0)**mexp-1)\
+3./2*kB*(T0S_a-T0)
return E0S_a
#====================================================================
class GenRosenfeldTaranzona(ThermalPathMod):
"""
Generalized Rosenfeld-Taranzona Equation of State Model (Rosenfeld1998)
- Cv takes on general form of shifted power-law as in original
Rosenfeld-Taranzona model, but the exponent and high-temp limit are
parameters rather than fixed
- only applicable to isochores
- must provide a method to evaluate properties along isochore
"""
__metaclass__ = ABCMeta
def get_param_scale_sub( self, eos_d):
"""Return scale values for each parameter"""
acoef, bcoef, mexp, lognfac = Control.get_params\
( ['acoef','bcoef','mexp','lognfac'], eos_d )
acoef_scl = 1.0 # This cannot be well-determined without more info
# ...like a reference temp or energy variation
bcoef_scl = np.abs(bcoef)
mexp_scl = 3./5
lognfac_scl = 0.01
paramkey_a = np.array(['acoef','bcoef','mexp','lognfac'])
scale_a = np.array([acoef_scl,bcoef_scl,mexp_scl,lognfac_scl])
return scale_a, paramkey_a
def get_param_override( self, paramkey, paramval, eos_d ):
if paramval is None:
paramval, = Control.get_params( [paramkey], eos_d )
return paramval
def calc_therm_dev( self, T_a, eos_d ):
"""
"""
# assert False, 'calc_thermal_dev is not yet implimented'
T0, = Control.get_params( ['T0'], eos_d )
mexp, = Control.get_params( ['mexp'], eos_d )
# therm_dev_a = (T_a/T0)**mexp
therm_dev_a = (T_a/T0)**mexp - 1.0
return therm_dev_a
def calc_therm_dev_deriv( self, T_a, eos_d ):
"""
"""
# assert False, 'calc_thermal_dev is not yet implimented'
T0, = Control.get_params( ['T0'], eos_d )
mexp, = Control.get_params( ['mexp'], eos_d )
dtherm_dev_a = (mexp/T0)*(T_a/T0)**(mexp-1.0)
return dtherm_dev_a
def calc_energy( self, T_a, eos_d, acoef_a=None, bcoef_a=None ):
"""Returns Thermal Component of Energy."""
mexp, lognfac = Control.get_params( ['mexp','lognfac'], eos_d )
energy_pot_a = self.calc_energy_pot( T_a, eos_d, acoef_a=acoef_a,
bcoef_a=bcoef_a )
energy_kin_a = self.calc_energy_kin( T_a, eos_d )
energy_a = energy_pot_a + energy_kin_a
return energy_a
def calc_energy_kin( self, T_a, eos_d ):
"""Returns Thermal Component of Energy."""
lognfac, = Control.get_params( ['lognfac'], eos_d )
kB, = Control.get_consts( ['kboltz'], eos_d )
nfac = np.exp(lognfac)
energy_kin_a = 3.0/2*nfac*kB*T_a
return energy_kin_a
def calc_energy_pot( self, T_a, eos_d, acoef_a=None, bcoef_a=None ):
"""Returns Thermal Component of Energy."""
acoef_a = self.get_param_override( 'acoef', acoef_a, eos_d )
energy_pot_diff_a = self.calc_energy_pot_diff( T_a, eos_d, bcoef_a=bcoef_a )
energy_pot_a = acoef_a + energy_pot_diff_a
return energy_pot_a
def calc_energy_pot_diff( self, T_a, eos_d, bcoef_a=None ):
bcoef_a = self.get_param_override( 'bcoef', bcoef_a, eos_d )
therm_dev_a = self.calc_therm_dev( T_a, eos_d )
energy_pot_diff_a = bcoef_a*therm_dev_a
return energy_pot_diff_a
def calc_heat_capacity( self, T_a, eos_d, bcoef_a=None ):
"""Calculate Heat Capacity usin."""
heat_capacity_pot = self.calc_heat_capacity_pot( T_a, eos_d,
bcoef_a=bcoef_a )
heat_capacity_kin = self.calc_heat_capacity_kin( T_a, eos_d )
heat_capacity_a = heat_capacity_pot+heat_capacity_kin
return heat_capacity_a
def calc_heat_capacity_pot( self, T_a, eos_d, bcoef_a=None ):
mexp, = Control.get_params( ['mexp'], eos_d )
bcoef_a = self.get_param_override( 'bcoef', bcoef_a, eos_d )
dtherm_dev_a = self.calc_therm_dev_deriv( T_a, eos_d )
heat_capacity_pot_a = bcoef_a*dtherm_dev_a
return heat_capacity_pot_a
def calc_heat_capacity_kin( self, T_a, eos_d ):
lognfac, = Control.get_params( ['lognfac'], eos_d )
kB, = Control.get_consts( ['kboltz'], eos_d )
nfac = np.exp(lognfac)
heat_capacity_kin_a = + 3.0/2*nfac*kB
return heat_capacity_kin_a
def calc_entropy_pot( self, T_a, eos_d, Tref=None, bcoef_a=None ):
mexp, = Control.get_params( ['mexp'], eos_d )
Tref = self.get_param_override( 'T0', Tref, eos_d )
Cv_pot = self.calc_heat_capacity_pot( T_a, eos_d, bcoef_a=bcoef_a )
Cv_ref_pot = self.calc_heat_capacity_pot( Tref, eos_d, bcoef_a=bcoef_a )
dSpot_a = (Cv_pot-Cv_ref_pot)/(mexp-1.0)
return dSpot_a
def calc_entropy_kin( self, T_a, eos_d, Tref=None ):
Tref = self.get_param_override( 'T0', Tref, eos_d )
Cv_kin = self.calc_heat_capacity_kin( T_a, eos_d )
dSkin_a = Cv_kin*np.log( T_a/Tref )
return dSkin_a
def calc_entropy_heat( self, T_a, eos_d, Tref=None, bcoef_a=None ):
"""Calculate Entropy change upon heating at constant volume."""
mexp, = Control.get_params( ['mexp'], eos_d )
Tref = self.get_param_override( 'T0', Tref, eos_d )
delS_pot = self.calc_entropy_pot( T_a, eos_d, Tref=Tref,
bcoef_a=bcoef_a )
delS_kin = self.calc_entropy_kin( T_a, eos_d, Tref=Tref )
delS_heat_a = delS_pot + delS_kin
return delS_heat_a
def calc_entropy( self, T_a, eos_d, Tref=None, Sref=None, bcoef_a=None ):
"""Calculate Full Entropy for isochore."""
Sref = self.get_param_override( 'S0', Sref, eos_d )
delS_heat_a = self.calc_entropy_heat( T_a, eos_d, Tref=Tref,
bcoef_a=bcoef_a )
entropy_a = Sref + delS_heat_a
return entropy_a
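# Illustrative sketch (editor addition, not part of the original class): on a
# single isochore the shifted power-law above reduces to
#   E_pot(T) = a + b*((T/T0)**m - 1)   and   Cv_pot(T) = b*(m/T0)*(T/T0)**(m-1),
# shown here with plain numpy and hypothetical coefficient values.
def _demo_rosenfeld_taranzona_isochore(T_a, acoef=-20.0, bcoef=5.0, mexp=3./5, T0=3000.0):
    energy_pot_a = acoef + bcoef*((T_a/T0)**mexp - 1.0)
    heat_capacity_pot_a = bcoef*(mexp/T0)*(T_a/T0)**(mexp-1.0)
    return energy_pot_a, heat_capacity_pot_a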
#====================================================================
class RosenfeldTaranzonaCompress(ThermalMod):
"""
Volume-dependent Rosenfeld-Taranzona Equation of State
- must implement a particular volume-dependence
"""
__metaclass__ = ABCMeta
#========================
# Override Method
#========================
@abstractmethod
def calc_entropy_compress( self, V_a, eos_d ):
"""
If compress path is
"""
return 0.0
#========================
# Initialization
#========================
def __init__( self, coef_kind='logpoly', temp_path_kind='T0', acoef_fun=None ):
self.set_empirical_coef( coef_kind, acoef_fun=acoef_fun )
self.set_temp_path( temp_path_kind )
pass
def set_empirical_coef( self, coef_kind, acoef_fun=None ):
coef_kind_typ = ['logpoly','poly','polynorm']
assert coef_kind in coef_kind_typ, 'coef_kind is not a valid type. '\
'Available types = '+str(coef_kind_typ)+'.'
self.coef_kind= coef_kind
calc_coef = getattr(self, 'calc_coef_'+coef_kind)
self.calc_bcoef = lambda V_a, eos_d, deriv=0: calc_coef( V_a, 'bcoef',
eos_d, deriv=deriv )
if acoef_fun is None:
self.calc_acoef = lambda V_a, eos_d: calc_coef( V_a, 'acoef', eos_d )
else:
self.calc_acoef = acoef_fun
pass
def set_temp_path( self, temp_path_kind ):
temp_path_kind_typ = ['S0','T0','abszero']
assert temp_path_kind in temp_path_kind_typ, 'temp_path_kind is not a valid type. '\
'Available types = '+str(temp_path_kind_typ)+'.'
self.temp_path_kind = temp_path_kind
self.calc_temp_path = getattr(self, 'calc_temp_path_'+temp_path_kind)
pass
#========================
def get_param_scale_sub( self, eos_d):
"""Return scale values for each parameter"""
bcoef_a = Control.get_array_params( 'bcoef', eos_d )
coef_param_key = ['bcoef_'+str(i) for i in range(bcoef_a.size)]
coef_param_scale = np.ones(bcoef_a.shape)
try:
acoef_a = Control.get_array_params( 'acoef', eos_d )
acoef_param_key = ['acoef_'+str(i) for i in range(acoef_a.size)]
acoef_param_scale = np.ones(acoef_a.shape)
coef_param_key = np.append(coef_param_key,acoef_param_key)
coef_param_scale = np.append(coef_param_scale,acoef_param_scale)
except:
# No acoef params present; only bcoef is required
pass
paramkey_a = coef_param_key
scale_a = coef_param_scale
T0, = Control.get_params( ['T0'], eos_d )
T0_scl = T0
mexp_scl = 3./5
lognfac_scl = 0.01
paramkey_a = np.append(paramkey_a,['T0','mexp','lognfac'])
scale_a = np.append(scale_a,[T0_scl,mexp_scl,lognfac_scl])
return scale_a, paramkey_a
def calc_therm_dev( self, V_a, T_a, eos_d ):
"""
Extend thermal deviation concept to take difference from reference path,
rather than reference point
"""
T0, = Control.get_params( ['T0'], eos_d )
T_ref_a = self.calc_temp_path( V_a, eos_d )
therm_dev_f = GenRosenfeldTaranzona().calc_therm_dev
therm_dev_path_a = therm_dev_f( T_a, eos_d ) - therm_dev_f( T_ref_a, eos_d )
return therm_dev_path_a
def calc_energy_pot_diff( self, V_a, T_a, eos_d ):
# T0, = Control.get_params( ['T0'], eos_d )
# gamma_mod = eos_d['modtype_d']['GammaMod']
# T_ref_a = gamma_mod.temp( V_a, T0, eos_d )
# del_energy_pot_a = self.calc_energy_pot( V_a, T_a, eos_d ) \
# - self.calc_energy_pot( V_a, T_ref_a, eos_d )
therm_dev_a = self.calc_therm_dev( V_a, T_a, eos_d )
bcoef_a = self.calc_bcoef( V_a, eos_d )
del_energy_pot_a = bcoef_a*therm_dev_a
return del_energy_pot_a
def calc_energy_kin_diff( self, V_a, T_a, eos_d ):
T0, = Control.get_params( ['T0'], eos_d )
T_ref_a = self.calc_temp_path( V_a, eos_d )
del_energy_kin_a = GenRosenfeldTaranzona().calc_energy_kin( T_a, eos_d ) \
- GenRosenfeldTaranzona().calc_energy_kin( T_ref_a, eos_d )
return del_energy_kin_a
def calc_energy( self, V_a, T_a, eos_d ):
# acoef_a = self.calc_acoef( V_a, eos_d )
dE_pot = self.calc_energy_pot_diff( V_a, T_a, eos_d )
dE_kin = self.calc_energy_kin_diff( V_a, T_a, eos_d )
# E_tot = acoef_a + dE_pot + dE_kin
dE_tot = dE_pot + dE_kin
return dE_tot
def calc_free_energy( self, V_a, T_a, eos_d ):
E_tot = self.calc_energy( V_a, T_a, eos_d )
S_tot = self.calc_entropy( V_a, T_a, eos_d )
F_tot = E_tot - T_a*S_tot
return F_tot
def calc_press( self, V_a, T_a, eos_d ):
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
V0, = Control.get_params( ['V0'], eos_d )
# Use numerical deriv
dV = V0*1e-5
F_a = self.calc_free_energy( V_a, T_a, eos_d )
F_hi_a = self.calc_free_energy( V_a+dV, T_a, eos_d )
press_therm_a = -PV_ratio*(F_hi_a-F_a)/dV
return press_therm_a
def calc_RT_coef_deriv( self, V_a, eos_d ):
V0, = Control.get_params( ['V0'], eos_d )
# Use numerical deriv
dV = V0*1e-5
acoef_a = self.calc_acoef( V_a, eos_d )
bcoef_a = self.calc_bcoef( V_a, eos_d )
acoef_hi_a = self.calc_acoef( V_a+dV, eos_d )
bcoef_hi_a = self.calc_bcoef( V_a+dV, eos_d )
acoef_deriv_a = (acoef_hi_a-acoef_a)/dV
bcoef_deriv_a = (bcoef_hi_a-bcoef_a)/dV
return acoef_deriv_a, bcoef_deriv_a
def calc_heat_capacity( self, V_a, T_a, eos_d ):
"""Calculate Heat Capacity usin."""
bcoef_a = self.calc_bcoef( V_a, eos_d )
heat_capacity_a = GenRosenfeldTaranzona().calc_heat_capacity\
( T_a, eos_d, bcoef_a=bcoef_a )
return heat_capacity_a
def calc_entropy( self, V_a, T_a, eos_d ):
"""
Entropy depends on whether reference Compress Path is isotherm or adiabat
"""
V0, S0 = Control.get_params( ['V0','S0'], eos_d )
T0, = Control.get_params( ['T0'], eos_d )
bcoef_a = self.calc_bcoef( V_a, eos_d )
gamma_mod = eos_d['modtype_d']['GammaMod']
Tref_a = gamma_mod.temp( V_a, T0, eos_d )
dS_heat_a = GenRosenfeldTaranzona().calc_entropy_heat( T_a, eos_d,
Tref=Tref_a,
bcoef_a=bcoef_a )
dS_compress_a = self.calc_entropy_compress( V_a, eos_d )
S_a = S0 + dS_heat_a + dS_compress_a
return S_a
def calc_gamma( self, V_a, T_a, eos_d ):
gamma_mod = eos_d['modtype_d']['GammaMod']
T0, = Control.get_params( ['T0'], eos_d )
gamma_0S_a = gamma_mod.gamma(V_a,eos_d)
T_0S_a = gamma_mod.temp(V_a,T0,eos_d)
bcoef_a = self.calc_bcoef(V_a,eos_d)
bcoef_der1_a = self.calc_bcoef(V_a,eos_d,deriv=1)
CV_a = self.calc_heat_capacity( V_a, T_a, eos_d )
CV_0S_a = self.calc_heat_capacity( V_a, T_0S_a, eos_d )
dS_pot_a = GenRosenfeldTaranzona().calc_entropy_pot( T_a, eos_d,
Tref=T_0S_a,
bcoef_a=bcoef_a )
gamma_a = gamma_0S_a*(CV_0S_a/CV_a) \
+ V_a*(bcoef_der1_a/bcoef_a)*(dS_pot_a/CV_a)
return gamma_a
#========================
# Empirical Coefficient Model
#========================
def calc_coef_poly( self, V_a, coef_key, eos_d, deriv=0 ):
poly_coef_a = Control.get_array_params( coef_key, eos_d )
# coef_a = np.polyval(poly_coef_a[::-1], V_a)
if deriv==0:
coef_a = np.polyval(poly_coef_a, V_a)
else:
dpoly_coef_a = np.polyder(poly_coef_a,deriv)
coef_a = np.polyval(dpoly_coef_a, V_a)
return coef_a
def calc_coef_logpoly( self, V_a, coef_key, eos_d, deriv=0 ):
V0, = Control.get_params( ['V0'], eos_d )
logpoly_coef_a = Control.get_array_params( coef_key, eos_d )
# coef_a = np.polyval(logpoly_coef_a[::-1], np.log(V_a/V0))
if deriv==0:
coef_a = np.polyval(logpoly_coef_a, np.log(V_a/V0))
else:
dlogpoly_coef_a = np.polyder(logpoly_coef_a,deriv)
coef_a = V_a**(-deriv)*np.polyval(dlogpoly_coef_a, np.log(V_a/V0))
return coef_a
def calc_coef_polynorm( self, V_a, coef_key, eos_d, deriv=0 ):
V0, = Control.get_params( ['V0'], eos_d )
polynorm_coef_a = Control.get_array_params( coef_key, eos_d )
# coef_a = np.polyval(polynorm_coef_a[::-1], V_a/V0-1.0 )
if deriv==0:
coef_a = np.polyval(polynorm_coef_a, V_a/V0-1.0 )
else:
dpolynorm_coef_a = np.polyder(polynorm_coef_a,deriv)
coef_a = V0**(-deriv)*np.polyval(dpolynorm_coef_a, V_a)
return coef_a
#========================
# Ref Temp path
#========================
def calc_temp_path_T0( self, V_a, eos_d ):
T0, = Control.get_params( ['T0'], eos_d )
return T0
def calc_temp_path_S0( self, V_a, eos_d ):
T0, = Control.get_params( ['T0'], eos_d )
gamma_mod, = Control.get_modtypes( ['GammaMod'], eos_d )
Tref_a = gamma_mod.temp( V_a, T0, eos_d )
return Tref_a
def calc_temp_path_abszero( self, V_a, eos_d ):
Tref = 0.0
return Tref
#====================================================================
class RosenfeldTaranzonaAdiabat(RosenfeldTaranzonaCompress):
def __init__( self, coef_kind='logpoly' ):
temp_path_kind = 'S0'
acoef_fun= self.calc_energy_adiabat_ref
self.set_temp_path( temp_path_kind )
self.set_empirical_coef( coef_kind, acoef_fun=acoef_fun )
pass
def calc_entropy_compress( self, V_a, eos_d ):
dS_a = np.zeros(V_a.shape)
return dS_a
def calc_entropy_isotherm( self, V_a, eos_d ):
kB, = Control.get_consts( ['kboltz'], eos_d )
T0, mexp = Control.get_params( ['T0','mexp'], eos_d )
T0S_a = self.calc_temp_path( V_a, eos_d )
bV_a = self.calc_bcoef( V_a, eos_d )
dS_T0_a = -1.0*mexp/(mexp-1)*bV_a/T0*((T0S_a/T0)**(mexp-1.0)-1.0)\
-3./2*kB*np.log(T0S_a/T0)
return dS_T0_a
def calc_energy_adiabat_ref( self, V_a, eos_d ):
kB, = Control.get_consts( ['kboltz'], eos_d )
T0, mexp = Control.get_params( ['T0','mexp'], eos_d )
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
energy_isotherm_a = compress_path_mod.energy(V_a,eos_d)
if compress_path_mod.path_const == 'S':
# compress path mod directly describes adiabat
# internal energy is given directly by integral of compress path
E0S_a = energy_isotherm_a
else:
# compress path mod describes isotherm
# free energy is given by integral of compress path
# adjustments are needed to obtain internal energy
free_energy_isotherm_a = energy_isotherm_a
T0S_a = self.calc_temp_path( V_a, eos_d )
bV_a = self.calc_bcoef( V_a, eos_d )
dS_T0_a = self.calc_entropy_isotherm( V_a, eos_d )
energy_isotherm_a = free_energy_isotherm_a + T0*dS_T0_a
E0S_a = energy_isotherm_a +bV_a*((T0S_a/T0)**mexp-1)\
+3./2*kB*(T0S_a-T0)
return E0S_a
#====================================================================
class RosenfeldTaranzonaIsotherm(RosenfeldTaranzonaCompress):
def __init__( self, coef_kind='logpoly' ):
temp_path_kind = 'T0'
acoef_fun = None
self.set_temp_path( temp_path_kind )
self.set_empirical_coef( coef_kind, acoef_fun=acoef_fun )
pass
def calc_entropy_compress( self, V_a, eos_d ):
dS_T0_a = self.calc_entropy_isotherm( V_a, eos_d )
return dS_T0_a
def calc_entropy_isotherm( self, V_a, eos_d ):
kB, = Control.get_consts( ['kboltz'], eos_d )
T0, mexp = Control.get_params( ['T0','mexp'], eos_d )
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
free_energy_isotherm_a = compress_path_mod.energy(V_a,eos_d)
T0S_a = self.calc_temp_path( V_a, eos_d )
bV_a = self.calc_bcoef( V_a, eos_d )
dS_a = -mexp/(mexp-1)*bV_a/T0*((T0S_a/T0)**(mexp-1)-1)\
-3./2*kB*np.log(T0S_a/T0)
return dS_a
#====================================================================
class MieGrun(ThermalMod):
"""
Mie-Gruneisen Equation of State Model
(requires extension to define thermal energy model)
"""
__metaclass__ = ABCMeta
def press( self, V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
gamma_mod, = Control.get_modtypes( ['GammaMod'], eos_d )
# Needed functions
energy_therm_a = self.calc_energy( V_a, T_a, eos_d )
gamma_a = gamma_mod.gamma( V_a, eos_d )
press_therm_a = PV_ratio*(gamma_a/V_a)*energy_therm_a
return press_therm_a
@abstractmethod
def calc_energy( self, V_a, T_a, eos_d ):
"""Returns Thermal Component of Energy."""
#====================================================================
class MieGrunDebye(MieGrun):
def __init__( self ):
super(MieGrunDebye, self).__init__()
self.path_const = 'V'
def calc_energy( self, V_a, T_a, eos_d ):
'''
Thermal Energy for Debye model
Relies on reference profile properties stored in eos_d defined by:
* debye_temp_f( V_a, T_a )
* ref_temp_f( V_a, T_a )
'''
V_a, T_a = fill_array( V_a, T_a )
# NOTE: T0 refers to temp on ref adiabat evaluated at V0
Cvmax, T0, thetaR = Control.get_params( ['Cvmax','T0','thetaR'], eos_d )
TS_ratio, = Control.get_consts( ['TS_ratio'], eos_d )
gamma_mod, = Control.get_modtypes( ['GammaMod'], eos_d )
theta_a = gamma_mod.temp( V_a, thetaR, eos_d )
# Tref_a = gamma_mod.temp( V_a, T0, eos_d )
Tref_a = self.calc_temp_path(V_a,eos_d)
# print theta_a
######################
# NOTE: Some weird issue with TS_ratio!!!
######################
# energy_therm_a = (Cvmax/TS_ratio)*(
# + T_a*self.debye_func( theta_a/T_a )
# - Tref_a*self.debye_func( theta_a/Tref_a ) )
energy_therm_a = (Cvmax)*(
+ T_a*self.debye_func( theta_a/T_a )
- Tref_a*self.debye_func( theta_a/Tref_a ) )
return energy_therm_a
def calc_entropy( self, V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
Cvmax, thetaR = Control.get_params( ['Cvmax','thetaR'], eos_d )
TS_ratio, = Control.get_consts( ['TS_ratio'], eos_d )
gamma_mod, = Control.get_modtypes( ['GammaMod'], eos_d )
theta_a = gamma_mod.temp( V_a, thetaR, eos_d )
x_a = theta_a/T_a
# entropy_a = Cvmax*Cv_const/3. \
# *(4*debye_func( x_a )-3*np.log( 1-np.exp( -x_a ) ) )
# TS_ratio????
# entropy_a = 1.0/3*(Cvmax/TS_ratio) \
# *(4*self.debye_func( x_a )-3*np.log( np.exp( x_a ) - 1 ) + 3*x_a)
entropy_a = 1.0/3*(Cvmax) \
*(4*self.debye_func( x_a )-3*np.log( np.exp( x_a ) - 1 ) + 3*x_a)
return entropy_a
def calc_heat_capacity( self, V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
Cvmax, thetaR = Control.get_params( ['Cvmax','thetaR'], eos_d )
TS_ratio, = Control.get_consts( ['TS_ratio'], eos_d )
gamma_mod, = Control.get_modtypes( ['GammaMod'], eos_d )
theta_a = gamma_mod.temp( V_a, thetaR, eos_d )
# The reference adiabat terms in the internal energy are temperature
# independent, and thus play no role in heat capacity
x_a = theta_a/T_a
# heat_capacity_a = (Cvmax/TS_ratio)*\
# (4*self.debye_func( x_a )-3*x_a/(np.exp(x_a)-1))
######################
# NOTE: Some weird issue with TS_ratio!!!
######################
heat_capacity_a = (Cvmax)*\
(4*self.debye_func( x_a )-3*x_a/(np.exp(x_a)-1))
return heat_capacity_a
def debye_func( self, x_a ):
"""
Return debye integral value
- calculation done using interpolation in a lookup table
- interpolation done in log-space where behavior is close to linear
- linear extrapolation is implemented manually
"""
if np.isscalar( x_a ):
assert x_a >= 0, 'x_a values must be non-negative.'
else:
assert all( x_a >= 0 ), 'x_a values must be non-negative.'
# Lookup table
# interpolate in log space where behavior is nearly linear
debyex_a = np.array( [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8,
3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0,
5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] )
debyelogf_a = np.array( [ 0.0, -0.03770187, -0.07580279, -0.11429475,
-0.15316866, -0.19241674, -0.2320279 , -0.27199378,
-0.31230405, -0.35294619, -0.39390815, -0.43518026,
-0.47674953, -0.51860413, -0.56072866, -0.64573892,
-0.73167389, -0.81841793, -0.90586032, -0.99388207,
-1.08236598, -1.17119911, -1.26026101, -1.34944183,
-1.43863241, -1.52771969, -1.61660856, -1.70519469,
-1.79338479, -1.88108917, -1.96822938, -2.05471771,
-2.14049175, -2.35134476, -2.55643273, -2.75507892,
-2.94682783, -3.13143746, -3.30880053, -3.47894273,
-3.64199587, -3.79820337, -3.94785746] )
# Create interpolation function
logdeb_func = interpolate.interp1d( debyex_a, debyelogf_a,
kind='cubic',
bounds_error=False,
fill_value=np.nan )
logfval_a = logdeb_func( x_a )
# Check for extrapolated values indicated by NaN
# - replace with linear extrapolation
logfextrap_a = debyelogf_a[-1] + (x_a - debyex_a[-1]) \
*(debyelogf_a[-1]-debyelogf_a[-2])\
/(debyex_a[-1]-debyex_a[-2])
logfval_a = np.where( x_a > debyex_a[-1], logfextrap_a,
logfval_a )
# exponentiate to get integral value
return np.exp( logfval_a )
def calc_temp_path( self, V_a, eos_d ):
T0, = Control.get_params( ['T0'], eos_d )
gamma_mod, = Control.get_modtypes( ['GammaMod'], eos_d )
Tref_a = gamma_mod.temp( V_a, T0, eos_d )
return Tref_a
def calc_free_energy( self, V_a, T_a, eos_d ):
E_tot = self.calc_energy( V_a, T_a, eos_d )
S_tot = self.calc_entropy( V_a, T_a, eos_d )
F_tot = E_tot - T_a*S_tot
return F_tot
def calc_gamma( self, V_a, T_a, eos_d ):
gamma_mod = eos_d['modtype_d']['GammaMod']
T0, = Control.get_params( ['T0'], eos_d )
gamma_0S_a = gamma_mod.gamma(V_a,eos_d)
return gamma_0S_a
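# Illustrative sketch (editor addition, not part of the original class): the
# debye_func lookup above extends beyond its table by linear extrapolation of
# log D(x); the same pattern in isolation, for any tabulated curve:
def _demo_loglinear_extrapolation(x_a, xtab_a, logftab_a):
    slope = (logftab_a[-1]-logftab_a[-2])/(xtab_a[-1]-xtab_a[-2])
    return np.exp( logftab_a[-1] + (x_a-xtab_a[-1])*slope )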
#====================================================================
class GammaPowLaw(GammaMod):
def __init__( self, V0ref=True, use_gammap=False ):
self.V0ref = V0ref
self.use_gammap = use_gammap
pass
def get_paramkey( self, eos_d ):
if self.use_gammap:
gammaderiv_typ = 'gammap'
else:
gammaderiv_typ = 'q'
if self.V0ref:
VRkey = 'V0'
gammaRkey = 'gamma0'
gammaderivkey = gammaderiv_typ+'0'
else:
VRkey = 'VR'
gammaRkey = 'gammaR'
gammaderivkey = gammaderiv_typ+'R'
paramkey_a = [gammaRkey, gammaderivkey, VRkey]
return paramkey_a
def get_model_params( self, eos_d, ):
paramkey_a = self.get_paramkey( eos_d )
gammaR, gammaderiv, VR = Control.get_params(paramkey_a, eos_d)
if self.use_gammap:
qR = gammaderiv/gammaR
else:
qR = gammaderiv
return ( gammaR, qR, VR )
def get_param_scale_sub( self, eos_d ):
"""Return scale values for each parameter"""
paramkey_a = self.get_paramkey( eos_d )
gammaR, gammaderiv, VR = Control.get_params(paramkey_a, eos_d)
gammaR_scl = 1.0
VR_scl = VR
# scale value for gammaderiv is equal to 1 for both gammap and q
gammaderiv_scl = 1.0
scale_a = np.array([gammaR_scl,gammaderiv_scl,VR_scl])
return scale_a, paramkey_a
def gamma( self, V_a, eos_d ):
# OLD version fixed to zero-press ref volume
# V0, gamma0, qR = Control.get_params( ['V0','gamma0','qR'], eos_d )
# gamma_a = gamma0 *(V_a/V0)**qR
# generalized version
gammaR, qR, VR = self.get_model_params( eos_d )
gamma_a = gammaR *(V_a/VR)**qR
return gamma_a
def temp( self, V_a, TR, eos_d ):
"""
Return temperature along the adiabat through (VR, TR) implied by the
power-law gamma model (also used to scale the Debye temperature)
V_a: sample volume array
TR: temperature at V=VR
"""
# generalized version
gammaR, qR, VR = self.get_model_params( eos_d )
gamma_a = self.gamma( V_a, eos_d )
T_a = TR*np.exp( -(gamma_a - gammaR)/qR )
return T_a
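# Illustrative sketch (editor addition, not part of the original class): the
# power-law model above amounts to the closed-form relations
#   gamma(V) = gammaR*(V/VR)**q   and   T(V) = TR*exp(-(gamma(V)-gammaR)/q),
# shown here standalone with hypothetical reference values.
def _demo_gamma_powlaw(V_a, VR=10.0, gammaR=1.5, qR=1.0, TR=3000.0):
    gamma_a = gammaR*(V_a/VR)**qR
    T_a = TR*np.exp( -(gamma_a - gammaR)/qR )
    return gamma_a, T_a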
#====================================================================
class GammaFiniteStrain(GammaMod):
def get_paramkey( self, eos_d ):
if self.V0ref:
VRkey = 'V0'
gammaRkey = 'gamma0'
gammapRkey = 'gammap0'
else:
VRkey = 'VR'
gammaRkey = 'gammaR'
gammapRkey = 'gammapR'
paramkey_a = [gammaRkey, gammapRkey, VRkey]
return paramkey_a
def calc_strain_coefs( self, eos_d ):
paramkey_a = self.get_paramkey( eos_d )
gammaR, gammapR, VR = Control.get_params(paramkey_a, eos_d)
a1 = 6*gammaR
a2 = -12*gammaR +36*gammaR**2 -18*gammapR
return a1, a2
def get_param_scale_sub( self, eos_d ):
"""Return scale values for each parameter"""
paramkey_a = self.get_paramkey( eos_d )
gammaR, gammapR, VR = Control.get_params( paramkey_a, eos_d )
gammaR_scl = 1.0
gammapR_scl = 1.0
VR_scl = VR
scale_a = np.array([gammaR_scl,gammapR_scl,VR_scl])
return scale_a, paramkey_a
def calc_fstrain( self, V_a, eos_d ):
paramkey_a = self.get_paramkey( eos_d )
gammaR, gammapR, VR = Control.get_params(paramkey_a, eos_d)
fstr = 0.5*((VR/V_a)**(2./3)-1.0)
return fstr
def gamma( self, V_a, eos_d ):
a1, a2 = self.calc_strain_coefs( eos_d )
fstr_a = self.calc_fstrain( V_a, eos_d )
gamma_a = (2*fstr_a+1)*(a1+a2*fstr_a)/(6*(1+a1*fstr_a+0.5*a2*fstr_a**2))
return gamma_a
def temp( self, V_a, TR, eos_d ):
a1, a2 = self.calc_strain_coefs( eos_d )
fstr_a = self.calc_fstrain( V_a, eos_d )
T_a = TR*np.sqrt(1 + a1*fstr_a + 0.5*a2*fstr_a**2)
return T_a
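# Illustrative sketch (editor addition, not part of the original class): the
# finite-strain model above expands gamma in the Eulerian strain
#   f = ((VR/V)**(2/3) - 1)/2, with a1 = 6*gammaR and
#   a2 = -12*gammaR + 36*gammaR**2 - 18*gammapR, giving
#   T(V) = TR*sqrt(1 + a1*f + a2*f**2/2). Reference values below are hypothetical.
def _demo_gamma_finite_strain(V_a, VR=10.0, gammaR=1.5, gammapR=-1.0, TR=3000.0):
    a1 = 6*gammaR
    a2 = -12*gammaR + 36*gammaR**2 - 18*gammapR
    fstr_a = 0.5*((VR/V_a)**(2./3) - 1.0)
    gamma_a = (2*fstr_a+1)*(a1+a2*fstr_a)/(6*(1+a1*fstr_a+0.5*a2*fstr_a**2))
    T_a = TR*np.sqrt(1 + a1*fstr_a + 0.5*a2*fstr_a**2)
    return gamma_a, T_a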
#====================================================================
class ThermalPressMod(FullMod):
# Need to implement get_param_scale_sub
def press( self, V_a, T_a, eos_d ):
"""Returns Press variation along compression curve."""
V_a, T_a = fill_array( V_a, T_a )
# compress_path_mod, thermal_mod = Control.get_modtypes( ['CompressPathMod', 'ThermalMod'],
# eos_d )
# press_a = np.squeeze( compress_path_mod.press( V_a, eos_d )
# + thermal_mod.press( V_a, T_a, eos_d ) )
# return press_a
TOL = 1e-4
PV_ratio, = Control.get_consts( ['PV_ratio'], eos_d )
F_mod_a = self.free_energy(V_a,T_a,eos_d)
F_hi_mod_a = self.free_energy(V_a*(1.0+TOL),T_a,eos_d)
P_mod_a = -PV_ratio*(F_hi_mod_a-F_mod_a)/(V_a*TOL)
return P_mod_a
def energy( self, V_a, T_a, eos_d ):
"""Returns Internal Energy."""
V_a, T_a = fill_array( V_a, T_a )
compress_path_mod, thermal_mod = Control.get_modtypes( ['CompressPathMod', 'ThermalMod'],
eos_d )
energy_compress_a = compress_path_mod.energy( V_a, eos_d )
if compress_path_mod.path_const=='T':
"""
Convert free energy to internal energy
"""
free_energy_compress_a = energy_compress_a
T0, = Control.get_params(['T0'],eos_d)
# wrong
# S_a = thermal_mod.entropy( V_a, T_a, eos_d )
# dF_a = thermal_mod.calc_free_energy( V_a, T_a, eos_d )
# Ftot_a = free_energy_compress_a + dF_a
# energy_a = Ftot_a+T_a*S_a
# correct
# energy_a = thermal_mod.calc_energy_adiabat_ref(V_a,eos_d)\
# +thermal_mod.calc_energy(V_a,T_a,eos_d)
S_T0_a = thermal_mod.entropy( V_a, T0, eos_d )
energy_T0_a = free_energy_compress_a + T0*S_T0_a
energy_S0_a = energy_T0_a - thermal_mod.calc_energy(V_a,T0,eos_d)
energy_a = energy_S0_a + thermal_mod.calc_energy(V_a,T_a,eos_d)
else:
energy_a = np.squeeze( energy_compress_a
+ thermal_mod.energy( V_a, T_a, eos_d ) )
return energy_a
def bulk_modulus( self, V_a, T_a, eos_d ):
TOL = 1e-4
P_lo_a = self.press( V_a*(1.0-TOL/2), T_a, eos_d )
P_hi_a = self.press( V_a*(1.0+TOL/2), T_a, eos_d )
K_a = -V_a*(P_hi_a-P_lo_a)/(V_a*TOL)
return K_a
def dPdT( self, V_a, T_a, eos_d, Tscl=1000.0 ):
TOL = 1e-4
P_lo_a = self.press( V_a, T_a*(1.0-TOL/2), eos_d )
P_hi_a = self.press( V_a, T_a*(1.0+TOL/2), eos_d )
dPdT_a = (P_hi_a-P_lo_a)/(T_a*TOL)*Tscl
# # By a maxwell relation dPdT_V = dSdV_T
# S_lo_a = self.entropy( V_a*(1.0-TOL/2), T_a, eos_d )
# S_hi_a = self.entropy( V_a*(1.0+TOL/2), T_a, eos_d )
# dSdV_a = (S_hi_a-S_lo_a)/(V_a*TOL)
# dPdT_a = dSdV_a*Tscl
return dPdT_a
def free_energy( self, V_a, T_a, eos_d ):
"""Returns Free Energy."""
V_a, T_a = fill_array( V_a, T_a )
compress_path_mod, thermal_mod = Control.get_modtypes( ['CompressPathMod', 'ThermalMod'],
eos_d )
T0,S0 = Control.get_params(['T0','S0'],eos_d)
energy_compress_a = compress_path_mod.energy( V_a, eos_d )
if compress_path_mod.path_const=='T':
free_energy_compress_a = energy_compress_a
S_T0_a = thermal_mod.entropy( V_a, T0, eos_d )
# wrong
# S_a = thermal_mod.entropy( V_a, T_a, eos_d )
# dF_a = thermal_mod.calc_free_energy( V_a, T_a, eos_d )
# Ftot_a = free_energy_compress_a + dF_a
# energy_a = Ftot_a+T_a*S_a
# correct
# energy_a = thermal_mod.calc_energy_adiabat_ref(V_a,eos_d)\
# +thermal_mod.calc_energy(V_a,T_a,eos_d)
energy_T0_a = free_energy_compress_a + T0*S_T0_a
energy_S0_a = energy_T0_a - thermal_mod.calc_energy(V_a,T0,eos_d)
else:
energy_S0_a = energy_compress_a
Tref_a = thermal_mod.calc_temp_path(V_a,eos_d)
free_energy_S0_a = energy_S0_a - Tref_a*S0
# Fix bug for nonzero ref entropy values, need to subtract off reference
dF_a = thermal_mod.calc_free_energy( V_a, T_a, eos_d ) \
- thermal_mod.calc_free_energy( V_a, Tref_a, eos_d )
free_energy_a = free_energy_S0_a + dF_a
return free_energy_a
def entropy( self, V_a, T_a, eos_d ):
"""Returns Free Energy."""
V_a, T_a = fill_array( V_a, T_a )
thermal_mod, = Control.get_modtypes( ['ThermalMod'], eos_d )
S_a = np.squeeze( thermal_mod.entropy( V_a, T_a, eos_d ) )
return S_a
def heat_capacity( self, V_a, T_a, eos_d ):
"""Returns Free Energy."""
V_a, T_a = fill_array( V_a, T_a )
thermal_mod, = Control.get_modtypes( ['ThermalMod'], eos_d )
Cv_a = np.squeeze( thermal_mod.heat_capacity( V_a, T_a, eos_d ) )
return Cv_a
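# Illustrative sketch (editor addition, not part of the original class): the
# press() method above uses a one-sided finite difference of the free energy,
# while bulk_modulus() and dPdT() use the analogous central differences of
# press(). The pressure pattern in isolation, for any callable F(V):
def _demo_finite_diff_press(free_energy_f, V_a, PV_ratio=1.0, tol=1e-4):
    F_a = free_energy_f(V_a)
    F_hi_a = free_energy_f(V_a*(1.0+tol))
    return -PV_ratio*(F_hi_a-F_a)/(V_a*tol)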
#====================================================================
class MieGrunPtherm(FullMod):
# Need to implement get_param_scale_sub
def dPdT( self, V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
poly_blogcoef_a = Control.get_array_params( 'blogcoef', eos_d )
dPdT_a = eos_d['Ptherm_f'] (V_a)
return dPdT_a
def press( self, V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
Tref = eos_d['Tref']
Pref_a = eos_d['Pref_f'] (V_a)
dPdT_a = self.dPdT( V_a, T_a, eos_d )
dPtherm_a = (T_a-Tref)*dPdT_a
P_a = Pref_a + dPtherm_a
return P_a
def energy( self, V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
Tref = eos_d['Tref']
Eref_a = eos_d['Eref_f'] (V_a)
dPtherm_a = (T_a-Tref)*eos_d['Ptherm_f'] (V_a)
gamma_a = eos_d['gamma_f'] (V_a)
dPdT_a = self.dPdT( V_a, T_a, eos_d )
dPtherm_a = (T_a-Tref)*dPdT_a
dEtherm_a = dPtherm_a/(gamma_a/V_a)/eos_d['const_d']['PV_ratio']
E_a = Eref_a + dEtherm_a
return E_a
def bulk_modulus( self, V_a, T_a, eos_d ):
TOL = 1e-4
P_lo_a = self.press( V_a*(1.0-TOL/2), T_a, eos_d )
P_hi_a = self.press( V_a*(1.0+TOL/2), T_a, eos_d )
K_a = -V_a*(P_hi_a-P_lo_a)/(V_a*TOL)
return K_a
# Finite-difference estimate of dPdT, given a distinct name so the analytic
# dPdT defined above (which press() relies on) is not shadowed.
def dPdT_numeric( self, V_a, T_a, eos_d, Tscl=1000.0 ):
TOL = 1e-4
P_lo_a = self.press( V_a, T_a*(1.0-TOL/2), eos_d )
P_hi_a = self.press( V_a, T_a*(1.0+TOL/2), eos_d )
dPdT_a = (P_hi_a-P_lo_a)/(T_a*TOL)*Tscl
# # By a maxwell relation dPdT_V = dSdV_T
# S_lo_a = self.entropy( V_a*(1.0-TOL/2), T_a, eos_d )
# S_hi_a = self.entropy( V_a*(1.0+TOL/2), T_a, eos_d )
# dSdV_a = (S_hi_a-S_lo_a)/(V_a*TOL)
# dPdT_a = dSdV_a*Tscl
return dPdT_a
def free_energy( self, V_a, T_a, eos_d ):
"""Returns Free Energy."""
V_a, T_a = fill_array( V_a, T_a )
compress_path_mod, thermal_mod = Control.get_modtypes( ['CompressPathMod', 'ThermalMod'],
eos_d )
T0,S0 = Control.get_params(['T0','S0'],eos_d)
energy_compress_a = compress_path_mod.energy( V_a, eos_d )
if compress_path_mod.path_const=='T':
free_energy_compress_a = energy_compress_a
S_T0_a = thermal_mod.entropy( V_a, T0, eos_d )
# wrong
# S_a = thermal_mod.entropy( V_a, T_a, eos_d )
# dF_a = thermal_mod.calc_free_energy( V_a, T_a, eos_d )
# Ftot_a = free_energy_compress_a + dF_a
# energy_a = Ftot_a+T_a*S_a
# correct
# energy_a = thermal_mod.calc_energy_adiabat_ref(V_a,eos_d)\
# +thermal_mod.calc_energy(V_a,T_a,eos_d)
energy_T0_a = free_energy_compress_a + T0*S_T0_a
energy_S0_a = energy_T0_a - thermal_mod.calc_energy(V_a,T0,eos_d)
else:
energy_S0_a = energy_compress_a
Tref_a = thermal_mod.calc_temp_path(V_a,eos_d)
free_energy_S0_a = energy_S0_a - Tref_a*S0
dF_a = thermal_mod.calc_free_energy( V_a, T_a, eos_d )
free_energy_a = free_energy_S0_a + dF_a
return free_energy_a
def entropy( self, V_a, T_a, eos_d ):
"""Returns Free Energy."""
V_a, T_a = fill_array( V_a, T_a )
thermal_mod, = Control.get_modtypes( ['ThermalMod'], eos_d )
S_a = np.squeeze( thermal_mod.entropy( V_a, T_a, eos_d ) )
return S_a
def heat_capacity( self, V_a, T_a, eos_d ):
"""Returns Free Energy."""
V_a, T_a = fill_array( V_a, T_a )
thermal_mod, = Control.get_modtypes( ['ThermalMod'], eos_d )
Cv_a = np.squeeze( thermal_mod.heat_capacity( V_a, T_a, eos_d ) )
return Cv_a
#====================================================================
| 2.265625
| 2
|
scripts/venv/lib/python2.7/site-packages/cogent/parse/fastq.py
|
sauloal/cnidaria
| 3
|
12784342
|
<reponame>sauloal/cnidaria
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def MinimalFastqParser(data, strict=True):
"""yields name, seq, qual from fastq file
Arguments:
- strict: checks the quality and sequence labels are the same
"""
if type(data) == str:
data = open(data)
# fastq format is very simple, defined by blocks of 4 lines
line_num = -1
record = []
for line in data:
line_num += 1
if line_num == 4:
if strict: # make sure the seq and qual labels match
assert record[0][1:] == record[2][1:], \
'Invalid format: %s -- %s' % (record[0][1:], record[2][1:])
yield record[0][1:], record[1], record[3]
line_num = 0
record = []
record.append(line.strip())
if record:
if strict and record[0]: # make sure the seq and qual labels match
assert record[0][1:] == record[2][1:], 'Invalid format'
if record[0]: # could be just an empty line at eof
yield record[0][1:], record[1], record[3]
if type(data) == file:
data.close()
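# --- Illustrative usage sketch (editor addition): the parser accepts any
# iterable of lines, so a small in-memory record exercises it directly. ---
if __name__ == '__main__':
    example_lines = ['@seq1', 'ACGT', '+seq1', 'IIII']
    for label, seq, qual in MinimalFastqParser(example_lines, strict=True):
        print label, seq, qual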
| 2.734375
| 3
|
lib/id3c/cli/utils.py
|
UWIT-IAM/uw-redcap-client
| 21
|
12784343
|
"""
CLI utilities.
"""
import click
def running_command_name() -> str:
"""
Returns the current CLI command name as a space-separated string, or
``id3c`` if not running under any command.
"""
appname = None
context = click.get_current_context(silent = True)
if context:
appname = context.command_path
if not appname:
appname = "id3c"
return appname
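# --- Illustrative usage sketch (editor addition): the "demo" command below is
# hypothetical and exists only to show how the command path is reported. ---
if __name__ == "__main__":
    @click.command("demo")
    def demo():
        # Inside a command context this echoes the command path; outside of
        # any context it falls back to "id3c".
        click.echo(running_command_name())

    demo()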
| 2.71875
| 3
|
components/mgmtworker/scripts/start.py
|
cloudify-cosmo/cloudify-manager-blueprints
| 35
|
12784344
|
#!/usr/bin/env python
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
runtime_props = ctx.instance.runtime_properties
SERVICE_NAME = runtime_props['service_name']
HOME_DIR = runtime_props['home_dir']
@utils.retry(ValueError)
def check_worker_running():
"""Use `celery status` to check if the worker is running."""
work_dir = join(HOME_DIR, 'work')
celery_path = join(HOME_DIR, 'env', 'bin', 'celery')
result = utils.sudo([
'CELERY_WORK_DIR={0}'.format(work_dir),
celery_path,
'--config=cloudify.broker_config',
'status'
], ignore_failures=True)
if result.returncode != 0:
raise ValueError('celery status: worker not running')
ctx.logger.info('Starting Management Worker Service...')
utils.start_service(SERVICE_NAME)
utils.systemd.verify_alive(SERVICE_NAME)
try:
check_worker_running()
except ValueError:
ctx.abort_operation('Celery worker failed to start')
| 1.921875
| 2
|
backup/Version 0.7/EvaClientU.py
|
gabrieloandco/BB84py
| 0
|
12784345
|
<gh_stars>0
# Copyright (c) 2016 <NAME> (<<EMAIL>>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import socket
import pickle
from QuantumClasses import Bob
import threading
import time
import select
from Encrypt import Decrypt
import Queue
rLock = threading.Lock()
breaking = False
key = 0
def EvaTunnel(blocks,connecttoeva, connecttoalice,start_updating,stop_updating,stop_receivingeva,stop_receivingbob,breakevent):
breaking = False
host = '192.168.0.18'
port = 5001
s = socket.socket()
s.bind((host,port))
s.listen(2)
print "Tunneling Started."
connecttoeva.set()
ceva, addreva = s.accept()
print "Eva address is:" + str(addreva)
cbob, addrbob = s.accept()
connecttoalice.set()
print "Bob address is:" + str(addrbob)
receivedfromeva = threading.Event()
receivedfrombob = threading.Event()
qdatabob = Queue.Queue()
qdataeva = Queue.Queue()
RFET = threading.Thread(target=ReceiveFrom, args = (blocks,ceva,receivedfromeva,start_updating,stop_receivingeva,stop_updating,qdataeva,"Eva",breakevent))
RFBT = threading.Thread(target=ReceiveFrom, args = (blocks,cbob,receivedfrombob,start_updating,stop_receivingbob,stop_updating,qdatabob,"Bob",breakevent))
RFET.start()
RFBT.start()
while not breaking:
if receivedfromeva.is_set():
dataeva = qdataeva.get()
cbob.send(dataeva)
print "Sent to Bob" + dataeva
receivedfromeva.clear()
if receivedfrombob.is_set():
databob = qdatabob.get()
ceva.send(databob)
print "Sent to Eva" + databob
receivedfrombob.clear()
if breakevent.is_set():
breaking = True
RFET.join()
RFBT.join()
ceva.close()
cbob.close()
s.close()
def ReceiveFrom(blocks,socket,receivedfrom,start_updating,stop_receivingfrom,stop_updating,qdatafrom,person,breakevent,block=False):
breaking = False
while not breaking:
datafrom = socket.recv(324*(1+blocks)/2)
qdatafrom.put(datafrom)
print "Received from: " + person + datafrom
receivedfrom.set()
if not datafrom:
breaking = True
breakevent.set()
if datafrom == "R3c1V3!":
stop_receivingfrom.set()
stop_updating.clear()
start_updating.set()
if not stop_receivingfrom.is_set():
print DecryptMessage(datafrom,start_updating)
if block:
stop_updating.wait()
def DecryptMessage(data,start_updating):
global key
if not start_updating.is_set():
try:
print "key: " + str(key)
print "message: " + Decrypt(data,key)[0:20]
except:
print "Couldn't Decrypt"
else:
pass
def UpdateKey(blocks,socketalice,socketeva,stop_receivingbob,stop_receivingeva,stop_receivingfromtunnel,stop_receivingfromalice,stop_updating,start_updating,start_clock,breakingevent):
global key
breaking = False
def uk():
global key
try:
start_updating.wait()
stop_receivingfromalice.wait()
stop_receivingbob.wait()
stop_receivingeva.wait()
stop_receivingfromtunnel.wait()
stop_updating.clear()
rLock.acquire()
print "Updating"
ready = select.select([socketalice],[],[],3)
if ready[0]:
AliceMbits = socketalice.recv(324*(1+blocks)/2)
print "Received Alice's bases"
print "bases: " + AliceMbits
AliceM= pickle.loads(AliceMbits)
EvaM = Bob(blocks)
ResultM= AliceM.Measure(EvaM)
ResultMbits = pickle.dumps(ResultM)
socketeva.send(ResultMbits)
print "Sent State to Tunnel"
consbits = socketeva.recv(47*(1+blocks)/2)
print "Received Coincidences from Eva"
socketalice.send(consbits)
print "Sent Coincidences To Alice"
cons = pickle.loads(consbits)
newkey = EvaM.Key(ResultM,cons)
if newkey == []:
newkey = [0]
key= int("0b"+"".join(str(i) for i in newkey),2)
done = socketalice.recv(1024)
socketeva.send(done)
done = socketeva.recv(1024)
socketalice.send(done)
print done
print "Key Updated; new key: " + bin(key)
stop_receivingeva.clear()
stop_receivingbob.clear()
stop_updating.set()
start_clock.set()
start_updating.clear()
rLock.release()
return key
except:
print "Update Failed"
stop_receivingbob.clear()
stop_receivingeva.clear()
stop_updating.set()
start_clock.set()
start_updating.clear()
return 1
while not breaking:
uk()
if breakingevent.is_set():
breaking = True
socketalice.close()
socketeva.close()
if True:
blocks=int(raw_input('give me blocks: '))
delay = 20
connecttoeva = threading.Event()
connecttoalice = threading.Event()
start_updating = threading.Event()
stop_receivingeva = threading.Event()
stop_receivingbob = threading.Event()
stop_updating=threading.Event()
start_clock = threading.Event()
breakevent = threading.Event()
Tunnel = threading.Thread(target=EvaTunnel, args=(blocks,connecttoeva, connecttoalice,start_updating,stop_updating,stop_receivingeva,stop_receivingbob,breakevent))
Tunnel.start()
connecttoeva.wait()
hostEva='192.168.0.18'
portEva = 5001
seva = socket.socket()
seva.connect((hostEva,portEva))
connecttoalice.wait()
hostAlice='192.168.0.18'
portAlice = 5000
salice = socket.socket()
salice.connect((hostAlice,portAlice))
receivedfromtunnel = threading.Event()
receivedfromalice = threading.Event()
stop_receivingfromtunnel = threading.Event()
stop_receivingfromalice = threading.Event()
qdatatunnel = Queue.Queue()
qdataalice = Queue.Queue()
RFTT = threading.Thread(target=ReceiveFrom, args = (blocks,seva,receivedfromtunnel,start_updating,stop_receivingfromtunnel,stop_updating,qdatatunnel,"Tunnel",breakevent,True))
RFAT = threading.Thread(target=ReceiveFrom, args = (blocks,salice,receivedfromalice,start_updating,stop_receivingfromalice,stop_updating,qdataalice,"Alice",breakevent, True))
RFTT.start()
RFAT.start()
uT = threading.Thread(target=UpdateKey, args=(blocks,salice,seva,stop_receivingbob,stop_receivingeva,stop_receivingfromtunnel,stop_receivingfromalice,stop_updating,start_updating,start_clock,breakevent))
uT.start()
stop_updating.set()
start_clock.set()
while not breaking:
if receivedfromalice.is_set():
datatunnel = qdatatunnel.get()
seva.send(datatunnel)
print "Sent to Tunnel"
receivedfromalice.clear()
if receivedfromtunnel.is_set():
dataalice = qdataalice.get()
salice.send(dataalice)
print "Sent to Alice"
receivedfromtunnel.clear()
if breakevent.is_set():
breaking = True
uT.join()
RFTT.join()
RFAT.join()
salice.close()
seva.close()
| 2.046875
| 2
|
src/fhir_types/FHIR_ClaimResponse.py
|
anthem-ai/fhir-types
| 2
|
12784346
|
from typing import Any, List, Literal, TypedDict
from .FHIR_Attachment import FHIR_Attachment
from .FHIR_ClaimResponse_AddItem import FHIR_ClaimResponse_AddItem
from .FHIR_ClaimResponse_Adjudication import FHIR_ClaimResponse_Adjudication
from .FHIR_ClaimResponse_Error import FHIR_ClaimResponse_Error
from .FHIR_ClaimResponse_Insurance import FHIR_ClaimResponse_Insurance
from .FHIR_ClaimResponse_Item import FHIR_ClaimResponse_Item
from .FHIR_ClaimResponse_Payment import FHIR_ClaimResponse_Payment
from .FHIR_ClaimResponse_ProcessNote import FHIR_ClaimResponse_ProcessNote
from .FHIR_ClaimResponse_Total import FHIR_ClaimResponse_Total
from .FHIR_code import FHIR_code
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_dateTime import FHIR_dateTime
from .FHIR_Element import FHIR_Element
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_Meta import FHIR_Meta
from .FHIR_Narrative import FHIR_Narrative
from .FHIR_Period import FHIR_Period
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
from .FHIR_uri import FHIR_uri
# This resource provides the adjudication details from the processing of a Claim resource.
FHIR_ClaimResponse = TypedDict(
"FHIR_ClaimResponse",
{
# This is a ClaimResponse resource
"resourceType": Literal["ClaimResponse"],
# The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes.
"id": FHIR_id,
# The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource.
"meta": FHIR_Meta,
# A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc.
"implicitRules": FHIR_uri,
# Extensions for implicitRules
"_implicitRules": FHIR_Element,
# The base language in which the resource is written.
"language": FHIR_code,
# Extensions for language
"_language": FHIR_Element,
# A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety.
"text": FHIR_Narrative,
# These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope.
"contained": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# A unique identifier assigned to this claim response.
"identifier": List[FHIR_Identifier],
# The status of the resource instance.
"status": FHIR_code,
# Extensions for status
"_status": FHIR_Element,
# A finer grained suite of claim type codes which may convey additional information such as Inpatient vs Outpatient and/or a specialty service.
"type": FHIR_CodeableConcept,
# A finer grained suite of claim type codes which may convey additional information such as Inpatient vs Outpatient and/or a specialty service.
"subType": FHIR_CodeableConcept,
# A code to indicate whether the nature of the request is: to request adjudication of products and services previously rendered; or requesting authorization and adjudication for provision in the future; or requesting the non-binding adjudication of the listed products and services which could be provided in the future.
"use": FHIR_code,
# Extensions for use
"_use": FHIR_Element,
# The party to whom the professional services and/or products have been supplied or are being considered and for whom actual or forecast reimbursement is sought.
"patient": FHIR_Reference,
# The date this resource was created.
"created": FHIR_dateTime,
# Extensions for created
"_created": FHIR_Element,
# The party responsible for authorization, adjudication and reimbursement.
"insurer": FHIR_Reference,
# The provider which is responsible for the claim, predetermination or preauthorization.
"requestor": FHIR_Reference,
# Original request resource reference.
"request": FHIR_Reference,
# The outcome of the claim, predetermination, or preauthorization processing.
"outcome": FHIR_code,
# Extensions for outcome
"_outcome": FHIR_Element,
# A human readable description of the status of the adjudication.
"disposition": FHIR_string,
# Extensions for disposition
"_disposition": FHIR_Element,
# Reference from the Insurer which is used in later communications which refers to this adjudication.
"preAuthRef": FHIR_string,
# Extensions for preAuthRef
"_preAuthRef": FHIR_Element,
# The time frame during which this authorization is effective.
"preAuthPeriod": FHIR_Period,
# Type of Party to be reimbursed: subscriber, provider, other.
"payeeType": FHIR_CodeableConcept,
# A claim line. Either a simple (a product or service) or a 'group' of details which can also be a simple items or groups of sub-details.
"item": List[FHIR_ClaimResponse_Item],
# The first-tier service adjudications for payor added product or service lines.
"addItem": List[FHIR_ClaimResponse_AddItem],
# The adjudication results which are presented at the header level rather than at the line-item or add-item levels.
"adjudication": List[FHIR_ClaimResponse_Adjudication],
# Categorized monetary totals for the adjudication.
"total": List[FHIR_ClaimResponse_Total],
# Payment details for the adjudication of the claim.
"payment": FHIR_ClaimResponse_Payment,
# A code, used only on a response to a preauthorization, to indicate whether the benefits payable have been reserved and for whom.
"fundsReserve": FHIR_CodeableConcept,
# A code for the form to be used for printing the content.
"formCode": FHIR_CodeableConcept,
# The actual form, by reference or inclusion, for printing the content or an EOB.
"form": FHIR_Attachment,
# A note that describes or explains adjudication results in a human readable form.
"processNote": List[FHIR_ClaimResponse_ProcessNote],
# Request for additional supporting or authorizing information.
"communicationRequest": List[FHIR_Reference],
# Financial instruments for reimbursement for the health care products and services specified on the claim.
"insurance": List[FHIR_ClaimResponse_Insurance],
# Errors encountered during the processing of the adjudication.
"error": List[FHIR_ClaimResponse_Error],
},
total=False,
)
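# --- Illustrative usage sketch (editor addition): because the TypedDict is
# declared with total=False, a partial resource can be expressed; the field
# values below are invented for demonstration only. ---
EXAMPLE_CLAIM_RESPONSE: FHIR_ClaimResponse = {
    "resourceType": "ClaimResponse",
    "status": "active",
    "use": "claim",
    "outcome": "complete",
}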
| 1.710938
| 2
|
python_scripts/nand/legacyftl.py
|
slango20/iphone-dataprotection
| 19
|
12784347
|
<filename>python_scripts/nand/legacyftl.py
from carver import NANDCarver
from construct.core import Struct
from construct.macros import ULInt32, ULInt16, Array, ULInt8, Padding
from pprint import pprint
from structs import SpareData
from util import hexdump
from vfl import VFL
import plistlib
"""
openiboot/plat-s5l8900/ftl.c
openiboot/plat-s5l8900/includes/s5l8900/ftl.h
"""
FTLCxtLog = Struct("FTLCxtLog",
ULInt32("usn"),
ULInt16("wVbn"),
ULInt16("wLbn"),
ULInt32("wPageOffsets"),
ULInt16("pagesUsed"),
ULInt16("pagesCurrent"),
ULInt32("isSequential")
)
FTLCxtElement2 = Struct("FTLCxtElement2",
ULInt16("field_0"),
ULInt16("field_2")
)
FTLCxt = Struct("FTLCxt",
ULInt32("usnDec"),
ULInt32("nextblockusn"),
ULInt16("wNumOfFreeVb"),
ULInt16("nextFreeIdx"),
ULInt16("swapCounter"),
Array(20, ULInt16("awFreeVb")),
ULInt16("field_36"),
Array(18, ULInt32("pages_for_pawMapTable")),
Array(36, ULInt32("pages_for_pawEraseCounterTable")),
Array(34, ULInt32("pages_for_wPageOffsets")),
ULInt32("pawMapTable"),
ULInt32("pawEraseCounterTable"),
ULInt32("wPageOffsets"),
Array(18, FTLCxtLog),
ULInt32("eraseCounterPagesDirty"),
ULInt16("unk3"),
Array(3, ULInt16("FTLCtrlBlock")),
ULInt32("FTLCtrlPage"),
ULInt32("clean"),
Array(36, ULInt32("pages_for_pawReadCounterTable")),
ULInt32("pawReadCounterTable"),
Array(5, FTLCxtElement2),
ULInt32("field_3C8"),
ULInt32("totalReadCount"),
ULInt32("page_for_FTLCountsTable"),
ULInt32("hasFTLCountsTable"),
Padding(0x420), #, ULInt8("field_3D8")),
ULInt32("versionLower"),
ULInt32("versionUpper")
)
FTL_CTX_TYPE = 0x43
FTL_BLOCK_MAP = 0x44
FTL_ERASE_COUNTER = 0x46
FTL_MOUNTED = 0x47
FTL_CTX_TYPE_MAX = 0x4F
USER_TYPE = 0x40
USER_LAST_TYPE = 0x41 #last user page in superblock?
class FTL(object):
def __init__(self, nand, vfl):
self.nand = nand
self.vfl = vfl
self.pawMapTable = {} #maps logical blocks to virtual blocks
self.pLogs = {}
if not self.FTL_open():
self.FTL_restore()
def FTL_open(self):
minUsnDec = 0xffffffff
ftlCtrlBlock = 0xffff
for vb in self.vfl.VFL_get_FTLCtrlBlock():
s, d = self.vfl.read_single_page(vb * self.vfl.pages_per_sublk)
if not s:
continue
if s.type >= FTL_CTX_TYPE and s.type <= FTL_CTX_TYPE_MAX:
if s.usn < minUsnDec:
ftlCtrlBlock = vb
minUsnDec = s.usn
print ftlCtrlBlock
self.ftlCtrlBlock = ftlCtrlBlock
for p in xrange(self.vfl.pages_per_sublk-1,1, -1):
s, d = self.vfl.read_single_page(ftlCtrlBlock * self.vfl.pages_per_sublk + p)
if not s:
continue
#print s
#print p
if s.type == FTL_CTX_TYPE:
print s.usn
ctx = FTLCxt.parse(d)
if ctx.versionLower == 0x46560001:
print ctx
assert ctx.FTLCtrlPage == (ftlCtrlBlock * self.vfl.pages_per_sublk + p)
break
else:
print "Unclean shutdown, last type 0x%x" % s.type
return False
self.ctx = ctx
print "FTL_open OK !"
return True
def determine_block_type(self, block):
maxUSN = 0
isSequential = True
for page in xrange(self.vfl.pages_per_sublk-1,1, -1):
s, _ = self.vfl.read_single_page(block * self.vfl.pages_per_sublk + page)
if not s:
continue
if s.usn > maxUSN:
maxUSN = s.usn
if s.lpn % self.vfl.pages_per_sublk != page:
isSequential = False
return isSequential, maxUSN
return isSequential, maxUSN
def FTL_restore(self):
self.pLogs = self.vfl.nand.loadCachedData("pLogs")
self.pawMapTable = self.vfl.nand.loadCachedData("pawMapTable")
if self.pLogs and self.pawMapTable:
print "Found cached FTL restore information"
return
self.pawMapTable = {}
self.pLogs = {}
ctx = None
for p in xrange(self.vfl.pages_per_sublk-1,1, -1):
s, d = self.vfl.read_single_page(self.ftlCtrlBlock * self.vfl.pages_per_sublk + p)
if not s:
continue
if s.type == FTL_CTX_TYPE:
print s.usn
ctx = FTLCxt.parse(d)
if ctx.versionLower == 0x46560001:
print ctx
assert ctx.FTLCtrlPage == (self.ftlCtrlBlock * self.vfl.pages_per_sublk + p)
print "Found most recent ctx"
break
if not ctx:
print "FTL_restore fail did not find ctx"
raise
blockMap = {}
self.nonSequential = {}
print "FTL_restore in progress ..."
for sblock in xrange(self.vfl.userSuBlksTotal + 23):
for page in xrange(self.vfl.pages_per_sublk):
s, d = self.vfl.read_single_page(sblock * self.vfl.pages_per_sublk + page)
if not s:
continue
if s.type >= FTL_CTX_TYPE and s.type <= FTL_CTX_TYPE_MAX:
break
if s.type != USER_TYPE and s.type != USER_LAST_TYPE:
print "Weird page type %x at %x %x" % (s.type, sblock, page)
continue
if s.lpn % self.vfl.pages_per_sublk != page:
print "Block %d non sequential" % sblock
self.nonSequential[sblock] = 1
blockMap[sblock] = (s.lpn / self.vfl.pages_per_sublk, s.usn)
break
z = dict([(i, [(a, blockMap[a][1]) for a in blockMap.keys() if blockMap[a][0] ==i]) for i in xrange(self.vfl.userSuBlksTotal)])
for k,v in z.items():
if len(v) == 2:
print k, v
vbA, usnA = v[0]
vbB, usnB = v[1]
if usnA > usnB: #smallest USN is map block, highest log block
self.pawMapTable[k] = vbB
self.restoreLogBlock(k, vbA)
else:
self.pawMapTable[k] = vbA
self.restoreLogBlock(k, vbB)
elif len(v) > 2:
raise Exception("fufu", k, v)
else:
self.pawMapTable[k] = v[0][0]
self.vfl.nand.cacheData("pLogs", self.pLogs)
self.vfl.nand.cacheData("pawMapTable", self.pawMapTable)
def restoreLogBlock(self, lbn, vbn):
log = {"wVbn": vbn, "wPageOffsets": {}}
for page in xrange(self.vfl.pages_per_sublk):
s, d = self.vfl.read_single_page(vbn * self.vfl.pages_per_sublk + page)
if not s:
break
log["wPageOffsets"][s.lpn % self.vfl.pages_per_sublk] = page
self.pLogs[lbn] = log
def mapPage(self, lbn, offset):
if self.pLogs.has_key(lbn):
if self.pLogs[lbn]["wPageOffsets"].has_key(offset):
offset = self.pLogs[lbn]["wPageOffsets"][offset]
#print "mapPage got log %d %d" % (lbn, offset)
return self.pLogs[lbn]["wVbn"] * self.vfl.pages_per_sublk + offset
if not self.pawMapTable.has_key(lbn):
return 0xFFFFFFFF
return self.pawMapTable[lbn] * self.vfl.pages_per_sublk + offset
def readLPN(self, lpn, key=None):
lbn = lpn / self.vfl.pages_per_sublk
offset = lpn % self.vfl.pages_per_sublk
vpn = self.mapPage(lbn, offset)
if vpn == 0xFFFFFFFF:
print "lbn not found %d" % lbn
return "\xFF" * self.nand.pageSize
s,d = self.vfl.read_single_page(vpn, key, lpn)
if not s:
return None
if s.lpn != lpn:
raise Exception("FTL translation FAIL spare lpn=%d vs expected %d" % (s.lpn, lpn))
return d
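# --- Illustrative usage sketch (editor addition; `nand` and `vfl` would be
# NANDCarver/VFL instances built elsewhere in this package): ---
#   ftl = FTL(nand, vfl)
#   data = ftl.readLPN(0)   # read logical page 0 through the FTL mapping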
| 1.945313
| 2
|
labugr/integrate/__init__.py
|
lserraga/labUGR
| 1
|
12784348
|
from .quadpack import quad, dblquad, tplquad, nquad
excluded = ['excluded', 'quadpack']
__all__ = [s for s in dir() if not ((s in excluded)or s.startswith('_'))]
from labugr.testing.utils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 2.03125
| 2
|
src/rollit/ast/util.py
|
russells-crockpot/roll-with-it
| 2
|
12784349
|
<filename>src/rollit/ast/util.py
"""
"""
import re
from contextlib import suppress
from .elements import StringLiteral, BinaryOp, Negation, SpecialReference, OneSidedOperator, \
TwoSidedOperator, OverloadOnlyOperator
from ..util import is_valid_iterable, ensure_tuple
__all__ = [
'was_evaluated',
'flatten_tuple',
'negate',
'get_operator',
]
def was_evaluated(item):
"""
"""
return (item is None or item == SpecialReference.NONE
or isinstance(item, (int, bool, float, StringLiteral)))
def flatten_tuple(item):
"""
"""
if not is_valid_iterable(item):
return ensure_tuple(item)
if item and is_valid_iterable(item[0]) and len(item) == 1:
item = item[0]
if item and is_valid_iterable(item[0]):
item = (*item[0], item[1])
return tuple(item)
# pylint: disable=too-many-function-args
def negate(element, codeinfo=None, script=None):
"""
"""
if element is None or element == SpecialReference.NONE:
return 1
if isinstance(element, (int, float, bool)):
return 0 if element else 1
if isinstance(element, StringLiteral):
return 0 if element.value else 1
if isinstance(element, Negation):
return element.value
if isinstance(element, BinaryOp) and element.op == TwoSidedOperator.NOT_EQUALS:
return BinaryOp(element.left,
TwoSidedOperator.EQUALS,
element.right,
codeinfo=element.codeinfo)
return Negation(element, codeinfo=codeinfo)
# pylint: disable=no-value-for-parameter
def get_operator(symbol):
"""
"""
with suppress(ValueError):
return OneSidedOperator(symbol)
with suppress(ValueError):
return TwoSidedOperator(symbol)
with suppress(ValueError):
return OverloadOnlyOperator(re.sub(r'\s+', '', symbol))
raise ValueError(f'Unknown operator: {symbol}')
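# --- Illustrative usage sketch (editor addition): literal folding in negate()
# needs no codeinfo, so it can be exercised directly (e.g. via
# `python -m rollit.ast.util`). ---
if __name__ == '__main__':
    print(negate(None))   # -> 1: none-like values negate to a truthy 1
    print(negate(0))      # -> 1: falsy numbers negate to 1
    print(negate(3.5))    # -> 0: truthy numbers negate to 0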
| 2.640625
| 3
|
RadarDataProcessAlg/Mp4ToGif.py
|
CaptainEven/PyScripts
| 5
|
12784350
|
<gh_stars>1-10
# encoding=utf-8
import argparse
import os
import cv2
import imageio
# name of the video file to convert
input_path = os.path.abspath('./output.mp4')
# targetFormat must be .gif
def ToGif(input_path,
target_format,
num_frames=60, # max frame number
out_size=(1600, 797)): # (640, 360), (854, 480), (1920, 1080)
"""
Convert the video to GIF format.
"""
output_path = os.path.splitext(input_path)[0] + target_format  # e.g. './output' + '.gif'
print('converting ', input_path, ' to ', output_path)
# -----
reader = imageio.get_reader(input_path)
fps = reader.get_meta_data()['fps']
writer = imageio.get_writer(output_path, fps=fps)
for i, frame in enumerate(reader):
if i < num_frames:
frame = cv2.resize(frame, out_size, interpolation=cv2.INTER_CUBIC)
writer.append_data(frame)
# print(f'frame: {frame}')
writer.close()
# -----
print("Converting done.")
class Video2GifConverter(object):
def __init__(self, video_path, out_f_path):
if not os.path.isfile(video_path):
print('[Err]: invalid video file path.')
return
self.in_f_path = video_path
self.out_f_path = out_f_path
def convert(self):
reader = imageio.get_reader(self.in_f_path)
fps = reader.get_meta_data()['fps']
writer = imageio.get_writer(self.out_f_path, fps=fps)
cnt = 0
for frame in reader:
writer.append_data(frame)
cnt += 1
print('Total {:d} frames.'.format(cnt))
writer.close()
print('Converting done.')
# ToGif(input_path, '.gif')
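# --- Illustrative usage sketch (editor addition; paths are hypothetical): the
# class-based converter keeps every frame at the original resolution, unlike
# ToGif() which resizes and truncates. ---
# converter = Video2GifConverter('./output.mp4', './output.gif')
# converter.convert()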
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--video',
type=str,
default='./output.mp4',
help='Video path to be processed')
parser.add_argument('--frames',
type=int,
default=60,
help='Number of frames to be processed.')
opt = parser.parse_args()
ToGif(opt.video, '.gif', opt.frames)
| 2.90625
| 3
|