| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| src/ParseIngredientsBase.py | GregCKrause/hello-openai | 0 | 12782151 |
# Standard library
import argparse
import os
# Third party
import openai
# Consts
PROMPT = """
Given a cooking ingredient and quantity, return only the ingredient name
2 cups flour
Flour
Cinnamon ~1 tablespoon
Cinnamon
About one tsp salt
Salt
1.5-2 cups grated raw zucchini
Raw zucchini
1c walnuts (optional)
Walnuts
%s
"""
def parse(ingredient_description):
try:
openai.api_key = os.environ["OPENAI_API_KEY"]
response = openai.Completion.create(
engine="davinci",
prompt=PROMPT % (ingredient_description),
temperature=0,
max_tokens=64,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n"]
)
return response.choices[0].text
except Exception:
return ingredient_description
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse ingredients using OpenAI")
parser.add_argument("-i", "--ingredient", help="Ingredient description to parse")
args = parser.parse_args()
print(parse(args.ingredient))
| 2.984375 | 3 |
| reviews/views.py | YaroslavChyhryn/EshopReviewAPI | 0 | 12782152 |
from .serializers import ReviewSerializer, ShopSerializer, UserReviewSerializer
from rest_framework import viewsets
from rest_framework.pagination import LimitOffsetPagination
from .models import ReviewModel, ShopModel
from rest_framework.generics import ListAPIView
from rest_framework import filters
from django.db.models import Count, Avg
from rest_framework import permissions
from .permissions import IsOwnerOrReadOnly
from django.contrib.auth import get_user_model
UserModel = get_user_model()
class ReviewViewSet(viewsets.ModelViewSet):
"""
ViewSet for Reviews
"""
queryset = ReviewModel.objects.all()
serializer_class = ReviewSerializer
pagination_class = LimitOffsetPagination
permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ShopView(ListAPIView):
queryset = ShopModel.objects.annotate(review_count=Count('reviews'), avg_rating=Avg('reviews__rating'))
serializer_class = ShopSerializer
filter_backends = [filters.OrderingFilter, filters.SearchFilter]
ordering_fields = ['review_count', 'avg_rating']
ordering = ['avg_rating']
search_fields = ['domain']
class UserReviewView(ListAPIView):
queryset = UserModel.objects.all()
serializer_class = UserReviewSerializer
| 2.015625 | 2 |
| graalpython/com.oracle.graal.python.test/src/tests/cpyext/test_object.py | cmueh/graalpython | 0 | 12782153 |
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from . import CPyExtType, CPyExtTestCase, CPyExtFunction
__dir__ = __file__.rpartition("/")[0]
class AttroClass(object):
def __getattribute__(self, key):
if key == "foo":
return "foo"
else:
return object.__getattribute__(self, key)
class TestObject(object):
def test_add(self):
TestAdd = CPyExtType("TestAdd",
"""
PyObject* test_add(PyObject* a, PyObject* b) {
return PyTuple_Pack(2, a, b);
}
""",
nb_add="test_add"
)
tester = TestAdd()
assert tester + 12 == (tester, 12)
def test_pow(self):
TestPow = CPyExtType("TestPow",
"""
PyObject* test_pow(PyObject* a, PyObject* b, PyObject* c) {
return PyTuple_Pack(3, a, b, c);
}
""",
nb_power="test_pow"
)
tester = TestPow()
assert tester ** 12 == (tester, 12, None), tester ** 12
assert 12 ** tester == (12, tester, None), 12 ** tester
assert pow(tester, 48, 2) == (tester, 48, 2), pow(tester, 48, 2)
assert pow(48, tester, 2) == (48, tester, 2), pow(48, tester, 2)
def test_int(self):
TestInt = CPyExtType("TestInt",
"""
PyObject* test_int(PyObject* self) {
return PyLong_FromLong(42);
}
""",
nb_int="test_int"
)
tester = TestInt()
assert int(tester) == 42
def test_index(self):
TestIndex = CPyExtType("TestIndex",
"""
PyObject* test_index(PyObject* self) {
return PyLong_FromLong(1);
}
""",
nb_index="test_index"
)
tester = TestIndex()
assert [0, 1][tester] == 1
def test_getattro(self):
return # TODO: not working yet
# XXX: Cludge to get type into C
sys.modules["test_getattro_AttroClass"] = AttroClass
try:
TestInt = CPyExtType("TestGetattro",
"""
""",
ready_code="""
PyObject* AttroClass = PyDict_GetItemString(PyImport_GetModuleDict(), "test_getattro_AttroClass");
TestGetattroType.tp_getattro = ((PyTypeObject*)AttroClass)->tp_getattro;
"""
)
finally:
del sys.modules["test_getattro_AttroClass"]
tester = TestInt()
assert tester.foo == "foo"
def test_dict(self):
TestDict = CPyExtType("TestDict",
"""static PyObject* custom_dict = NULL;
static PyObject* get_dict(PyObject* self, PyObject* kwargs) {
Py_INCREF(custom_dict);
return custom_dict;
}
""",
ready_code="""
custom_dict = PyDict_New();
PyDict_SetItemString(custom_dict, "hello", PyUnicode_FromString("first custom property"));
TestDictType.tp_dict = custom_dict;
""",
post_ready_code="""
PyDict_SetItemString(TestDictType.tp_dict, "world", PyUnicode_FromString("second custom property"));
""",
tp_methods='{"get_dict", get_dict, METH_NOARGS, ""}'
)
tester = TestDict()
assert tester.hello == "first custom property"
assert tester.world == "second custom property"
assert "hello" in tester.get_dict().keys() and "world" in tester.get_dict().keys(), "was: %s" % tester.get_dict().keys()
tester.get_dict()["extra"] = "blah"
assert tester.extra == "blah"
def test_repr(self):
TestRepr = CPyExtType("TestRepr", '')
tester = TestRepr()
try:
repr(tester)
except Exception:
assert False
assert True
class TestObjectFunctions(CPyExtTestCase):
def compile_module(self, name):
type(self).mro()[1].__dict__["test_%s" % name].create_module(name)
super().compile_module(name)
test_PyCallable_Check = CPyExtFunction(
lambda args: callable(args[0]),
lambda: (
(len,),
(sum,),
(int,),
("hello",),
(3,),
(None,),
),
arguments=["PyObject* callable"],
resultspec="i",
argspec="O",
)
| 1.234375 | 1 |
| code/python/modules/loggerOld.py | jnromero/steep | 0 | 12782154 |
<gh_stars>0
from __future__ import print_function,division,absolute_import
import os
from twisted.python import log
import time
import sys
import pickle
#start the logger
class TwistedLogger:
def __init__(self,config):
self.fileCount=1
self.entries=0
self.fullLogFile=config['fullLogFile']
self.currentLogFile=config['logFolder']+"/log!!!NUMBERHERE!!!.pickle"
# log.startLogging(open(self.fullLogFile,'ab'))#,setStdout=False)
log.startLogging(sys.stdout,setStdout=True)
log.addObserver(self.writeToFile)
def addLine(self,thisType,thisLine):
toAdd=(time.time(),thisType,thisLine)
self.entries+=1
file = open(self.fullLogFile,'ab')
#protocol for python 3 compatibility
pickle.dump(toAdd,file,protocol=2)
file.close()
thisFile=self.currentLogFile.replace("!!!NUMBERHERE!!!",str(self.fileCount))
file = open(thisFile,'ab')
#protocol for python 3 compatibility
pickle.dump(toAdd,file,protocol=2)
file.close()
if self.entries>500:
self.entries=0
self.fileCount+=1
def writeToFile(self,this):
if 'log_namespace' not in this:
self.addLine("regular",str(this))
elif this['isError']==1:
if 'log_text' in this:
for k in this['log_text'].split("\n"):
self.addLine("stdErr",k)
else:
self.addLine("stdErr",str(this['log_io']))
elif this['log_namespace']=='stdout':
self.addLine("stdOut",str(this['log_io']))
elif this['log_namespace']=='twisted.python.log':
if this['log_io'].find("\"GET")>-1:
thisLine=""
else:
self.addLine("stdTwisted",str(this['log_io']))
self.addLine("stdTwisted",str(this))
else:
if "log_text" in this:
self.addLine("stdOther",str(this['log_text']))
elif "log_format" in this:
self.addLine("stdOther",str(this['log_format']))
else:
self.addLine("stdOther",str(this['message']))
| 2.421875 | 2 |
| leetcode/0-250/260-1071. Greatest Common Divisor of Strings.py | palash24/algorithms-and-data-structures | 23 | 12782155 |
# 1071. Greatest Common Divisor of Strings
class Solution:
def gcdOfStrings(self, str1: str, str2: str) -> str:
if len(str1) == len(str2):
return str1 if str1 == str2 else ''
else:
if len(str1) < len(str2):
str1, str2 = str2, str1
if str1[:len(str2)] == str2:
return self.gcdOfStrings(str1[len(str2):], str2)
else:
return ''
print(Solution().gcdOfStrings(str1 = "ABCABC", str2 = "ABC"))
print(Solution().gcdOfStrings(str1 = "ABABAB", str2 = "ABAB"))
| 3.515625 | 4 |
| netbox/dcim/exceptions.py | BrnoPCmaniak/netbox | 6 | 12782156 |
<reponame>BrnoPCmaniak/netbox
class LoopDetected(Exception):
"""
A loop has been detected while tracing a cable path.
"""
pass
| 1.648438 | 2 |
| venv/lib/python3.8/site-packages/poetry/core/masonry/utils/include.py | GiulianaPola/select_repeats | 2 | 12782157 |
/home/runner/.cache/pip/pool/14/6a/45/959613aab7d0674a9eb24e47bacc8b7eaa8cef7583b8cdb8b75e967ae6
| 0.804688 | 1 |
| ftp_client/ftp_client.py | nathanwiens/sdk-samples | 44 | 12782158 |
<reponame>nathanwiens/sdk-samples
"""
This app will create a file and then upload it to an FTP server.
The file will be deleted when the app is stopped.
"""
from csclient import EventingCSClient
from ftplib import FTP
cp = EventingCSClient('ftp_client')
TEMP_FILE = 'my_file.txt'
cp.log('ftp_client send_ftp_file()...')
# Create a temporary file to upload to an FTP server
try:
f = open(TEMP_FILE, 'w')
f.write('This is a test!!')
f.write('This is another test!!')
f.close()
except OSError as msg:
cp.log('Failed to open file: {}. error: {}'.format(TEMP_FILE, msg))
fh = None  # ensure the handle exists for the finally block even if the upload never starts
try:
# Connect to an FTP test server
ftp = FTP('speedtest.tele2.net')
# Login to the server
reply = ftp.login('anonymous', 'anonymous')
cp.log('FTP login reply: {}'.format(reply))
# Change to the proper directory for upload
ftp.cwd('/upload/')
# Open the file and upload it to the server
fh = open(TEMP_FILE, 'rb')
reply = ftp.storlines('STOR a.txt', fh)
cp.log('FTP STOR reply: {}'.format(reply))
except Exception as e:
cp.log('Exception occurred! exception: {}'.format(e))
raise
finally:
if fh:
fh.close()
| 2.8125 | 3 |
| tests/test_base58check.py | joeblackwaslike/base58check | 1 | 12782159 |
<filename>tests/test_base58check.py<gh_stars>1-10
import unittest
import base58check
TEST_DATA = [
(b'1BoatSLRHtKNngkdXEeobR76b53LETtpyT',
b'\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde'),
(b'3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC',
b'\x05\xf8\x15\xb06\xd9\xbb\xbc\xe5\xe9\xf2\xa0\n\xbd\x1b\xf3\xdc\x91\xe9U\x10\xcd\x001\x07'),
(b'mkwV3DZkgYwKaXkphBtcXAjsYQEqZ8aB3x',
b'o;|F\xa5\xa6\x00\xb2\x98k\xd8\x04\x13|\xf9\x1d\xbbZE\xa2|\xa8\x00l+'),
(b'n1tpDjEJw32qGwkdQKPfACpcTtCa6hDVBw',
b'o\xdf\x84\xed0\x95\xc6_\xddu\xf4j\xd8|3\xe0\xb1\xf4\x14\xff\xe6\xf8\t\x8f\xaa'),
(b'LeF6vC9k1qfFDEj6UGjM5e4fwHtiKsakTd',
b'0\xd0\xa2\x07\xd1\x82\xa7\xe0]\x7fD\xb6\\5\xf9\xe1\xd1v\xeb\xde\xa7\xba\x08\x90\\'),
(b'muE4dcYXagWA7WT8ZnCriiy65FELikhdUy',
b'o\x96_\xfa\xccH\xe6\x87\xe0\xd3NJ\x8a\x86\x83*\x8dl\xfc\xf0{\xf1#\xb76')
]
CUSTOM_CHARSET = b'rpshnaf39wBUDNEGHJKLM4PQRST7VWXYZ2bcdeCg65jkm8oFqi1tuvAxyz'
CUSTOM_CHARSET_DATA = [
(b'rDTXLQ7ZKZVKz33zJbHjgVShjsBnqMBhmN',
b'\x00\x88\xa5\xa5|\x82\x9f@\xf2^\xa83\x85\xbb\xdel=\x8bL\xa0\x82\xedC\x86A')
]
class Base58Tests(unittest.TestCase):
def test_base(self):
"""Assert that BASE is equal to 58"""
self.assertEqual(58, len(base58check.DEFAULT_CHARSET))
def test_encoding_text_raises_typeerror(self):
"""Assert encoding text (nonbinary) raises TypeError"""
with self.assertRaises(TypeError):
base58check.b58encode('test text')
def test_encoding(self):
"""Assert correct encoding and return type"""
for encoded, raw in TEST_DATA:
result = base58check.b58encode(raw)
self.assertEqual(result, encoded)
self.assertIsInstance(result, bytes)
def test_decoding(self):
"""Assert correct decoding and return type from bytes"""
for encoded, raw in TEST_DATA:
result = base58check.b58decode(encoded)
self.assertEqual(result, raw)
self.assertIsInstance(result, bytes)
def test_decoding_from_unicode(self):
"""Assert correct decoding and return type from text"""
for encoded, raw in TEST_DATA:
result = base58check.b58decode(encoded.decode())
self.assertEqual(result, raw)
self.assertIsInstance(result, bytes)
def test_custom_charset_encoding(self):
"""Assert correct encoding and return type for custom character set"""
for encoded, raw in CUSTOM_CHARSET_DATA:
result = base58check.b58encode(raw, charset=CUSTOM_CHARSET)
self.assertEqual(result, encoded)
self.assertIsInstance(result, bytes)
def test_custom_charset_decoding(self):
"""Assert correct decoding and return type for custom character set"""
for encoded, raw in CUSTOM_CHARSET_DATA:
result = base58check.b58decode(encoded, charset=CUSTOM_CHARSET)
self.assertEqual(result, raw)
self.assertIsInstance(result, bytes)
if __name__ == '__main__':
unittest.main()
| 2 | 2 |
| mining_rewards/mining_rewards.py | terra-project/research | 43 | 12782160 |
<gh_stars>10-100
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
import math
# common hack to import from sibling directory utils
# alternatives involve making all directories packages, creating setup files etc
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from utils.gbm import gbm, gbm_cyclical
"""
----------------------------------------------------------
| "You can't eliminate risk, you can only move it around." |
----------------------------------------------------------
---------------------------------------------------------------------------------
Usage: |
To run simulation using smooth control rule: |
python mining_rewards.py smooth |
|
To run simulation using null control rule and fiat ratio of 50%: |
python mining_rewards.py null --fiat_ratio 0.5 |
|
For detailed usage instructions: |
python mining_rewards.py -h |
---------------------------------------------------------------------------------
Our core objective in designing Terra's stability mechanism is to contain volatility in Luna price changes
while permitting natural growth over time.
Pricing Luna
Luna has variable supply, so pricing one unit of Luna today is not as simple as discounting future rewards.
1 Luna may represent 1% of total rewards today and 0.5% tomorrow. We need to account for future fluctuations
in supply in order to determine what share of each future rewards 1 Luna can claim today.
We can formulate the price of 1 Luna today using DCF as follows:
SUM for all t from now to infinity: Total Rewards(t)/Luna Supply(t)/(1+r)^t for an appropriate discount rate r.
The main benefit of this formulation is that it captures the idea that "the market prices in future dilution",
in the sense that given a projection of Luna Supply in the future it can soundly price 1 Luna today.
Luna Price Volatility
Volatility in Luna price comes from volatility in future unit rewards, i.e. Total Rewards(t)/Luna Supply(t).
Unit rewards are by default highly cyclical: when the economy is growing rewards increase and supply tends
to decrease up to initial issuance; when the economy is shrinking rewards decrease and supply increases
as a result of the Terra contraction mechanism. Hence one way to contain Luna price volatility is to contain
volatility in unit rewards.
Targeting stable MRL growth
Our objective is to contain volatility in unit mining rewards, meaning Total Rewards(t)/Luna Supply(t),
which we call MRL for short. We also want to allow for growth, albeit gradual and stable. We have two levers
at our disposal to achieve this: transaction fees f(t), and the proportion of seigniorage that is allocated
towards Luna buybacks i.e. the buyback weight w(t). More precisely:
- Mining Rewards during period t are f(t)*TV(t), where TV is Transaction Volume
- Luna Buybacks during period t are w(t)*S(t), where S is Seigniorage (may be 0)
In what follows we implement this basic idea: adjust the two levers to achieve low-volatility growth in
unit mining rewards.
Control Rules
The control rule is the logic that adjusts f and w in response to economic conditions. It is the core
building block of the algorithm. We have implemented three control rules:
- null: no control at all -- f and w remain fixed at their genesis values
- debt: control based on the amount of "Luna debt" accumulated -- the higher Luna Supply above its genesis value
the higher f and w
- smooth: control that targets smooth MRL growth -- details to be found in the README
--------------------------------------------------------------------------------------------------------------------
Inputs (timeseries)
TV: Transaction Volume (in Terra)
--------------------------------------------------------------------------------------------------------------------
Outputs (timeseries)
f: Terra transaction fee (%)
w: buyback weight (%)
--------------------------------------------------------------------------------------------------------------------
Core Parameters
MRL_GROWTH_FACTOR: growth factor applied to MRL moving average
SB_TARGET: Seigniorage Burden Target (%)
MU_BOOM, MU_BUST: drift parameters for TV GBM
SIGMA: volatility parameter for TV GBM
V: Terra Velocity
GENESIS_LPE: Luna P/E ratio at genesis
--------------------------------------------------------------------------------------------------------------------
State (at time t)
t: week (0 to 519 ie 10 years)
TV: Transaction Volume (in Terra)
M: Terra Money Supply
S: Seigniorage >= 0 generated during this period
LMC: Luna Market Capitalization (in Terra)
LS: Luna Supply
LP: Luna Price
LPE: Luna P/E ratio
f: Terra transaction fee (%)
w: buyback weight (%)
MR: Mining Rewards from transaction fees, ie f*TV
MRL: Mining Rewards per Luna, ie MR/LS
--------------------------------------------------------------------------------------------------------------------
"""
V = 52 # annual Terra velocity
NUM_YEARS = 10
TOTAL_DAYS = NUM_YEARS*364
PERIOD = 7 # in days
PERIODS_PER_YEAR = int(364/PERIOD)
NUM_PERIODS = int(TOTAL_DAYS/PERIOD)
# TODO insert random injections of Luna supply!!
GENESIS_LUNA_SUPPLY = 100
GENESIS_TV = 1
GENESIS_LPE = 20
# fee parameters
F_GENESIS = 0.1/100
F_MIN = 0.05/100
F_MAX = 1/100
F_MAX_STEP = 0.025/100
# buyback weight parameters
FIAT_RATIO = 0
W_MIN_CAP = 5/100
W_MIN = 5/100
W_MAX = 1
W_GENESIS = 5/100
W_MAX_STEP = 2.5/100
MRL_GROWTH_FACTOR = 1.075
MRL_INC = 10**(-6)
SB_TARGET = 0.67
def plot_results(df):
# plot TV
ax = df.loc[:, ['TV','TV_MA52']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('transaction volume ($)')
ax.legend(['transaction volume','transaction volume (annual avg)'])
# plot ΔΜ
ax = df.loc[:, ['ΔM', 'ΔM_MA']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('ΔΜ ($)')
# plot f and w
ax = df.loc[:, ['f', 'w']].plot(secondary_y=['w'])
ax.set_xlabel('time (weeks)')
ax.set_ylabel('transaction fees (%)')
ax.right_ax.set_ylabel('Luna burn rate (%)')
ax.set_ylim(0, F_MAX)
ax.right_ax.set_ylim(0, 1)
y_ticks = ax.get_yticks()
ax.set_yticklabels(['{:,.2%}'.format(y) for y in y_ticks])
y_ticks_right = ax.right_ax.get_yticks()
ax.right_ax.set_yticklabels(['{:,.2%}'.format(y) for y in y_ticks_right])
lines = ax.get_lines() + ax.right_ax.get_lines()
ax.legend(lines, ['fees','Luna burn rate'])
# plot MR and MRL
#ax = df.loc[:, ['MR_MA', 'MRL_MA']].plot(secondary_y=['MRL_MA'])
ax = df.loc[:, ['MR_MA13', 'MR_MA52']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Mining Rewards ($)')
#ax.right_ax.set_ylabel('Mining Rewards per Luna ($)')
# plot MRL
ax = df.loc[:, ['MRL_MA13', 'MRL_MA52']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('unit mining rewards ($)')
ax.legend(['unit mining rewards (quarterly avg)', 'unit mining rewards (annual avg)'])
# plot LP
ax = df.loc[:, ['LP', 'LP_MA13']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Luna Price ($)')
# plot LS
ax = df.loc[:, ['LS']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Luna Supply')
# plot LPE
ax = df.loc[:, ['LPE']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Luna PE Ratio')
# plot M and LMC
ax = df.loc[:, ['M', 'LMC']].plot(secondary_y=['LMC'])
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Terra Money Supply ($)')
ax.right_ax.set_ylabel('Luna Market Cap ($)')
# plot fiat
ax = df.loc[:, ['fiat']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('fiat')
# plot FRR
ax = df.loc[:, ['FRR']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Fiat Reserve Ratio')
# plot LRR
ax = df.loc[:, ['LRR', 'LRR_MA']].plot()
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Luna Reserve Ratio')
# plot SB
ax = df.loc[:, ['SB']].plot(kind='area')
ax.set_xlabel('time (weeks)')
ax.set_ylabel('Seigniorage Burden')
ax.set_ylim(0, 1)
plt.show()
def tv_gbm_jump():
early_stage = gbm(1, mu=0.1, sigma=0.1, num_periods=52, increments_per_period=1) # first year: high growth, high vol
growth_stage = gbm_cyclical(early_stage[-1], mu_boom=0.5, mu_bust=-0.2, sigma=0.4, cycle_lengths=[2,3,4], increments_per_period=52)
assert len(early_stage) + len(growth_stage) == 520
return np.append(early_stage, growth_stage)
"""
Stochastic P/E multiple for Luna at time t, LPE(t)
Basic idea is to increase multiple during growth, decrease during recession
We model LPE(t) as (1 + X(t))*LPE(t-1), where X(t) is N(μ, σ):
μ is (MA1/MA2 - 1)/100, where MA1, MA2 are 1 and 2 year MAs for TV
σ is 0.5% if MA1 >= MA2, otherwise it is 1%
Note that the updates are weekly, so eg 1% weekly vol is 7.2% annual vol
LPE is basically a random walk whose next value depends on the trend in TV
We make it more volatile when TV is in a downtrend
"""
# TODO may want to punish drops more by increasing negative mu's by 50-100%
# TODO explain why we are using TV rather than MR here
# TODO make LPE more responsive, 1 and 2 year MAs are too slow
def lpe(df, t):
prev_lpe = df.at[t-1,'LPE']
tv_ma1 = df['TV'].rolling(13, min_periods=1).mean().at[t]
tv_ma2 = df['TV'].rolling(2*13, min_periods=1).mean().at[t]
tv_delta = tv_ma1/tv_ma2 - 1
mu = tv_delta/50
if tv_ma1 < tv_ma2:
mu *= 1.5
sigma = 0.005 if tv_ma1 >= tv_ma2 else 0.01
x = np.random.normal(mu, sigma)
return (1 + x)*prev_lpe
# Transaction Volume to Terra Money Supply
def tv_to_m(tv):
annual_tv = tv*PERIODS_PER_YEAR
return annual_tv/V
# Earnings to Luna Market Cap
def earnings_to_lmc(df, t):
earnings_ma = (df['f']*df['TV']).rolling(13, min_periods=1).mean().at[t]
annualized_earnings = earnings_ma*PERIODS_PER_YEAR
lpe = df.at[t,'LPE']
return annualized_earnings*lpe
def set_genesis_state(df):
tv = df.at[0,'TV']
df.at[0,'M'] = tv_to_m(tv)
df.at[0,'fiat'] = df.at[0,'M']*FIAT_RATIO # adhere to FIAT_RATIO at genesis
df.at[0,'S'] = 0
df.at[0,'f'] = F_GENESIS
df.at[0,'w'] = W_GENESIS
df.at[0,'MR'] = df.at[0,'f']*df.at[0,'TV'] # seigniorage not defined at genesis
df.at[0,'LPE'] = GENESIS_LPE
df.at[0,'LMC'] = earnings_to_lmc(df, 0)
df.at[0,'LS'] = GENESIS_LUNA_SUPPLY
df.at[0,'MRL'] = df.at[0,'MR']/df.at[0,'LS']
# f and w are forward-computed for the following state
df.at[1,'f'] = F_GENESIS
df.at[1,'w'] = W_GENESIS
"""
Evaluate the state of the system at time period t. This is where all of the work happens.
Assumes t >= 1
Assumes states up to and including period t-1 have been already evaluated.
Assumes TV has already been set (independent variable).
Assumes f and w have already been set upon evaluation of state t-1.
This is because f and w are forward computed at all t for t+1.
"""
def evaluate_state(df, t, control_rule):
tv = df.at[t,'TV']
df.at[t,'M'] = tv_to_m(tv)
delta_m = df.at[t,'M'] - df.at[t-1,'M']
df.at[t,'S'] = max(delta_m, 0)
df.at[t,'MR'] = df.at[t,'f']*df.at[t,'TV']
df.at[t,'LPE'] = lpe(df, t)
df.at[t,'LMC'] = earnings_to_lmc(df, t)
lp_prev = df.at[t-1,'LMC']/df.at[t-1,'LS'] # previous Luna price
# start with fiat
delta_fiat = delta_m*FIAT_RATIO
df.at[t,'fiat'] = df.at[t-1,'fiat'] + delta_fiat
delta_m = delta_m - delta_fiat
if delta_m >= 0: # expansion
num_luna_burned = df.at[t,'w']*delta_m/lp_prev
df.at[t,'LS'] = df.at[t-1,'LS'] - num_luna_burned
else: # contraction
num_luna_issued = -delta_m/lp_prev
df.at[t,'LS'] = df.at[t-1,'LS'] + num_luna_issued
df.at[t,'MRL'] = df.at[t,'MR']/df.at[t,'LS']
if t < NUM_PERIODS-1:
next_f, next_w = control_rule(df, t)
df.at[t+1,'f'] = next_f
df.at[t+1,'w'] = next_w
"""
Clamp the value of x between lower and upper
"""
def clamp(x, lower, upper):
return max(lower, min(x, upper))
def null_control(df, t):
return (df.at[t,'f'], df.at[t,'w'])
def debt_control(df, t):
if df.at[t,'LS'] <= GENESIS_LUNA_SUPPLY:
return (F_MIN, 0)
debt_ratio = 1 - GENESIS_LUNA_SUPPLY/df.at[t,'LS']
next_f = debt_ratio*F_MAX
next_w = debt_ratio
next_f = clamp(next_f, F_MIN, F_MAX)
next_w = clamp(next_w, 0, 1) # effectively no bounds
return (next_f, next_w)
def smooth_control(df, t):
f, w = df.at[t,'f'], df.at[t,'w']
# fee update
MRL_short = (df['f']*df['TV']/df['LS']).rolling(4, min_periods=1).mean().at[t]
MRL_long = (df['f']*df['TV']/df['LS']).rolling(52, min_periods=1).mean().at[t]
next_f = f*MRL_long*MRL_GROWTH_FACTOR/MRL_short
# buyback weight update
buybacks_rolling_sum = (df['w']*df['S']).rolling(4, min_periods=1).sum().at[t]
fees_rolling_sum = (df['f']*df['TV']).rolling(4, min_periods=1).sum().at[t]
seigniorage_burden = buybacks_rolling_sum/(buybacks_rolling_sum + fees_rolling_sum)
if seigniorage_burden > 0:
next_w = w*SB_TARGET/seigniorage_burden
else:
next_w = W_MAX # no buybacks, so set next_w to max
# apply constraints
next_f = clamp(next_f, f - F_MAX_STEP, f + F_MAX_STEP)
next_f = clamp(next_f, F_MIN, F_MAX)
next_w = clamp(next_w, w - W_MAX_STEP, w + W_MAX_STEP)
next_w = clamp(next_w, W_MIN, W_MAX)
return (next_f, next_w)
"""
Simulates mining rewards.
Args:
mu_boom:
mu_bust:
sigma: GBM parameters for transaction volume (cyclical)
control_rule: 'null', 'debt' or 'smooth'
seed: random seed for simulation
Returns:
Dataframe with one row per state of the simulation
"""
def simulate_mr(mu_boom, mu_bust, sigma, control_rule='smooth', seed=0):
np.random.seed(seed)
control_rule = globals()[control_rule + '_control']
t = range(0, NUM_PERIODS)
tv = gbm_cyclical(GENESIS_TV, mu_boom, mu_bust, sigma, cycle_lengths=[2,3,5], increments_per_period=52)
df = pd.DataFrame(data = {'t': t, 'TV': tv})
df['M'] = np.NaN # Terra Money Supply
df['fiat'] = np.NaN
df['S'] = np.NaN # seigniorage
df['f'] = np.NaN # TX fee
df['w'] = np.NaN # buyback weight
df['MR'] = np.NaN # Mining Rewards
df['LPE'] = np.NaN # Luna PE ratio
df['LMC'] = np.NaN # Luna Market Cap
df['LS'] = np.NaN # Luna Supply
df['MRL'] = np.NaN # Mining Rewards per Luna
df.set_index('t', inplace=True)
set_genesis_state(df) # t=0
for t in range(1, NUM_PERIODS):
evaluate_state(df, t, control_rule)
# compute some extra columns
df['ΔM'] = df['M'] - df['M'].shift(1) # changes in M
df['FRR'] = df['fiat']/df['M'] # Fiat Reserve Ratio
df['LRR'] = df['LMC']/df['M'] # Luna Reserve Ratio
df['LP'] = df['LMC']/df['LS'] # Luna Price
buybacks_rolling_sum = (df['w']*df['S']).rolling(52, min_periods=1).sum()
fees_rolling_sum = (df['f']*df['TV']).rolling(52, min_periods=1).sum()
df['SB'] = buybacks_rolling_sum/(buybacks_rolling_sum + fees_rolling_sum)
df['TV_MA52'] = df['TV'].rolling(52, min_periods=1).mean()
df['MR_MA13'] = df['MR'].rolling(13, min_periods=1).mean()
df['MR_MA52'] = df['MR'].rolling(52, min_periods=1).mean()
df['MRL_MA4'] = df['MRL'].rolling(4, min_periods=1).mean()
df['MRL_MA13'] = df['MRL'].rolling(13, min_periods=1).mean()
df['MRL_MA52'] = df['MRL'].rolling(52, min_periods=1).mean()
df['ΔM_MA'] = df['ΔM'].rolling(13, min_periods=1).mean()
df['LRR_MA'] = df['LRR'].rolling(13, min_periods=1).mean()
df['LP_MA13'] = df['LP'].rolling(13, min_periods=1).mean()
df['f_MA'] = df['f'].rolling(13, min_periods=1).mean()
df['w_MA'] = df['w'].rolling(13, min_periods=1).mean()
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('control_rule', type=str, choices=['null', 'debt', 'smooth'], help='mining rewards control rule')
parser.add_argument('-f', '--fiat_ratio', type=float, default=0, dest='fiat_ratio', help='fiat ratio between 0 and 1')
args = parser.parse_args()
if args.fiat_ratio < 0 or args.fiat_ratio > 1:
parser.error("fiat ratio needs to be between 0 and 1")
FIAT_RATIO = args.fiat_ratio
W_MAX = 1 - FIAT_RATIO
W_MIN = min(W_MAX, W_MIN_CAP)
W_GENESIS = W_MIN
df = simulate_mr(mu_boom=0.5, mu_bust=-0.2, sigma=0.4, control_rule=args.control_rule)
print(df)
plot_results(df)
| 1.890625 | 2 |
| scripts/load_data.py | nilsleiffischer/covid19 | 4 | 12782161 |
<filename>scripts/load_data.py
import pandas as pd
def load_jhu_data():
data = pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv',
index_col=1)
data.index = data.index + (" (" + data['Province/State'] + ")").fillna("")
del data["Province/State"]
del data["Lat"]
del data["Long"]
data = data.transpose()
data.index = pd.to_datetime(data.index)
return data
| 2.875 | 3 |
| lib/packer.py | mitodl/concourse-packer-resource | 0 | 12782162 |
# stdlib
import subprocess
from typing import List
# local
from lib.io import read_value_from_file
from lib.log import log, log_pretty
# =============================================================================
#
# private utility functions
#
# =============================================================================
# =============================================================================
# _parse_packer_machine_readable_output_line
# =============================================================================
def _parse_packer_machine_readable_output_line(output_line: str) -> dict:
# machine readable format
# from https://www.packer.io/docs/commands/index.html
parsed_line = None
if output_line:
message_item: dict = {
"timestamp": None,
"target": None,
"type": None,
"data": [],
}
# split each line on commas
line_tokens: list = output_line.split(",")
for i, line_token in enumerate(line_tokens):
# assign payload fields based on token number
if i == 0:
message_item["timestamp"] = line_token
elif i == 1:
message_item["target"] = line_token
elif i == 2:
message_item["type"] = line_token
elif i > 2:
# strip trailing newline from data
message_item["data"].append(line_token.rstrip("\n"))
parsed_line = message_item
return parsed_line
# =============================================================================
# _format_packer_machine_readable_output_line
# =============================================================================
def _format_packer_machine_readable_output_line(
timestamp: str, target: str, output_type: str, data: str, subtype=None
) -> str:
# most messages won't have a target which means it's global
if not target:
target = "global"
# consistent padding for the 'version' types
if output_type.startswith("version"):
output_type = f"{output_type:16}"
# replace the packer comma
data = data.replace("%!(PACKER_COMMA)", ",")
if subtype:
return f"{timestamp} | {target} | {output_type} | {subtype:8} | {data}"
return f"{timestamp} | {target} | {output_type} | {data}"
# =============================================================================
# _print_parsed_packer_machine_readable_output_line
# =============================================================================
def _print_parsed_packer_machine_readable_output_line(parsed_line: dict) -> None:
if parsed_line:
if len(parsed_line["data"]) > 0:
subtype = None
# check for subtype
if parsed_line["data"][0] in ["say", "error", "message"]:
# pop found subtype from the parsed line
subtype = parsed_line["data"].pop(0)
for item in parsed_line["data"]:
# split on \\n
item_lines = item.split("\\n")
for item_line in item_lines:
log(
_format_packer_machine_readable_output_line(
parsed_line["timestamp"],
parsed_line["target"],
parsed_line["type"],
item_line,
subtype=subtype,
)
)
# =============================================================================
# _parse_packer_parsed_output_for_build_manifest
# =============================================================================
def _parse_packer_parsed_output_for_build_manifest(parsed_output: List[dict]) -> dict:
manifest = {"artifacts": {}}
# create collection of targets
targets = {}
for parsed_item in parsed_output:
if parsed_item["target"]:
target_name = parsed_item["target"]
if target_name not in targets:
targets[target_name] = []
del parsed_item["target"]
targets[target_name].append(parsed_item)
# iterate on targets
for target_key, target_value in targets.items():
# split into artifacts
target_artifacts = {}
for target_item in target_value:
if target_item["type"] == "artifact":
# first index of data will be the artifact number
artifact_number = target_item["data"][0]
# second index of data will be the artifact key
artifact_key = target_item["data"][1]
# skip adding the 'end' key
if artifact_key == "end":
continue
# third index of data will be the artifact value, if present
if len(target_item["data"]) > 2:
artifact_value = target_item["data"][2]
else:
artifact_value = None
# create the target artifact dict, if missing
if artifact_number not in target_artifacts:
target_artifacts[artifact_number] = {}
# assign the artifact key and value
target_artifacts[artifact_number][artifact_key] = artifact_value
manifest["artifacts"][target_key] = target_artifacts
return manifest
# =============================================================================
#
# private exe functions
#
# =============================================================================
# =============================================================================
# _packer
# =============================================================================
def _packer(*args: str, working_dir=None) -> List[dict]:
# runs packer bin with forced machine readable output
process_args = ["packer", "-machine-readable", *args]
parsed_lines = []
# use Popen so we can read lines as they come
with subprocess.Popen(
process_args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, # redirect stderr to stdout
bufsize=1,
universal_newlines=True,
stdin=None,
cwd=working_dir,
) as pipe:
for line in pipe.stdout or "":
if "fmt" in args:
# determine log level
log_level = "warning" if "fmt" in args else "info"
# directly log the output
log(f"global | ui | {log_level} | {line.rstrip()}")
else:
# parse the machine readable output as it arrives
parsed_line = _parse_packer_machine_readable_output_line(line)
parsed_lines.append(parsed_line)
_print_parsed_packer_machine_readable_output_line(parsed_line)
if pipe.returncode != 0:
# args are masked to prevent credentials leaking
raise subprocess.CalledProcessError(pipe.returncode, ["packer"])
return parsed_lines
# =============================================================================
#
# public packer functions
#
# =============================================================================
# =============================================================================
# version
# =============================================================================
def version() -> None:
# execute version command
_packer("version")
# =============================================================================
# init
# =============================================================================
def init(working_dir_path: str, template_file_path: str) -> None:
# execute init command
_packer("init", template_file_path, working_dir=working_dir_path)
# =============================================================================
# format
# =============================================================================
def format_packer_cmd(working_dir_path: str, template_file_path: str) -> None:
# execute format command
_packer("fmt", "-check", "-diff", template_file_path, working_dir=working_dir_path)
# =============================================================================
# validate
# =============================================================================
def validate(
working_dir_path: str,
template_file_path: str,
var_file_paths: List[str] = None,
template_vars: dict = None,
vars_from_files: dict = None,
only: List[str] = None,
excepts: List[str] = None,
syntax_only: bool = False,
debug: bool = False,
) -> None:
packer_command_args = []
# add any specified var file paths
if var_file_paths:
for var_file_path in var_file_paths:
packer_command_args.append(f"-var-file={var_file_path}")
# add any specified vars
if template_vars:
for var_name, var_value in template_vars.items():
packer_command_args.append(f"-var={var_name}={var_value}")
# add any vars from files
if vars_from_files:
for var_name, file_path in vars_from_files.items():
var_value = read_value_from_file(file_path, working_dir=working_dir_path)
packer_command_args.append(f"-var={var_name}={var_value}")
# only build specified sources
if only:
packer_command_args.append(f"-only={','.join(only)}")
# build all sources except those specified
elif excepts:
packer_command_args.append(f"-except={','.join(excepts)}")
# optionally check only syntax
if syntax_only:
packer_command_args.append("-syntax-only")
# dump args on debug
if debug:
log("validate args:")
log_pretty(packer_command_args)
# execute validate command
_packer(
"validate",
*packer_command_args,
template_file_path,
working_dir=working_dir_path,
)
# =============================================================================
# build
# =============================================================================
def build(
working_dir_path: str,
template_file_path: str,
var_file_paths: List[str] = None,
template_vars: dict = None,
vars_from_files: dict = None,
only: List[str] = None,
excepts: List[str] = None,
debug: bool = False,
force: bool = False,
) -> dict:
packer_command_args = []
# add any specified var file paths
if var_file_paths:
for var_file_path in var_file_paths:
packer_command_args.append(f"-var-file={var_file_path}")
# add any specified vars
if template_vars:
for var_name, var_value in template_vars.items():
packer_command_args.append(f"-var={var_name}={var_value}")
# add any vars from files
if vars_from_files:
for var_name, file_path in vars_from_files.items():
var_value = read_value_from_file(file_path, working_dir=working_dir_path)
packer_command_args.append(f"-var={var_name}={var_value}")
# only build specified sources
if only:
packer_command_args.append(f"-only={','.join(only)}")
# build all sources except those specified
elif excepts:
packer_command_args.append(f"-except={','.join(excepts)}")
# add force if requested
if force:
packer_command_args.append("-force")
# dump args on debug
if debug:
log("build args:")
log_pretty(packer_command_args)
# execute build command
packer_command_result = _packer(
"build", *packer_command_args, template_file_path, working_dir=working_dir_path
)
# get build manifest from output
packer_build_manifest = _parse_packer_parsed_output_for_build_manifest(
packer_command_result
)
# return the manifest
return packer_build_manifest
| 2.40625 | 2 |
| libra_client/lbrtypes/account_config/constants/account_limits.py | violas-core/violas-client | 0 | 12782163 |
<filename>libra_client/lbrtypes/account_config/constants/account_limits.py
from libra_client.move_core_types.language_storage import ModuleId, CORE_CODE_ADDRESS
ACCOUNT_LIMITS_MODULE_NAME = "AccountLimits"
ACCOUNT_LIMITS_MODULE = ModuleId(CORE_CODE_ADDRESS, ACCOUNT_LIMITS_MODULE_NAME)
ACCOUNT_LIMITS_WINDOW_STRUCT_NAME = "Window"
def account_limits_module_name():
return ACCOUNT_LIMITS_MODULE
def account_limits_window_struct_name():
return ACCOUNT_LIMITS_WINDOW_STRUCT_NAME
| 1.445313 | 1 |
| app/api/test_api.py | totoro0104/fastapi-example | 2 | 12782164 |
from typing import Optional
from fastapi import APIRouter, Depends
import httpx
from app.models.models import User
from app.schema import User_Pydantic
router = APIRouter()
@router.get("/")
async def homepage():
# use httpx instead of requests to make the request asynchronously
async with httpx.AsyncClient() as client:
res = await client.get('https://www.baidu.com')
return {"data": res.status_code}
@router.get("/test/items/{item_id}")
async def read_item(item_id: int, q: Optional[str] = None):
return {"item_id": item_id, "q": q}
@router.get('/test/users')
async def get_users():
# create a user
# user = User()
# user.name = 'test2'
# user.phone = '123'
# user.set_password('<PASSWORD>')
# await user.save()
# return 1
# building the QuerySet does not hit the database yet
users = User.all()
# User_Pydantic is the serialization model; users is the QuerySet to be processed
data = await User_Pydantic.from_queryset(users)
return data
| 2.578125 | 3 |
| backpack/extensions/firstorder/fisher/__init__.py | maryamhgf/backpack | 0 | 12782165 |
<gh_stars>0
from torch.nn import (
Conv1d,
Conv2d,
Linear,
BatchNorm1d,
BatchNorm2d
)
from backpack.extensions.backprop_extension import BackpropExtension
from . import (
conv1d,
conv2d,
linear,
batchnorm1d,
batchnorm2d
)
class Fisher(BackpropExtension):
def __init__(self, silent=False):
self.silent = silent
super().__init__(
savefield="fisher",
fail_mode="WARNING",
module_exts={
Linear: linear.FisherLinear(self.silent),
Conv1d: conv1d.FisherConv1d(self.silent),
Conv2d: conv2d.FisherConv2d(self.silent),
BatchNorm1d: batchnorm1d.FisherBatchNorm1d(self.silent),
BatchNorm2d: batchnorm2d.FisherBatchNorm2d(self.silent),
},
)
| 2.21875 | 2 |
| lib/surface/kms/keys/versions/get_certificate_chain.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12782166 |
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get a PEM-format certificate chain for a given version."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.kms import exceptions as kms_exceptions
from googlecloudsdk.command_lib.kms import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.util import files
DETAILED_HELP = {
'EXAMPLES':
"""\
The following command saves the Cavium certificate chain for
CryptoKey ``frodo'' Version 2 to ``/tmp/my/cavium.pem'':
$ {command} 2 --key=frodo --keyring=fellowship --location=us-east1 --certificate-chain-type=cavium --output-file=/tmp/my/cavium.pem
""",
}
def _GetCertificateChainPem(chains, chain_type):
"""Returns the specified certificate chain(s) from a CertChains object.
Args:
chains: a KeyOperationAttestation.CertChains object.
chain_type: a string specifying the chain(s) to retrieve.
Returns:
A string containing the PEM-encoded certificate chain(s).
Raises:
exceptions.InvalidArgumentException if chain_type is not a valid chain type.
"""
if chain_type == 'cavium':
return ''.join(chains.caviumCerts)
elif chain_type == 'google-card':
return ''.join(chains.googleCardCerts)
elif chain_type == 'google-partition':
return ''.join(chains.googlePartitionCerts)
elif chain_type == 'all':
return ''.join(chains.caviumCerts + chains.googlePartitionCerts +
chains.googleCardCerts)
raise exceptions.InvalidArgumentException(
'{} is not a valid chain type.'.format(chain_type))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class GetCertificateChain(base.DescribeCommand):
r"""Get a certificate chain for a given version.
Returns the PEM-format certificate chain for the specified key version.
The optional flag `output-file` indicates the path to store the PEM. If not
specified, the PEM will be printed to stdout.
"""
detailed_help = DETAILED_HELP
@staticmethod
def Args(parser):
flags.AddKeyVersionResourceArgument(
parser, 'from which to get the certificate chain')
flags.AddCertificateChainFlag(parser)
flags.AddOutputFileFlag(parser, 'to store PEM')
def Run(self, args):
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
version_ref = flags.ParseCryptoKeyVersionName(args)
if not version_ref.Name():
raise exceptions.InvalidArgumentException(
'version', 'version id must be non-empty.')
versions = client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions
version = versions.Get(
messages
.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetRequest(
name=version_ref.RelativeName()))
if (version.protectionLevel !=
messages.CryptoKeyVersion.ProtectionLevelValueValuesEnum.HSM):
raise kms_exceptions.ArgumentError(
'Certificate chains are only available for HSM key versions.')
if (version.state ==
messages.CryptoKeyVersion.StateValueValuesEnum.PENDING_GENERATION):
raise kms_exceptions.ArgumentError(
'Certificate chains are unavailable until the version is generated.')
try:
log.WriteToFileOrStdout(
args.output_file if args.output_file else '-',
_GetCertificateChainPem(version.attestation.certChains,
args.certificate_chain_type),
overwrite=True,
binary=False)
except files.Error as e:
raise exceptions.BadFileException(e)
| 1.796875 | 2 |
| ReconnaissanceFaciale/RecoFaciale.py | BasileAmeeuw/DroneDelivreur | 0 | 12782167 |
#import libraries
import face_recognition
import numpy as np
from PIL import Image, ImageDraw
import matplotlib.image as mpimg
from IPython.display import display
import cv2
import os, re
import pyrebase
import time
cv2.VideoCapture(0).isOpened()
from dronekit import connect, VehicleMode, LocationGlobalRelative
import firebase_admin
from firebase_admin import credentials
from google.cloud import firestore
'''
To add: your libraries
'''
# Firestore and Storage config:
firebaseConfig = {
"apiKey": "<KEY>",
"authDomain": "delivreapp-5221e.firebaseapp.com",
"projectId": "delivreapp-5221e",
"databaseURL": "https://del-ivre-default-rtdb.europe-west1.firebasedatabase.app",
"storageBucket": "delivreapp-5221e.appspot.com",
"messagingSenderId": "661920641786",
"appId": "1:661920641786:web:dca2c085b5ff60f1b18f43",
"measurementId": "G-CLR5PFH3G4"
};
nDrone=5 # number of the associated drone
firebase=pyrebase.initialize_app(firebaseConfig)
storage=firebase.storage()
try:
firebase_admin.get_app()
#print('firebase intialized.')
except ValueError as e:
#print('firebase not initialized. But now initialize.')
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="serviceAccountKey.json"
db = firestore.Client()
### retrieve the GPS coordinates
print("Initialisation terminée, début de la boucle")
while True:
nbImg=[];
go=True
while True:
try:
doc=db.collection('Users').where("Drone","==",nDrone).get()[0]
break
except:
continue
nom=doc.get("Nom")
prenom=doc.get("Prenom")
coord=doc.get("GPS")
image=nom+"#"+prenom+".jpg"
print(nom,end=' ')
print(prenom,end="\n")
print(doc.get("Commande"),end="\n")
print(coord)
# download the image
storage.child(image).download(image)
img1 = Image.open(image)
img1.save("img1.jpg","JPEG")
try:
time.sleep(0.001)
img1 = Image.open(image)
img1.save("img1.jpg","JPEG")
time.sleep(0.001)
img2=img1.rotate(90)
img2.show()
time.sleep(0.001)
img2.save("img2.jpg","JPEG")
img3=img2.rotate(90)
time.sleep(0.001)
img3.save("img3.jpg","JPEG")
img4=img3.rotate(90)
time.sleep(0.001)
img4.save("img4.jpg","JPEG")
#os.remove(image)
print("image enregistrée")
except:
print("probleme dans le téléchargement de l'image")
### facial recognition initialization variables
known_face_encodings = []
known_face_names = []
face_locations = []
face_encodings = []
process_this_frame = True
# Image stored in the database
for i in range(1,5):
try:
new_image=face_recognition.load_image_file("img"+ str(i) + ".jpg")
new_face_encoding = face_recognition.face_encodings(new_image)[0]
known_face_encodings.append(new_face_encoding)
known_face_names.append(prenom + " " + nom)
nbImg.append(i)  # track which rotated copies were successfully encoded
print("photo" , str(i) , " dans reconaissance faciale")
except:
os.remove("img"+ str(i) + ".jpg")
print(i)
print("photo ", str(i) , "non prise en compte")
Reco=True
# facial recognition algorithm
print("lancement algorithme de reconnaissance faciale")
while Reco:
# Grab a single frame of video
ret, frame = cv2.VideoCapture(0).read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
if name==prenom+" "+nom:
print(name, end=' ')
print("a bien été reconnu, on procède donc a l'attérissage.")
Reco=False
process_this_frame = not process_this_frame
# delete the images from local storage
print("image supprimé de la mémoire du rpi")
for i in range(len(nbImg)):
os.remove("img"+ str(nbImg[i]) + ".jpg")
try:
id=str(int(doc.get("Id")))
db.collection('Users').document(id).delete()
print("la commande a été supprimée")
storage.delete_blob(image)
storage.delete()
except:
print("la commande était déja supprimée")
| 2.140625 | 2 |
| .openapi-generator/update_type_with_interface.py | AntoineDao/honeybee-schema-dotnet | 0 | 12782168 |
import re, os, json
root = os.path.dirname(os.path.dirname(__file__))
source_folder = os.path.join(root, 'src', 'HoneybeeSchema', 'Model')
#NamedReferenceable
class_files = [x for x in os.listdir(source_folder) if (x.endswith("Abridged.cs") and not x.endswith("SetAbridged.cs") and not x.endswith("PropertiesAbridged.cs") and not x.endswith("ScheduleRuleAbridged.cs") ) ]
abridged_file = os.path.join(root, 'src', 'HoneybeeSchema', 'BaseClasses', 'NamedReferenceable.cs')
with open(abridged_file, "wt", encoding='utf-8') as abridgeFile:
data = []
data.append('namespace HoneybeeSchema\n')
data.append('{\n')
for f in class_files:
type_name = f
data.append('public partial class %s: INamed {}\n' % f.replace('.cs',''))
data.append('public partial class ConstructionSetAbridged: INamed{}\n')
data.append('}')
abridgeFile.writelines(data)
abridgeFile.close()
#EnergyWindowMaterial
class_files = [x for x in os.listdir(source_folder) if (x.startswith("EnergyWindowMaterial")) ]
abridged_file = os.path.join(root, 'src', 'HoneybeeSchema', 'BaseClasses', 'EnergyWindowMaterial.cs')
with open(abridged_file, "wt", encoding='utf-8') as abridgeFile:
data = []
data.append('namespace HoneybeeSchema\n')
data.append('{\n')
for f in class_files:
type_name = f
data.append('public partial class %s: IEnergyWindowMaterial {}\n' % f.replace('.cs',''))
data.append('}')
abridgeFile.writelines(data)
abridgeFile.close()
#EnergyMaterial
class_files = [x for x in os.listdir(source_folder) if (x.startswith("EnergyMaterial")) ]
abridged_file = os.path.join(root, 'src', 'HoneybeeSchema', 'BaseClasses', 'EnergyMaterial.cs')
with open(abridged_file, "wt", encoding='utf-8') as abridgeFile:
data = []
data.append('namespace HoneybeeSchema\n')
data.append('{\n')
for f in class_files:
type_name = f
data.append('public partial class %s: IEnergyMaterial {}\n' % f.replace('.cs',''))
data.append('}')
abridgeFile.writelines(data)
abridgeFile.close()
| 2.171875 | 2 |
| concentric/users/migrations/0005_user_name.py | guidotheelen/concentric | 0 | 12782169 |
<filename>concentric/users/migrations/0005_user_name.py
# Generated by Django 3.0.10 on 2020-11-27 23:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20201127_2250'),
]
operations = [
migrations.AddField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=20, verbose_name='Name'),
),
]
| 1.460938 | 1 |
| datasets/tr_cpi_parser.py | davidpaulkim/Stock-Market-Price-Prediction | 8 | 12782170 |
<reponame>davidpaulkim/Stock-Market-Price-Prediction<gh_stars>1-10
# src= "https://tcmb.gov.tr/wps/wcm/connect/EN/TCMB+EN/Main+Menu/Statistics/Inflation+Data/Consumer+Prices"
cpi_values = """02-2021 15.61 0.91
01-2021 14.97 1.68
12-2020 14.60 1.25
11-2020 14.03 2.30
10-2020 11.89 2.13
09-2020 11.75 0.97
08-2020 11.77 0.86
07-2020 11.76 0.58
06-2020 12.62 1.13
05-2020 11.39 1.36
04-2020 10.94 0.85
03-2020 11.86 0.57
02-2020 12.37 0.35
01-2020 12.15 1.35
12-2019 11.84 0.74
11-2019 10.56 0.38
10-2019 8.55 2
09-2019 9.26 0.99
08-2019 15.01 0.86
07-2019 16.65 1.36
06-2019 15.72 0.03
05-2019 18.71 0.95
04-2019 19.50 1.69
03-2019 19.71 1.03
02-2019 19.67 0.16
01-2019 20.35 1.06
12-2018 20.30 -0.40
11-2018 21.62 -1.44
10-2018 25.24 2.67
09-2018 24.52 6.30
08-2018 17.90 2.30
07-2018 15.85 0.55
06-2018 15.39 2.61
05-2018 12.15 1.62
04-2018 10.85 1.87
03-2018 10.23 0.99
02-2018 10.26 0.73
01-2018 10.35 1.02
12-2017 11.92 0.69
11-2017 12.98 1.49
10-2017 11.90 2.08
09-2017 11.20 0.65
08-2017 10.68 0.52
07-2017 9.79 0.15
06-2017 10.90 -0.27
05-2017 11.72 0.45
04-2017 11.87 1.31
03-2017 11.29 1.02
02-2017 10.13 0.81
01-2017 9.22 2.46
12-2016 8.53 1.64
11-2016 7.00 0.52
10-2016 7.16 1.44
09-2016 7.28 0.18
08-2016 8.05 -0.29
07-2016 8.79 1.16
06-2016 7.64 0.47
05-2016 6.58 0.58
04-2016 6.57 0.78
03-2016 7.46 -0.04
02-2016 8.78 -0.02
01-2016 9.58 1.82
12-2015 8.81 0.21
11-2015 8.10 0.67
10-2015 7.58 1.55
09-2015 7.95 0.89
08-2015 7.14 0.40
07-2015 6.81 0.09
06-2015 7.20 -0.51
05-2015 8.09 0.56
04-2015 7.91 1.63
03-2015 7.61 1.19
02-2015 7.55 0.71
01-2015 7.24 1.10
12-2014 8.17 -0.44
11-2014 9.15 0.18
10-2014 8.96 1.90
09-2014 8.86 0.14
08-2014 9.54 0.09
07-2014 9.32 0.45
06-2014 9.16 0.31
05-2014 9.66 0.4
04-2014 9.38 1.34
03-2014 8.39 1.13
02-2014 7.89 0.43
01-2014 7.75 1.98
12-2013 7.4 0.46
11-2013 7.32 0.01
10-2013 7.71 1.8
09-2013 7.88 0.77
08-2013 8.17 -0.1
07-2013 8.88 0.31
06-2013 8.3 0.76
05-2013 6.51 0.15
04-2013 6.13 0.42
03-2013 7.29 0.66
02-2013 7.03 0.3
01-2013 7.31 1.65
12-2012 6.16 0.38
11-2012 6.37 0.38
10-2012 7.8 1.96
09-2012 9.19 1.03
08-2012 8.88 0.56
07-2012 9.07 -0.23
06-2012 8.87 -0.9
05-2012 8.28 -0.21
04-2012 11.14 1.52
03-2012 10.43 0.41
02-2012 10.43 0.56
01-2012 10.61 0.56
12-2011 10.45 0.58
11-2011 9.48 1.73
10-2011 7.66 3.27
09-2011 6.15 0.75
08-2011 6.65 0.73
07-2011 6.31 -0.41
06-2011 6.24 -1.43
05-2011 7.17 2.42
04-2011 4.26 0.87
03-2011 3.99 0.42
02-2011 4.16 0.73
01-2011 4.9 0.41
12-2010 6.4 -0.3
11-2010 7.29 0.03
10-2010 8.62 1.83
09-2010 9.24 1.23
08-2010 8.33 0.4
07-2010 7.58 -0.48
06-2010 8.37 -0.56
05-2010 9.1 -0.36
04-2010 10.19 0.6
03-2010 9.56 0.58
02-2010 10.13 1.45
01-2010 8.19 1.85
12-2009 6.53 0.53
11-2009 5.53 1.27
10-2009 5.08 2.41
09-2009 5.27 0.39
08-2009 5.33 -0.3
07-2009 5.39 0.25
06-2009 5.73 0.11
05-2009 5.24 0.64
04-2009 6.13 0.02
03-2009 7.89 1.1
02-2009 7.73 -0.34
01-2009 9.5 0.29
12-2008 10.06 -0.41
11-2008 10.76 0.83
10-2008 11.99 2.6
09-2008 11.13 0.45
08-2008 11.77 -0.24
07-2008 12.06 0.58
06-2008 10.61 -0.36
05-2008 10.74 1.49
04-2008 9.66 1.68
03-2008 9.15 0.96
02-2008 9.1 1.29
01-2008 8.17 0.8
12-2007 8.39 0.22
11-2007 8.4 1.95
10-2007 7.7 1.81
09-2007 7.12 1.03
08-2007 7.39 0.02
07-2007 6.9 -0.73
06-2007 8.6 -0.24
05-2007 9.23 0.5
04-2007 10.72 1.21
03-2007 10.86 0.92
02-2007 10.16 0.43
01-2007 9.93 1
12-2006 9.65 0.23
11-2006 9.86 1.29
10-2006 9.98 1.27
09-2006 10.55 1.29
08-2006 10.26 -0.44
07-2006 11.69 0.85
06-2006 10.12 0.34
05-2006 9.86 1.88
04-2006 8.83 1.34
03-2006 8.16 0.27
02-2006 8.15 0.22
01-2006 7.93 0.75
12-2005 7.72 0.42
11-2005 7.61 1.4
10-2005 7.52 1.79
09-2005 7.99 1.02
08-2005 7.91 0.85
07-2005 7.82 -0.57
06-2005 8.95 0.1
05-2005 8.7 0.92
04-2005 8.18 0.71
03-2005 7.94 0.26
02-2005 8.69 0.02
01-2005 9.24 0.55
12-2004 9.32 0.45
11-2004 9.79 1.54
10-2004 9.86 2.22
09-2004 9 0.94
08-2004 10.04 0.58
07-2004 9.57 0.22
06-2004 8.93 -0.13
05-2004 8.88 0.38
04-2004 10.18 0.59
03-2004 11.83 0.89
02-2004 14.28 0.55
01-2004 16.22 0.74
12-2003 18.36 0.88
11-2003 19.25 1.61
10-2003 20.78 1.42
09-2003 23 1.9
08-2003 24.91 0.15
07-2003 27.44 -0.37
06-2003 29.76 -0.17
05-2003 30.74 1.58
04-2003 29.45 2.09
03-2003 29.41 3.1
02-2003 27.01 2.26
01-2003 26.38 2.59
12-2002 29.75 1.64
11-2002 31.77 2.91
10-2002 33.45 3.29
09-2002 37.05 3.48
08-2002 40.24 2.18
07-2002 41.28 1.44
06-2002 42.6 0.58
05-2002 46.22 0.58
04-2002 52.72 2.06
03-2002 65.11 1.19
02-2002 73.08 1.75
01-2002 73.16 5.32
12-2001 68.53 3.22
11-2001 67.29 4.23
10-2001 66.47 6.07
09-2001 61.8 5.89
08-2001 57.5 2.94
07-2001 56.33 2.39
06-2001 56.1 3.13
05-2001 52.39 5.05
04-2001 48.27 10.33
03-2001 37.51 6.07
02-2001 33.42 1.8
01-2001 35.92 2.51"""
import pandas as pd
df_vals = [s.split(" ") for s in cpi_values.replace('\t', ' ').split("\n")]
df = pd.DataFrame(df_vals, columns=["date", "CPI (Year to Year % Changes)", "CPI (Month to Month % Changes)"])
df.to_csv("tr_cpi_values.csv")
| 2.5
| 2
|
dailyplot.py
|
nerrull/hackatown2018
| 38
|
12782171
|
<filename>dailyplot.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# This code is part of the Solar3Dcity package
# Copyright (c) 2015
# <NAME>
# Delft University of Technology
# <EMAIL>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from solpy import irradiation
import datetime
settings = []
place = (52.01, 4.36)
tzoffset = datetime.timedelta(hours=2)
settings.append({'Name' : 'Surface A',
'Tilt' : 40.0,
'Azimuth' : 180.0,
})
settings.append({'Name' : 'Surface B',
'Tilt' : 40.0,
'Azimuth' : 90.0,
})
#-- Which days
#-- month, day format
epochs = [[3, 27], [6, 21]]
#-- Sampling interval
interval = 5
#-- Results
res = {}
#-- Clouds: left for future work, at this moment the computations are clear-sky
ccddatabase = None
#-- Iterate the days
for epoch in epochs:
month = epoch[0]
day = epoch[1]
d = datetime.date(2015, month, day)
#-- Tweaking to get the proper key values for dates
if month < 10:
m_s = '0' + str(month)
else:
m_s = str(month)
if day < 10:
d_s = '0' + str(day)
else:
d_s = str(day)
#-- These are UTC times. The program is not smart enough to use sunrise and sunset times, but this works too
for hour in range(3, 20):
for minute in range(0, 60, interval):
#-- Datetime
t = datetime.time(hour, minute)
dt = datetime.datetime.combine(d, t)
#-- Get the historic cloud cover for that day
# if ccddatabase:
# cloud_cover = ccddatabase[str(m_s)+str(d_s)]
# else:
# cloud_cover = 0.0
for setting in settings:
if setting['Name'] not in res:
res[setting['Name']] = {}
e = str(m_s)+str(d_s)
if e not in res[setting['Name']]:
res[setting['Name']][e] = []
#-- Global synthetic irradiation from Solpy
global_irradiation_rec = irradiation.blave(dt, place, 0, 180)
#-- Adjust it for the tilt. The value is now in W/m^2
irrValue = irradiation.irradiation(global_irradiation_rec, place, None, setting['Tilt'], setting['Azimuth'], 'p9')
horr_irrValue = irradiation.irradiation(global_irradiation_rec, place, None, 0, 180, 'p9')
#-- Workaround to keep the data aligned
d_ = datetime.date(2013, 1, 1)
t_ = datetime.time(hour, minute)
dt_ = datetime.datetime.combine(d_, t_)
res[setting['Name']][e].append([dt_, irrValue, horr_irrValue])
import numpy as np
import os
#-- Fix LaTeX for macOS 10.12
os.environ['PATH'] = os.environ['PATH'] + ':/Library/TeX/texbin'
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import seaborn as sns
#-- Plotting properties
sns.set(style="white", font='serif', rc={'axes.facecolor': '#FFFFFF', 'grid.linestyle': '', 'axes.grid' : False, 'font.family': ['serif'], 'legend.frameon': True})
colors = sns.color_palette()
# fig = plt.figure(1)
fig, ax1 = plt.subplots(figsize=(8, 4))
timestamp = []
total_ir = []
total_ir_h = []
for v in res['Surface A']['0327']:
timestamp.append(v[0] + tzoffset)
total_ir.append(v[1])
total_ir_h.append(v[2])
A_1 = plt.plot(timestamp, total_ir, color=colors[0], linestyle='--', marker='o', markevery=slice(60, 170, 15))
timestamp = []
total_ir = []
total_ir_h = []
for v in res['Surface B']['0327']:
timestamp.append(v[0] + tzoffset)
total_ir.append(v[1])
total_ir_h.append(v[2])
B_1 = plt.plot(timestamp, total_ir, color=colors[1], linestyle='--', marker='v', markevery=slice(60, 170, 15))
# H_1 = plt.plot(timestamp, total_ir_h, color=colors[2], linestyle='--')
timestamp = []
total_ir = []
total_ir_h = []
for v in res['Surface A']['0621']:
timestamp.append(v[0] + tzoffset)
total_ir.append(v[1])
total_ir_h.append(v[2])
A_2 = plt.plot(timestamp, total_ir, color=colors[0], linestyle='-', marker='o', markevery=slice(30, 185, 15))
timestamp = []
total_ir = []
total_ir_h = []
for v in res['Surface B']['0621']:
timestamp.append(v[0] + tzoffset)
total_ir.append(v[1])
total_ir_h.append(v[2])
B_2 = plt.plot(timestamp, total_ir, color=colors[1], linestyle='-', marker='v', markevery=slice(30, 185, 15))
# H_2 = plt.plot(timestamp, total_ir_h, color=colors[2], linestyle='-')
ddd = np.zeros(len(total_ir_h))
#plt.fill_between(timestamp, total_ir_h, where=total_ir_h>=ddd, interpolate=True, color='k')
import matplotlib.dates as md
xfmt = md.DateFormatter('%H:%M')
ax1.xaxis.set_major_formatter(xfmt)
sns.despine(left=False, bottom=False)
# ax1.tick_params(axis='x', which='major', bottom='on')
# ax1.set_xlim([734869.16, 734869.84])
ax1.set_ylim([0, 1050])
# plt.title('Clear-sky global solar irradiance for Delft, the Netherlands', size=15)
plt.xlabel('Local time', size=14)
#plt.ylabel(u'Total solar irradiation (W/m²)', size=14)
plt.ylabel(r'Global solar irradiance (W/m$^{2}$)', size=14)
plt.legend(['A on 27 Mar', 'B on 27 Mar', 'A on 21 Jun', 'B on 21 Jun'], loc='upper center', bbox_to_anchor=(0.5, 1.15), fancybox=1, shadow=0, ncol=4, numpoints=1, prop={'size':12})
plt.savefig('dailyplot.png', bbox_inches='tight', dpi=300)
plt.savefig('dailyplot.pdf', bbox_inches='tight')
# plt.show()
| 1.695313
| 2
|
controller.py
|
woxiang-H/auto-beaver
| 0
|
12782172
|
import sys
import os
from settings import beaver_broker_ip, beaver_broker_port, autotestdir, beaver_datanode_file, gflagsfile, config_path, log_dir, index_forsearch, pb_forsearch
import psutil
import time
import numpy as np
import requests
#MEM_MAX = psutil.virtual_memory().total
MEM_MAX = 0.8*32*1024*1024*1024 # memory size of tikv node, not current PC
#------------------knob controller------------------
# disable_auto_compactions
def set_disable_auto_compactions(ip, port, val):
cmd="./tikv-ctl --host "+ip+":"+port+" modify-tikv-config -m kvdb -n default.disable_auto_compactions -v "+str(val)
res=os.popen(cmd).read() # will return "success"
return(res)
knob_set=\
{
"--max_concurrency_tasks_per_search":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [4, 6, 8], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--max_per_search_ram":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [198], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--max_per_sub_search_ram":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [99], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--block_ids_per_batch":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [16, 18, 20], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--lease_timeout":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [4, 8, 16, 32, 64], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--enable_query_cache":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": ['false', 'true'], # if type==enum, list all valid values
"type": "bool", # int / enum
"default": 0 # default value
},
}
#------------------metric controller------------------
def read_write_throughput(ip, port):
return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function
def read_write_latency(ip, port):
return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function
def read_get_throughput(ip, port):
return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function
def read_get_latency(ip, port):
return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function
def read_scan_throughput(ip, port):
return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function
def read_scan_latency(ip, port):
return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function
def read_store_size(ip, port):
return(0)
def read_compaction_cpu(ip, port):
cmd="ps -aux|grep beaver_datanode|grep -v 'grep'|grep -v '/bin/sh'|awk -F' *' '{print $3}'"
res=os.popen(cmd).read()
if len(res) == 0:
return 0
else:
return(res)
def read_compaction_mem(ip, port):
cmd="ps -aux|grep beaver_datanode|grep -v 'grep'|grep -v '/bin/sh'|awk -F' *' '{print $4}'"
res=os.popen(cmd).read()
if len(res) == 0:
return 0
else:
return(res)
def read_search_latency(ip, port):
url = "http://"+ip+":"+port+"/_search?index="+index_forsearch+"&sid=test&rpc_timeout=60"
data = pb_forsearch
testnum = 20
num = 100
restime = []
# costime = []
for i in range(num + testnum):
start_api = beaverrequest(url, data)
if i >= testnum:
# restime.append(start_api[1])
restime.append(start_api[0]["timecost"])
sortedRestime = sorted(restime)
newrestime = sortedRestime[:-10]
return sum(newrestime) / len(newrestime)
def beaverrequest(url, data):
r = requests.post(url, data=data)
return [r.json(), r.elapsed.total_seconds(), r.status_code]
metric_set=\
{"write_throughput":
{
"read_func": read_write_throughput,
"lessisbetter": 0, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"write_latency":
{
"read_func": read_write_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"get_throughput":
{
"read_func": read_get_throughput,
"lessisbetter": 0, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"get_latency":
{
"read_func": read_get_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"scan_throughput":
{
"read_func": read_scan_throughput,
"lessisbetter": 0, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"scan_latency":
{
"read_func": read_scan_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"store_size":
{
"read_func": read_store_size,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"compaction_cpu":
{
"read_func": read_compaction_cpu,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"compaction_mem":
{
"read_func": read_compaction_mem,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"search_latency":
{
"read_func": read_search_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
}
#------------------workload controller------------------
def run_workload(wl_type):
return(None)
def load_workload(wl_type):
return(None)
#------------------common functions------------------
def set_tikvyml(knob_sessname, knob_val):
ymldir=os.path.join(autotestdir,"conf","beaver_test.gflags_new")
tmpdir=os.path.join(autotestdir,"conf","beaver_test.gflags")
if not os.path.exists(os.path.dirname(tmpdir)):
os.makedirs(os.path.dirname(tmpdir))
os.popen("cp "+gflagsfile+" "+tmpdir).read()
with open(tmpdir, 'r') as read_file, open(ymldir, 'w') as write_file:
dic={}
for line in read_file:
value = line.strip().split("=")
if len(value) > 1:
dic[value[0]] = value[1]
if(knob_set[knob_sessname]['type']=='enum'):
idx=knob_val
knob_val=knob_set[knob_sessname]['enumval'][idx]
if(knob_set[knob_sessname]['type']=='bool'):
if(knob_val==0):
knob_val='false'
else:
knob_val='true'
if(knob_sessname=='--max_shard_size'):
knob_val=str(knob_val)+"g"
if(knob_sessname=='--max_per_search_ram' or knob_sessname=='--max_per_sub_search_ram'):
knob_val=str(knob_val)+"m"
if(knob_sessname in dic):
dic[knob_sessname] = knob_val
else:
return('failed')
print("set_beaver_datanode_gflags:: ",knob_sessname, knob_val)
for kkk in dic:
write_file.write(kkk+"="+str(dic[kkk])+'\n')
# os.popen("rm "+tmpdir+" && "+"mv "+ymldir+" "+tmpdir)
os.remove(tmpdir)
os.rename(ymldir, tmpdir)
time.sleep(0.5)
return('success')
# if(knob_name=='block-size'):
# knob_val=str(knob_val)+"KB"
# if(knob_name=='write-buffer-size' or knob_name=='max-bytes-for-level-base' or knob_name=='target-file-size-base'):
# knob_val=str(knob_val)+"MB"
# if(knob_name in tmpcontent[knob_sess[0]][knob_sess[1]]): # TODO: only support 2 level of knob_sess currently
# tmpcontent[knob_sess[0]][knob_sess[1]][knob_name]=knob_val
# else:
# return('failed')
# print("set_tikvyml:: ",knob_sessname, knob_sess, knob_name, knob_val)
# ymlf=open(ymldir, 'w')
# yaml.dump(tmpcontent, ymlf, Dumper=yaml.RoundTripDumper)
# os.popen("rm "+tmpdir+" && "+"mv "+ymldir+" "+tmpdir)
# time.sleep(0.5)
# return('success')
def set_knob(knob_name, knob_val):
changebyyml=knob_set[knob_name]["changebyyml"]
if(changebyyml):
res=set_tikvyml(knob_name, knob_val)
else:
func=knob_set[knob_name]["set_func"]
res=func(beaver_broker_ip, beaver_broker_port, knob_val)
return res
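# Illustrative call (values are examples only; knob names must match the keys in knob_set above,
# and for an 'enum' knob the second argument is an index into its enumval list):
#   set_knob("--lease_timeout", 1)   # writes --lease_timeout=8 into the test gflags file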
def read_knob(knob_name, knob_cache):
res=knob_cache[knob_name]
return res
def read_metric(metric_name, rres=None):
if(rres!=None):
rl=rres.split('\n')
rl.reverse()
if(metric_name=="write_latency"):
i=0
while((not rl[i].startswith('UPDATE ')) and (not rl[i].startswith('INSERT '))):
i+=1
dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0]
dat=int(dat)
return(dat)
elif(metric_name=="get_latency"):
i=0
while(not rl[i].startswith('READ ')):
i+=1
dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0]
dat=int(dat)
return(dat)
elif(metric_name=="scan_latency"):
i=0
while(not rl[i].startswith('SCAN ')):
i+=1
dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0]
dat=int(dat)
return(dat)
elif(metric_name=="write_throughput"):
i=0
while((not rl[i].startswith('UPDATE ')) and (not rl[i].startswith('INSERT '))):
i+=1
dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0]
dat=float(dat)
return(dat)
elif(metric_name=="get_throughput"):
i=0
while(not rl[i].startswith('READ ')):
i+=1
dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0]
dat=float(dat)
return(dat)
elif(metric_name=="scan_throughput"):
i=0
while(not rl[i].startswith('SCAN ')):
i+=1
dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0]
dat=float(dat)
return(dat)
func=metric_set[metric_name]["read_func"]
res=func(beaver_broker_ip, beaver_broker_port)
return res
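# For reference, the parsing above assumes go-ycsb style summary lines roughly of the form
# (spacing and the set of extra fields are approximate):
#   READ   - Takes(s): 10.0, Count: 12345, OPS: 1234.5, Avg(us): 812, Min(us): 66, ...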
def init_knobs():
# if there are knobs whose range is related to PC memory size, initialize them here
pass
def calc_metric(metric_after, metric_before, metric_list):
num_metrics = len(metric_list)
new_metric = np.zeros([1, num_metrics])
for i, x in enumerate(metric_list):
if(metric_set[x]["calc"]=="inc"):
new_metric[0][i]=metric_after[0][i]-metric_before[0][i]
elif(metric_set[x]["calc"]=="ins"):
new_metric[0][i]=metric_after[0][i]
return(new_metric)
# def restart_db():
# #cmd="cd /home/tidb/tidb-ansible/ && ansible-playbook unsafe_cleanup_data.yml"
# dircmd="cd "+ autotestdir + " && "
# clrcmd="ansible-playbook unsafe_cleanup_data.yml"
# depcmd="ansible-playbook deploy.yml"
# runcmd="ansible-playbook start.yml"
# ntpcmd="ansible-playbook -i hosts.ini deploy_ntp.yml -u tidb -b" #need sleep 10s after ntpcmd
# print("-------------------------------------------------------")
# clrres = os.popen(dircmd+clrcmd).read()
# if("Congrats! All goes well" in clrres):
# print("unsafe_cleanup_data finished, res == "+clrres.split('\n')[-2])
# else:
# print(clrres)
# print("unsafe_cleanup_data failed")
# exit()
# print("-------------------------------------------------------")
# ntpres = os.popen(dircmd + ntpcmd).read()
# time.sleep(10)
# if ("Congrats! All goes well" in ntpres):
# print("set ntp finished, res == " + ntpres.split('\n')[-2])
# else:
# print(ntpres)
# print("set ntp failed")
# exit()
# print("-------------------------------------------------------")
# depres = os.popen(dircmd + depcmd).read()
# if ("Congrats! All goes well" in depres):
# print("deploy finished, res == "+depres.split('\n')[-2])
# else:
# print(depres)
# print("deploy failed")
# exit()
# print("-------------------------------------------------------")
# runres = os.popen(dircmd + runcmd).read()
# if ("Congrats! All goes well" in runres):
# print("start finished, res == "+runres.split('\n')[-2])
# else:
# print(runres)
# print("start failed")
# exit()
# print("-------------------------------------------------------")
def restart_beaver_datanode():
dircmd="cd "+ autotestdir + " && "
stopcmd="ps -ef|grep beaver_datanode|grep -v 'grep'|awk -F' *' '{print $2}'|xargs kill"
querycmd="ps -ef|grep beaver_datanode|grep -v 'grep'|awk -F' *' '{print $2}'"
beaver_conf=os.path.join(autotestdir,"conf","beaver_datanode.gflags")
test_conf=os.path.join(autotestdir,"conf","beaver_test.gflags")
startcmd=beaver_datanode_file+" --flagfile="+beaver_conf+" --config_path="+config_path+" --log_dir="+log_dir+" > /dev/null 2>&1"
print("-----------------------------stop beaver datanode--------------------------")
stopres = os.popen(stopcmd).read()
if len(os.popen(querycmd).read()) != 0:
for i in range(5):
time.sleep(2)
psres = os.popen(querycmd).read()
if len(psres) == 0 :
print("Beaver has been closed successfully!")
break
else:
print("Waiting beaver to close, pid is %s" % psres)
if i == 4:
print("Beaver close failed!")
exit()
else:
print("Beaver closed successfully!")
print("-----------------------------replace config file--------------------------")
if os.path.exists(beaver_conf):
os.remove(beaver_conf)
replaceres = os.popen("cp "+test_conf+" "+beaver_conf).read()
if len(replaceres) == 0:
print("replace config file finished!")
else:
print(replaceres)
print("replace config file failed!")
exit()
print("-----------------------------start beaver datanode--------------------------")
startres = os.popen(startcmd)
beaver_url = "http://"+beaver_broker_ip+":"+beaver_broker_port+"/_search?index="+index_forsearch+"&sid=test&rpc_timeout=60"
for i in range(20):
time.sleep(10)
curlres = requests.post(beaver_url, data=pb_forsearch).json()
if "result" in curlres and curlres['result'] == False:
print("Waiting beaver datanode to be available...")
else:
print("Beaver datanode is available!")
break
if i == 19:
print(curlres)
print("Beaver start failed!")
exit()
print("---------------------------------------------------------------------------")
| 1.96875
| 2
|
process.py
|
JamesMilnerUK/facebook-pop-viz
| 2
|
12782173
|
<reponame>JamesMilnerUK/facebook-pop-viz
from osgeo import gdal, ogr
from os import remove, getcwd
from os.path import isfile, join, basename
import sys
import numpy as np
import traceback
from shutil import copyfile
# this allows GDAL to throw Python Exceptions
gdal.UseExceptions()
def single_band( input_file, output_file ):
ds = gdal.Open(input_file)
band = ds.GetRasterBand(1)
block_sizes = band.GetBlockSize()
x_block_size = block_sizes[0]
y_block_size = block_sizes[1]
xsize = band.XSize
ysize = band.YSize
format = "GTiff"
driver = gdal.GetDriverByName( format )
dst_ds = driver.Create(output_file, xsize, ysize, 1, gdal.GDT_Byte )
dst_ds.SetGeoTransform(ds.GetGeoTransform())
dst_ds.SetProjection(ds.GetProjection())
band = dst_ds.GetRasterBand(1)
array = band.ReadAsArray(0,0,xsize,ysize).astype(np.float32)
band.WriteArray(array)
dst_ds = None
return output_file
def downsample_output (input_file, downsample):
ds_in = gdal.Open(input_file)
drv = gdal.GetDriverByName( "GTiff" )
output_file = input_file.replace(".tif", "") + "_downsample.tif"
band = ds_in.GetRasterBand(1)
high_res = band.ReadAsArray(0,0,ds_in.RasterXSize, ds_in.RasterYSize).astype(np.float32)
# Find an unused prefix for a temporary file
random_prefix = 0
while isfile(str(random_prefix) + "_temp.tif") == True:
random_prefix += 1
temp_file = str(random_prefix) + "_temp.tif"
print "Making temporary file " + temp_file + "..."
ds_out = drv.Create(temp_file, ds_in.RasterXSize, ds_in.RasterYSize, 1, gdal.GDT_Byte )
ds_out.SetGeoTransform( ds_in.GetGeoTransform())
ds_out.SetProjection ( ds_in.GetProjectionRef() )
ds_out.GetRasterBand(1).WriteArray ( high_res )
geoTransform = ds_in.GetGeoTransform()
drv = gdal.GetDriverByName( "GTiff" )
resampled = drv.Create( output_file , ds_in.RasterXSize/downsample, ds_in.RasterYSize/downsample, 1, gdal.GDT_Byte )
transform = ( geoTransform[0], geoTransform[1]*downsample, geoTransform[2], geoTransform[3],geoTransform[4], geoTransform[5]*downsample )
resampled.SetGeoTransform( transform )
resampled.SetProjection ( ds_in.GetProjectionRef() )
# We can set some meta data for use in the client
transform = resampled.GetGeoTransform()
width = resampled.RasterXSize
height = resampled.RasterYSize
minx = transform[0]
maxx = transform[0] + width * transform[1] + height * transform[2]
miny = transform[3] + width * transform[4] + height * transform[5]
maxy = transform[3]
resampled.SetMetadata({
"minX": str(minx), "maxX": str(maxx),
"minY": str(miny), "maxY": str(maxy)
})
print "Extent: "
print "Min X", str(minx)
print "Min Y", str(miny)
print "Max X", str(maxx)
print "Max Y", str(maxy)
gdal.RegenerateOverviews ( ds_out.GetRasterBand(1), [ resampled.GetRasterBand(1) ], 'mode' )
resampled.GetRasterBand(1).SetNoDataValue ( 0 )
resampled = None
ds_out = None
ds_in = None
print "Removing temporary file " + temp_file + "..."
try:
remove(temp_file)
except OSError:
pass
return output_file
if __name__ == '__main__':
try:
if len(sys.argv) == 1:
print "Input file not specified"
else:
input_tif = str(sys.argv[1])
print "Resampling", input_tif
try:
oneband = input_tif.replace(".tif", "") + "_oneband.tif"
single_band(input_tif, oneband)
except MemoryError:
print "Raster was too large to to put into memory during extract to single band"
if len(sys.argv) != 3:
downsample = 5
else:
downsample = int(sys.argv[2])
try:
output_file = downsample_output(input_tif, downsample)
copyfile(output_file, join(getcwd(), "data", basename(output_file)))
print "...Done!"
except MemoryError:
print "Raster was too large to put into memory during resampling"
except:
error = sys.exc_info()[0]
print "There was an error: ", error, "\n"
print traceback.format_exc()
| 2.421875
| 2
|
bionumpy/string_matcher.py
|
knutdrand/bionumpy
| 0
|
12782174
|
<filename>bionumpy/string_matcher.py<gh_stars>0
from bionumpy.rollable import RollableFunction
from bionumpy.sequences import as_sequence_array
import itertools
import numpy as np
import bionumpy as bnp
import re
from bionumpy.sequences import Sequence
class StringMatcher(RollableFunction):
def __init__(self, matching_sequence, encoding):
self._encoding = encoding
self._matching_sequence_array = as_sequence_array(matching_sequence, encoding=encoding)
@property
def window_size(self):
return len(self._matching_sequence_array)
def __call__(self, sequence):
return np.all(sequence == self._matching_sequence_array, axis=-1)
class FixedLenRegexMatcher(RollableFunction):
def __init__(self, matching_regex, encoding):
self._sub_matchers = construct_fixed_len_regex_matchers(matching_regex, encoding)
@property
def window_size(self):
return self._sub_matchers[0].window_size
def __call__(self, sequence):
union_of_sub_matches = self._sub_matchers[0](sequence)
for matcher in self._sub_matchers:
union_of_sub_matches = np.logical_or(union_of_sub_matches, matcher(sequence))
return union_of_sub_matches
class MaskedStringMatcher(RollableFunction):
def __init__(self, matching_sequence_array, mask):
#assert isinstance(matching_sequence_array, Sequence), type(matching_sequence_array)
assert isinstance(mask, np.ndarray)
assert matching_sequence_array.shape == mask.shape
self._matching_sequence_array = matching_sequence_array
self._mask = mask
@property
def window_size(self):
return len(self._matching_sequence_array)
def __call__(self, sequence):
direct_match = ( sequence == self._matching_sequence_array )
masked_or_match = np.logical_or(direct_match, self._mask)
return np.all(masked_or_match, axis=-1)
def construct_fixed_len_regex_matchers(matching_regex : str, encoding):
r = re.compile(r'\[[^\]]+\]')
hit = r.search(matching_regex)
if hit is None:
return [construct_wildcard_matcher(matching_regex, encoding)]
else:
start, end = hit.span()
pre, post = matching_regex[0: start], matching_regex[end:]
return list(itertools.chain.from_iterable(
[construct_fixed_len_regex_matchers(pre+symbol+post, encoding)
for symbol in matching_regex[start+1 : end-1]]))
def construct_wildcard_matcher(matching_regex : str, encoding):
mask = np.array( [symbol=='.' for symbol in matching_regex] )
assert encoding == bnp.encodings.ACTGEncoding, "NotImplemented: Support for other encodings awaits a generic way to replace '.' with an arbitrary symbol supported by the encoding"
base_seq = as_sequence_array( matching_regex.replace('.', 'A'), encoding=encoding )
return MaskedStringMatcher(base_seq, mask)
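# Rough usage sketch (the input `window` is assumed to be an ACTG-encoded sequence window of
# matching length, produced elsewhere with bionumpy):
#   matcher = StringMatcher("ACGT", bnp.encodings.ACTGEncoding)
#   hits = matcher(window)  # boolean array, True where the window equals "ACGT"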
| 2.515625
| 3
|
tests/fractalmusic/deepcopy/test_fm_deepcopy.py
|
alexgorji/musurgia
| 0
|
12782175
|
import os
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musurgia.unittest import TestCase
from musurgia.fractaltree.fractalmusic import FractalMusic
path = str(os.path.abspath(__file__).split('.')[0])
class Test(TestCase):
def setUp(self) -> None:
self.score = TreeScoreTimewise()
fm = FractalMusic(tempo=60, quarter_duration=10, reading_direction='vertical')
fm.add_layer()
fm.add_layer()
self.fm = fm.get_children()[1]
self.deep_copied = self.fm.__deepcopy__()
def test(self, exp=None, act=None):
if not exp:
exp = self.fm
if not act:
act = self.deep_copied
self.assertEqual(exp.value, act.value)
self.assertEqual(exp.proportions, act.proportions)
self.assertEqual(exp.tree_permutation_order, act.tree_permutation_order)
self.assertEqual(exp.fractal_order, act.fractal_order)
self.assertEqual(exp.reading_direction, act.reading_direction)
self.assertEqual(exp._name, act._name)
self.assertEqual(exp.tree_directions, act.tree_directions)
self.assertEqual(exp.tempo, act.tempo)
self.assertEqual(exp.midi_value, act.midi_value)
def test_1(self):
self.assertIsNone(self.deep_copied.up)
def test_2(self):
self.assertNotEqual(self.deep_copied.name, self.fm.name)
def test_3(self):
for leaf in self.fm.traverse_leaves():
leaf.chord.add_words(leaf.fractal_order)
copied = self.fm.__deepcopy__()
copied.get_simple_format().to_stream_voice().add_to_score(self.score)
xml_path = path + '_test_3.xml'
self.score.write(xml_path)
expected_path = path + '_test_3_expected.xml'
expected_score = TreeScoreTimewise()
self.fm.get_simple_format().to_stream_voice().add_to_score(expected_score)
expected_score.write(expected_path)
self.assertCompareFiles(xml_path, expected_path)
def test_deep_copied_child_midi_values(self):
fm = FractalMusic(proportions=[1, 2, 3, 4], tree_permutation_order=[3, 1, 4, 2], quarter_duration=20,
tempo=70)
fm.midi_generator.midi_range = [36, 60]
fm.add_layer()
selected_node = fm.get_children()[0]
copied_node = selected_node.__deepcopy__()
copied_node.add_layer()
actual = [node.midi_value for node in copied_node.get_children()]
selected_node.add_layer()
expected = [node.midi_value for node in selected_node.get_children()]
self.assertEqual(expected, actual)
| 2.46875
| 2
|
chisch/utils/jsonutils.py
|
zhaowenxiang/chisch
| 0
|
12782176
|
<filename>chisch/utils/jsonutils.py
#! -*- coding: utf-8 -*-
import simplejson
from django.db import models
from dss.Serializer import serializer
from django.db.models.query import QuerySet
def to_json(obj, **kwargs):
exclude_attr = kwargs.get('exclude_attr', [])
include_attr = kwargs.get('include_attr', [])
return serializer(
obj,
datetime_format='string',
output_type='json',
exclude_attr=exclude_attr,
include_attr=include_attr)
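# Minimal usage sketch (the model instance and attribute names below are illustrative only):
#   payload = to_json(some_instance, exclude_attr=['password'])
#   # returns a JSON string produced by dss.Serializer with datetimes rendered as strings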
| 2.265625
| 2
|
dashboard/processors.py
|
JayaramanSudhakar/yorek-ssis-dashboard
| 0
|
12782177
|
from dashboard import app
from distutils.util import strtobool
@app.context_processor
def utility_processor():
def tile_color(kpi_value):
result = 'green'
#if (kpi_value == 0) :
# result = 'primary'
if (kpi_value > 0) :
result = 'red'
return result
return dict(tile_color = tile_color)
@app.context_processor
def utility_processor():
def tile_color_inv(kpi_value):
result = 'green'
#if (kpi_value == 0) :
# result = 'primary'
if (kpi_value < 0) :
result = 'red'
return result
return dict(tile_color_inv = tile_color_inv)
@app.context_processor
def utility_processor():
def package_status_class(status):
result = {
0 : 'default',
1 : 'default',
2 : 'info',
3 : 'danger',
4 : 'danger',
5 : 'default',
6 : 'danger',
7 : 'success',
8 : 'warning',
9 : 'default'
}
return result[status]
return dict(package_status_class = package_status_class)
@app.context_processor
def utility_processor():
def executable_status_class(status):
result = {
0 : 'success',
1 : 'danger',
2 : 'default',
3 : 'danger'
}
return result[status]
return dict(executable_status_class = executable_status_class)
@app.context_processor
def utility_processor():
def boolean_to_check(value):
result = ""
if (value == 1):
result = "check-"
return result
return dict(boolean_to_check = boolean_to_check)
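# In a Jinja2 template these helpers would typically be used along these lines (illustrative only):
#   <div class="tile-stats tile-{{ tile_color(kpi.delta) }}">...</div>
#   <span class="label label-{{ package_status_class(execution.status) }}">...</span>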
| 2.234375
| 2
|
tests/trees_tests/splay_tree_test.py
|
warmachine028/datastax
| 5
|
12782178
|
<filename>tests/trees_tests/splay_tree_test.py
from __future__ import annotations
import random
import unittest
from datastax.errors import (
NodeNotFoundWarning,
DeletionFromEmptyTreeWarning,
DuplicateNodeWarning
)
from datastax.trees import SplayTree
from tests.trees_tests.common_helper_functions import (
inorder_items, level_wise_items, check_bst_property
)
class TestSplayTree(unittest.TestCase):
def setUp(self) -> None:
self.s_tree = SplayTree()
self.test_cases = 100
self.max_sample_size = 10
def test_array_representation(self):
testcases = [
[*range(10)],
[None, 10],
[10, None, None],
['root', None, None, 'child']
]
results = [
[*range(9, -1, -1)],
[],
[10],
['child', 'root']
]
for testcase, result in zip(testcases, results):
tree = SplayTree(testcase)
self.assertEqual(result, tree.array_repr)
def test_construction(self):
numbers = range(1, 1000)
for _ in range(self.test_cases):
sample_size = random.randint(1, self.max_sample_size)
sample = random.sample(numbers, sample_size)
tree = SplayTree(sample)
self.assertEqual(sorted(sample), inorder_items(tree))
# Check deletion after insertion
to_delete = random.choice(sample)
tree.delete(to_delete)
check_bst_property(tree.root)
# Test insertion after deletion
tree.insert(to_delete)
check_bst_property(tree.root)
self.assertEqual(to_delete, tree.root.data)
def test_delete(self):
# Test deletion from empty Tree
with self.assertWarns(DeletionFromEmptyTreeWarning):
tree = SplayTree()
tree.delete()
self.assertEqual([], level_wise_items(tree))
sample = random.sample(range(100), self.max_sample_size)
tree = SplayTree(sample)
# Attempting deletion of an item that is not in the tree
with self.assertWarns(NodeNotFoundWarning):
tree.delete(404)
# The largest node is splayed to the root
self.assertEqual(max(sample), tree.root.data)
temp = list(sample)
for item in sample:
tree.delete(item)
temp.remove(item)
self.assertEqual(sorted(temp), inorder_items(tree))
# checking Emptiness
self.assertTrue([] == tree.array_repr == level_wise_items(tree))
# Attempting deletion from empty tree
with self.assertWarns(DeletionFromEmptyTreeWarning):
tree.delete(404)
# Attempt insertion after deletion
insertion_order = random.sample(range(10), self.max_sample_size)
for i, item in enumerate(insertion_order):
tree.insert(item)
self.assertEqual(item, tree.root.data)
self.assertEqual(sorted(insertion_order[:i + 1]),
inorder_items(tree))
if not i:
self.assertEqual(item, tree.root.data)
def test_insertion(self):
numbers = range(1, 10000)
for _ in range(self.test_cases):
tree = self.s_tree
sample_size = random.randint(1, self.max_sample_size)
sample = random.sample(numbers, sample_size)
for item in sample:
tree.insert(item)
# Check deletion after insertion
to_delete = random.choice(sample)
tree.delete(to_delete)
temp = list(sample)
temp.remove(to_delete)
self.assertEqual(sorted(temp), inorder_items(tree))
# Again perform insertion
tree.insert(to_delete)
self.assertEqual(sorted(sample), inorder_items(tree))
self.assertTrue(check_bst_property(tree.root))
# Resetting the tree
tree._root = None
def test_insertion_duplicate(self):
# Test duplicate node insertion: must splay last accessed node
numbers = range(1, 1000)
sample_size = random.randint(1, self.max_sample_size)
sample = random.sample(numbers, sample_size)
duplicate = random.choice(sample)
tree = SplayTree(sample)
with self.assertWarns(DuplicateNodeWarning):
tree.insert(duplicate)
self.assertEqual(sorted(sample), inorder_items(tree))
self.assertEqual(duplicate, tree.root.data)
def test_search_empty_tree(self):
with self.assertWarns(NodeNotFoundWarning):
self.s_tree.search(404)
# Filling the tree
sample = random.sample(range(1, 100), 5)
for item in sample:
self.s_tree.insert(item)
# Emptying the tree
for item in sample:
self.s_tree.delete(item)
# Then performing search
with self.assertWarns(NodeNotFoundWarning):
self.s_tree.search(404)
def test_search_invalid_item(self):
# Filling the tree
sample = random.sample(range(1, 100), 5)
for item in sample:
self.s_tree.insert(item)
with self.assertWarns(NodeNotFoundWarning):
self.s_tree.search(404)
self.assertEqual(max(sample), self.s_tree.root.data)
self.s_tree.search(-1)
self.assertEqual(min(sample), self.s_tree.root.data)
def test_deletion_empty_tree(self):
with self.assertWarns(DeletionFromEmptyTreeWarning):
self.s_tree.delete(404)
# Filling the tree
sample = random.sample(range(1, 100), 5)
for item in sample:
self.s_tree.insert(item)
# Emptying the tree
for item in sample:
self.s_tree.delete(item)
with self.assertWarns(NodeNotFoundWarning):
self.s_tree.search(404)
# perform insertion after that
item = random.choice(sample)
self.s_tree.insert(item)
self.assertEqual(item, self.s_tree.root.data)
def test_deletion_of_root(self):
for _ in range(self.test_cases):
# Filling the tree
sample = random.sample(range(1, 100), 5)
for item in sample:
self.s_tree.insert(item)
# Emptying the tree
for _ in sample:
self.s_tree.delete(self.s_tree.root.data)
# Tree must be empty
self.assertEqual(None, self.s_tree.root)
self.assertEqual([], level_wise_items(self.s_tree))
# perform insertion after that
item = random.choice(sample)
self.s_tree.insert(item)
self.assertEqual(item, self.s_tree.root.data)
self.s_tree._root = None
def test_delete_invalid_item(self):
for _ in range(self.test_cases):
# Filling the tree
sample = random.sample(range(1, 100), 5)
for item in sample:
self.s_tree.insert(item)
with self.assertWarns(NodeNotFoundWarning):
self.s_tree.delete(404)
self.assertEqual(max(sample), self.s_tree.root.data)
self.s_tree.delete(-1)
self.assertEqual(min(sample), self.s_tree.root.data)
self.assertEqual(sorted(sample), inorder_items(self.s_tree))
self.assertTrue(check_bst_property(self.s_tree.root))
# resetting the tree
self.s_tree._root = None
def test_search(self):
sample = random.sample(range(10), 10)
item = random.choice(sample)
tree = SplayTree(sample)
self.assertEqual(item, tree.search(item).data)
with self.assertWarns(NodeNotFoundWarning):
self.assertNotEqual(11, tree.search(11).data)
self.assertTrue(bool(tree.search(random.choice(sample))))
self.assertEqual(sorted(sample), inorder_items(tree))
self.assertTrue(check_bst_property(tree.root))
if __name__ == '__main__':
unittest.main()
| 2.625
| 3
|
mlight/session.py
|
GitHK/mlight
| 0
|
12782179
|
<filename>mlight/session.py
from collections import deque
import motor.motor_asyncio
class DBSession:
def __init__(self, mongo_uri, database_name):
self.mongo_uri = mongo_uri
self.database_name = database_name
self.client = motor.motor_asyncio.AsyncIOMotorClient(mongo_uri)
self.registered_models = deque()
@property
def database(self):
""" Returns the motor database object. """
return self.client[self.database_name]
def register_model(self, model):
""" Register models to session and also create indexes. """
if model not in self.registered_models:
self.registered_models.append(model)
async def create_indexes(self):
"""
Creates indexes on the registered collections.
Note: If you removed an index it must be deleted from the database manually!
"""
for collection in self.registered_models:
await collection.create_indexes()
def clear_all(self):
""" Removes all the documents ready to be flushed. """
for collection in self.registered_models:
collection.clear_all()
async def flush_all(self, check_integrity=True):
"""
Stores the current modified items to the database.
:param check_integrity: if False skips the mapping check on each document.
"""
for obj in self.registered_models:
await obj.flush_all(check_integrity)
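# Illustrative usage sketch (the URI, database name and UserModel are placeholders):
#   session = DBSession("mongodb://localhost:27017", "mlight_db")
#   session.register_model(UserModel)
#   await session.create_indexes()
#   ...mutate documents...
#   await session.flush_all()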
| 2.5
| 2
|
tests/shops/rsonline.py
|
MrLeeh/shopy
| 0
|
12782180
|
<reponame>MrLeeh/shopy
"""
Copyright 2015 by <NAME>
"""
from shopy.shop import Shop
with open('../shops/rsonline.json', 'r') as f:
shop = Shop.from_json(f)
iterator = shop.find("Batterien AAA 1.5V")
for item in iterator:
print(item, item.images)
| 2.765625
| 3
|
src/utils/formatting.py
|
AdityaSidharta/ML_server_framework
| 1
|
12782181
|
<reponame>AdityaSidharta/ML_server_framework
import pandas as pd
def df_na_value(df, schema):
column_names = schema.get_col_names()
for column_name in column_names:
if column_name in df.columns:
column_na_value = schema.get_col_na_value(column_name)
df[column_name] = df[column_name].fillna(column_na_value)
return df
def df_format(df, schema):
"""converting all columns within a dataframe to have a certain type, as defined in int_cols, float_cols, str_cols,
and date_cols"""
int_cols = schema.get_int_cols()
float_cols = schema.get_float_cols()
str_cols = schema.get_str_cols()
date_cols = schema.get_date_cols()
time_cols = schema.get_time_cols()
for column_name in int_cols:
if column_name in df.columns:
df[column_name] = df[column_name].astype(int)
for column_name in float_cols:
if column_name in df.columns:
df[column_name] = df[column_name].astype(float)
for column_name in str_cols:
if column_name in df.columns:
df[column_name] = df[column_name].astype(str)
for column_name in date_cols:
if column_name in df.columns:
date_format = schema.get_col_format(column_name)
df[column_name] = pd.to_datetime(df[column_name], format=date_format)
for column_name in time_cols:
if column_name in df.columns:
time_format = schema.get_col_format(column_name)
df[column_name] = pd.to_datetime(df[column_name], format=time_format)
return df
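# Hypothetical usage sketch (the `schema` object is an assumption, mirroring the accessors used above):
#   df = pd.read_csv("input.csv")
#   df = df_na_value(df, schema)
#   df = df_format(df, schema)  # casts int/float/str columns and parses date/time columns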
| 2.96875
| 3
|
tools/SolBinding/Config/BaseConfig.py
|
wzhengsen/engine-x
| 0
|
12782182
|
<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) 2021 - wzhengsen
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from typing import Dict
from Util.EnvChecker import EnvChecker
from Util.Functions import ExceptImport
try:
import lupa
except:
lupa = ExceptImport("lupa")
class NotFoundFileException(Exception):
def __str__(self) -> str:
return "未找到某些关键文件或环境变量。"
class BaseConfig(object):
class AnonymousType():
# Anonymous enums are not allowed.
Ban = 0
# Allow all anonymous enums.
All = 1
# Only allow anonymous enums declared inside classes.
Class = 2
# Only allow global anonymous enums.
Global = 3
def __init__(self):
ndkRoot = EnvChecker.Check_NDK_RootEnv()
defaultInclude = EnvChecker.DefaultIncludePath()
gccToolChain = EnvChecker.Find_GCC_ToolChain()
llvmToolChain = EnvChecker.Find_LLVM_ToolChain()
egx = os.environ.get("ENGINEX_ROOT")
if not ndkRoot or not defaultInclude or not gccToolChain or not llvmToolChain or not egx:
raise NotFoundFileException()
lua = lupa.LuaRuntime(unpack_returned_tuples=True)
lua.execute('package.path = package.path .. ";{}/?.lua"'.format(os.path.join(egx,
"templates/lua-template-default/Resources/src").replace("\\", '/')))
self.LuaConfig = lua.require("OOP.Config")
# Tag is used in the generated registration function to distinguish it from other generated files.
self.Tag = None
# TargetNamespace collapses all CppNameSpace entries into a single Lua table name for easy access.
self.TargetNamespace = None
# Specify a set of C++ namespaces; all C++ classes are looked up inside the matching namespaces,
# so classes with the same short name can be told apart.
# e.g. "cocos2d::Label" <-> "cocos2d::ui::Label"
self.CppNameSpace = []
# Allow generating anonymous enums (global or namespace-level anonymous enums may be generated
# multiple times when included elsewhere, so enable with care).
self.AllowAnonymous = BaseConfig.AnonymousType.Class
# Allow generating struct types (structs are usually converted to a table, so there is no need
# to turn every struct into a user class).
self.AllowStruct = False
# Macro condition, used for conditional compilation in some cases.
self.MacroJudgement = None
# Use UpperCamelCase naming (applied at the final stage; does not affect namespaces).
self.UpperCamelCase = True
# Whether to generate properties automatically.
# When a method accepts one parameter and its prefix matches SetPrefix, a set property is generated;
# when a method returns a non-void value, accepts zero parameters, and its prefix matches GetPrefix, a get property is generated.
self.AutoProperties = True
# For types that do not explicitly expose a new function, specify a base class name;
# any class inheriting from this base that has a constructor but no new function
# will still get construction code generated.
# Use the fully qualified type name, including the namespace.
self.ConstructBase = ""
self.AndroidHeaders = []
self.AndroidFlags = [
"-target", "armv7-none-linux-androideabi",
"-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS",
"-DANDROID",
"-D__ANDROID_API__=14",
"-gcc-toolchain", gccToolChain,
"--sysroot={}/platforms/android-14/arch-arm".format(ndkRoot),
"-idirafter", "{}/sources/android/support/include".format(ndkRoot),
"-idirafter", "{}/sysroot/usr/include".format(llvmToolChain),
"-idirafter", "{}/sysroot/usr/include/arm-linux-androideabi".format(llvmToolChain),
"-I{}/sources/cxx-stl/llvm-libc++/include".format(ndkRoot)
]
dl = os.listdir("{}/lib64/clang".format(llvmToolChain))
if dl:
self.AndroidFlags.append("-idirafter")
self.AndroidFlags.append("{}/lib64/clang/{}/include".format(llvmToolChain, dl[0]))
self.ClangHeaders = []
self.ClangFlags = ["-nostdinc", "-x", "c++", "-std=c++11", "-fsigned-char", "-U__SSE__"]
self.Win32ClangFlags = []
# Extra arguments for user extension.
self.ExtraArgs = []
# clang arguments.
self.ClangArgs = [
*self.AndroidHeaders,
*self.ClangHeaders,
*self.AndroidFlags,
*self.ClangFlags,
*defaultInclude
]
# Header files to parse.
self.Headers = []
# Search paths for the header files.
self.SearchPaths = []
# Classes to generate code for (also applies to structs and enums); regular expressions may be used.
# The pattern is wrapped in "^$" before matching, e.g. "^Menu*$".
self.Classes = []
# Class methods to skip (also applies to member variables).
# Format:
# self.Skip = {
# "SkipName" : ["method1","method2"],
# "Skip.*" : ["method.*"]
# }
# Both class names and method names may use regular expressions.
self.Skip = {}
# Methods to rename (also applies to member variables).
# Format:
# self.RenameMembers = {
# "renameClass.*" : {"renameMethod" : "newMethod"},
# }
# Class names may use regular expressions.
self.RenameMembers = {}
# Classes to rename.
# Format:
# self.RenameClasses = {
# "renameClass" : "newClass",
# "rename.*" : "newClass1"
# }
# Class names may use regular expressions.
self.RenameClasses = {}
# Mapping that turns singleton methods into a singleton property.
# Format:
# self.InstanceMethods = {
# ".*Test.*" : ("getInstance","destroyInstance"),
# ".*NoDestroyTest.*" : ("getInstance",None)
# }
# Both class names and method names may use regular expressions.
# Only static methods take effect.
# After the transformation the singleton can be accessed through the property
# (the original singleton methods remain usable), e.g.:
# local inst = MyTest.getInstance();
# local inst = MyTest.Instance;
# MyTest.Instance = nil; -- equivalent to calling MyTest.destroyInstance();
self.InstanceMethods: Dict[str, tuple] = {}
# List of types that should be skipped when a class appears as a parent class.
self.ParentsClassesSkip = []
# Prefixes that generate get properties.
self.GetPrefix = []
# Prefixes that generate set properties.
self.SetPrefix = []
| 1.726563
| 2
|
code/arc081_a_01.py
|
KoyanagiHitoshi/AtCoder
| 3
|
12782183
|
<reponame>KoyanagiHitoshi/AtCoder<filename>code/arc081_a_01.py<gh_stars>1-10
from collections import Counter
N=int(input())
A=Counter(list(map(int,input().split())))
x=[0,0]
for a in A:
if A[a]>1:x.append(a)
if A[a]>3:x.append(a)
x.sort()
print(x[-1]*x[-2])
| 3.21875
| 3
|
python/data_structures/stack.py
|
educauchy/algorithms
| 0
|
12782184
|
from typing import List, Optional, Any
class StackOverflow(Exception):
pass
class StackUnderflow(Exception):
pass
class Stack():
def __init__(self, capacity: int = 10):
"""
:param capacity: Maximum capacity of stack
:var top: Index of the current top element
:var container: Container for elements
"""
self.capacity: int = capacity
self.top: Optional[Any] = None
self.container: List[Any] = []
def is_empty(self) -> bool:
return self.top is None
def push(self, item: Any) -> None:
if self.top is not None and self.top + 1 == self.capacity:
raise StackOverflow('Too much elements for the stack')
else:
self.container.append(item)
if self.top is None:
self.top = 0
else:
self.top += 1
def pop(self) -> Any:
if self.top is None:
raise StackUnderflow('No elements in the stack')
else:
self.top -= 1
if self.top < 0:
self.top = None
return self.container.pop()
def peek(self) -> Any:
# renamed from 'top' to avoid clashing with the 'top' index attribute set in __init__
return self.container[self.top]
def __str__(self):
return 'Stack(capacity={}, [{}])'.format(self.capacity, ', '.join(str(el) for el in self.container))
def __repr__(self):
return 'Stack(capacity={}, [{}])'.format(self.capacity, ', '.join(str(el) for el in self.container))
if __name__ == '__main__':
st = Stack(10)
st.push(1)
print(st)
st.push(3)
print(st)
st.push(5)
print(st)
st.push(7)
print(st)
st.pop()
print(st.is_empty())
print(st)
| 3.796875
| 4
|
test/test_matcher/test_marketorder.py
|
Miksus/ecosys
| 2
|
12782185
|
<gh_stars>1-10
import pytest
import sys
sys.path.append('..')
from ecosys.trading_platform.matcher.stockmarket import StockMatcher
def test_last_price_only_market_orders():
market = StockMatcher()
market.place_ask(quantity=50, order_type="market", party="Market Asker")
market.place_bid(quantity=100, order_type="market", party="Market Bidder")
bid_quantity = market.order_book["market"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["market"]["ask"]["quantity"].sum()
market.clear()
assert (market.last_price is None) and (100 == bid_quantity) and (50 == ask_quantity)
def test_oversupply():
market = StockMatcher()
market.place_bid(price=4.0, quantity=100, party="Bidder")
market.place_ask(quantity=200, order_type="market", party="Market Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["market"]["ask"]["quantity"].sum()
assert (4.0 == market.last_price) and (0 == bid_quantity) and (100 == ask_quantity)
def test_undersupply():
market = StockMatcher()
market.place_bid(price=4.0, quantity=200, party="Bidder")
market.place_ask(quantity=100, order_type="market", party="Market Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["market"]["ask"]["quantity"].sum()
assert (4.0 == market.last_price) and (100 == bid_quantity) and (0 == ask_quantity)
def test_overdemand():
market = StockMatcher()
market.place_bid(quantity=200, order_type="market", party="Market Bidder")
market.place_ask(price=4.0, quantity=100, party="Asker")
market.clear()
bid_quantity = market.order_book["market"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (4.0 == market.last_price) and (100 == bid_quantity) and (0 == ask_quantity)
def test_underdemand():
market = StockMatcher()
market.place_bid(quantity=50, order_type="market", party="Market Bidder")
market.place_ask(price=4.0, quantity=100, party="Asker")
market.clear()
bid_quantity = market.order_book["market"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (4.0 == market.last_price) and (0 == bid_quantity) and (50 == ask_quantity)
def test_market_to_market():
"Opposite market orders should trade with last price"
market = StockMatcher()
market.place_bid(quantity=1, price=5.0, order_type="limit", party="Bidder")
market.place_ask(quantity=1, price=5.0, order_type="limit", party="Asker")
market.clear()
market.place_bid(quantity=200, order_type="market", party="Market Bidder")
market.place_ask(quantity=200, order_type="market", party="Market Asker")
market.clear()
bid_quantity = market.order_book["market"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["market"]["ask"]["quantity"].sum()
assert (5.0 == market.last_price) and (0 == bid_quantity) and (0 == ask_quantity)
def test_market_priority():
"Market orders should be filled first"
market = StockMatcher()
market.place_ask(quantity=500, price=5.0, order_type="limit", party="Asker")
market.place_bid(quantity=100, price=4.0, order_type="limit", party="Bidder")
market.place_bid(quantity=500, order_type="market", party="Market Bidder")
market.place_bid(quantity=100, price=4.0, order_type="limit", party="Bidder")
market.clear()
bid_quantity_market = market.order_book["market"]["bid"]["quantity"].sum()
bid_quantity_limit = market.order_book["limit"]["bid"]["quantity"].sum()
assert (5.0 == market.last_price) and (0 == bid_quantity_market) and (200 == bid_quantity_limit)
| 2.484375
| 2
|
setup.py
|
aymericvie/EvoNetworks
| 0
|
12782186
|
<reponame>aymericvie/EvoNetworks
import os
os.system("pip install numpy")
os.system("pip install matplotlib")
os.system("pip install networkx")
os.system("pip install tqdm")
| 1.46875
| 1
|
platform/hwconf_data/zgm13/upgrade/sdk_2_7_3_Patch/upgradeUtility.py
|
lenloe1/v2.7
| 0
|
12782187
|
<filename>platform/hwconf_data/zgm13/upgrade/sdk_2_7_3_Patch/upgradeUtility.py
# SDK 2.7.3 Patch upgrade utilities
# Remove property with propertyId
# Note this function returns an xmldevice which should be passed back up to the
# upgradeDispatch level. Example:
# newXmlDevice = removePropertyLine(xmlDevice, "CMU.HAL_CLK_HFCLK_SOURCE.ENUM", verbose)
# return newXmlDevice
def removePropertyLine(xmldevice, propertyId, verbose=False):
xmlmode = xmldevice.getModes()[0]
properties = xmlmode.getProperty()
removeList = []
for i in range(0,len(properties)):
p = properties[i]
if propertyId == p.getPropertyId():
if verbose:
print ("Removing %s" % (propertyId))
removeList.append(i)
# reverse sort indicies in removeList so they can be removed from last to first
for index in sorted(removeList, reverse=True):
xmlmode.removeProperty(index)
return xmldevice
# Add a new property line
def addNewProperty(xmldevice, fullPropertyId, value, verbose):
xmlmode = xmldevice.getModes()[0]
if verbose:
print ("Adding %s" % fullPropertyId)
newval = xmldevice.createPropertyValue(fullPropertyId.split('.')[0], fullPropertyId, value)
xmlmode.addProperty(newval)
return xmldevice
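# Example mirroring the pattern above (the property id and value are illustrative only):
#   newXmlDevice = addNewProperty(xmlDevice, "CMU.HAL_CLK_HFCLK_SOURCE.ENUM", "HFXO", verbose)
#   return newXmlDevice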
# Checks if there are any instances of a certain object in the hwconf file
# that have a certain value
def propertyIDInHwconfHasValue(xmldevice, propertyId, value):
xmlmode = xmldevice.getModes()[0]
properties = xmlmode.getProperty()
for i in range(0,len(properties)):
p = properties[i]
propId = p.getPropertyId()
if propId == propertyId:
if p.getValue() == value:
return True
| 2.390625
| 2
|
examples/search_index.py
|
leafcoder/aliyun-tablestore-python-sdk
| 68
|
12782188
|
<filename>examples/search_index.py
# -*- coding: utf8 -*-
from example_config import *
from tablestore import *
import time
import json
table_name = 'SearchIndexExampleTable'
index_name = 'search_index'
nested_index_name = 'nested_search_index'
client = None
def term_query_with_multiple_version_response(table_name, index_name):
query = TermQuery('k', 'key000')
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
print ("***** 1.0.0 ~ 5.1.0 version: tuple *****")
items = search_response.v1_response()
print(items)
print("***** 1.0.0 ~ 5.1.0 version: iter *****")
for item in search_response:
print(item)
print ("***** 5.2.0 version *****")
print(search_response.rows)
def match_all_query(table_name, index_name):
# simple queries: match all query and scan to get all data with next token
query = MatchAllQuery()
all_rows = []
next_token = None
while True:
search_response = client.search(table_name, index_name,
SearchQuery(query, next_token=next_token, limit=100, get_total_count=True),
columns_to_get=ColumnsToGet(['k', 't', 'g', 'ka', 'la'], ColumnReturnType.SPECIFIED))
all_rows.extend(search_response.rows)
next_token = search_response.next_token
if not next_token: # data all returned
break
for row in all_rows:
print(row)
print ('Total rows: %d' % len(all_rows))
def _print_rows(rows, total_count):
for row in rows:
print(row)
print ('Rows return: %d' % len(rows))
print ('Total count: %d' % total_count)
def match_query(table_name, index_name):
query = MatchQuery('t', 'this is 0')
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL)
)
_print_rows(search_response.rows, search_response.total_count)
def match_phrase_query(table_name, index_name):
query = MatchPhraseQuery('t', 'this is')
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def term_query(table_name, index_name):
query = TermQuery('k', 'key000')
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def range_query(table_name, index_name):
query = RangeQuery('k', 'key100', 'key500', include_lower=False, include_upper=False)
search_response = client.search(table_name, index_name,
SearchQuery(query, offset=100, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def prefix_query(table_name, index_name):
query = PrefixQuery('k', 'key00')
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def wildcard_query(table_name, index_name):
query = WildcardQuery('k', 'key00*')
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def terms_query(table_name, index_name):
query = TermsQuery('k', ['key000', 'key100', 'key888', 'key999', 'key908', 'key1000'])
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def bool_query(table_name, index_name):
# k > 'key100' and (l > 110 and l < 200) and not (k = 'key121')
# and should_queries(k > 'key120' or l < 300, minimum_should_match=2)
bool_query = BoolQuery(
must_queries=[
RangeQuery('k', range_from='key100', include_lower=False),
BoolQuery(
must_queries=[
RangeQuery('l', range_from=110, include_lower=False),
RangeQuery('l', range_to=200, include_upper=False)
],
)
],
must_not_queries=[
TermQuery('k', 'key121')
],
should_queries=[
RangeQuery('k', range_from='key120', include_lower=False),
            RangeQuery('l', range_to=300, include_upper=False)
],
minimum_should_match=2
)
search_response = client.search(table_name, index_name,
SearchQuery(bool_query, sort=Sort(sorters=[FieldSort('l', SortOrder.ASC)]), limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def geo_distance_query(table_name, index_name):
query = GeoDistanceQuery('g', '32.5,116.5', 300000)
sort = Sort(sorters=[
GeoDistanceSort('g', ['32.5,116.5', '32.0,116.0'], sort_order=SortOrder.DESC)
])
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True, sort=sort),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def geo_bounding_box_query(table_name, index_name):
query = GeoBoundingBoxQuery('g', '30.9,112.0', '30.2,119.0')
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def geo_polygon_query(table_name, index_name):
query = GeoPolygonQuery('g', ['30.9,112.0', '30.5,115.0', '30.3, 117.0', '30.2,119.0'])
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def nested_query(table_name, index_name):
nested_query = RangeQuery('n.nl', range_from=110, range_to=200, include_lower=True, include_upper=True)
query = NestedQuery('n', nested_query)
sort = Sort(
sorters = [FieldSort('n.nl', sort_order=SortOrder.ASC, nested_filter=NestedFilter('n', RangeQuery('n.nl', range_from=150, range_to=200)))]
)
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True, sort=sort),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def function_score_query(table_name, index_name):
query = FunctionScoreQuery(
RangeQuery('l', range_from=100, range_to=300),
FieldValueFactor('l')
)
search_response = client.search(table_name, index_name,
SearchQuery(query, limit=100, get_total_count=True),
ColumnsToGet(return_type=ColumnReturnType.ALL))
_print_rows(search_response.rows, search_response.total_count)
def prepare_data(rows_count):
print ('Begin prepare data: %d' % rows_count)
for i in range(rows_count):
pk = [('PK1', i), ('PK2', 'pk_' + str(i % 10))]
        lj = i // 100  # integer grid row index ('/' would produce a float on Python 3)
li = i % 100
cols = [('k', 'key%03d' % i), ('t', 'this is ' + str(i)),
('g', '%f,%f' % (30.0 + 0.05 * lj, 114.0 + 0.05 * li)), ('ka', '["a", "b", "%d"]' % i),
('la', '[-1, %d]' % i), ('l', i),
('b', i % 2 == 0), ('d', 0.1),
('n', json.dumps([{'nk':'key%03d' % i, 'nl':i, 'nt':'this is in nested ' + str(i)}]))]
client.put_row(table_name, Row(pk, cols))
print ('End prepare data.')
print ('Wait for data sync to search index.')
time.sleep(10)
def prepare_table():
table_meta = TableMeta(table_name, [('PK1', 'INTEGER'), ('PK2', 'STRING')])
table_options = TableOptions()
reserved_throughput = ReservedThroughput(CapacityUnit(0, 0))
client.create_table(table_meta, table_options, reserved_throughput)
def prepare_index(index_name, with_nested=False):
field_a = FieldSchema('k', FieldType.KEYWORD, index=True, enable_sort_and_agg=True, store=True)
field_b = FieldSchema('t', FieldType.TEXT, index=True, store=True, analyzer=AnalyzerType.SINGLEWORD)
field_c = FieldSchema('g', FieldType.GEOPOINT, index=True, store=True)
field_d = FieldSchema('ka', FieldType.KEYWORD, index=True, is_array=True, store=True)
field_e = FieldSchema('la', FieldType.LONG, index=True, is_array=True, store=True)
field_f = FieldSchema('l', FieldType.LONG, index=True, store=True)
field_g = FieldSchema('b', FieldType.BOOLEAN, index=True, store=True)
field_h = FieldSchema('d', FieldType.DOUBLE, index=True, store=True)
if with_nested:
field_n = FieldSchema('n', FieldType.NESTED, sub_field_schemas=[
FieldSchema('nk', FieldType.KEYWORD, index=True, store=True),
FieldSchema('nl', FieldType.LONG, index=True, store=True),
FieldSchema('nt', FieldType.TEXT, index=True, store=True),
])
fields = [field_a, field_b, field_c, field_d, field_e, field_f, field_g, field_h]
if with_nested:
fields.append(field_n)
index_setting = IndexSetting(routing_fields=['PK1'])
index_sort = Sort(sorters=[PrimaryKeySort(SortOrder.ASC)]) if not with_nested else None
index_meta = SearchIndexMeta(fields, index_setting=index_setting, index_sort=index_sort) # default with index sort
client.create_search_index(table_name, index_name, index_meta)
def list_search_index():
for table, index_name in client.list_search_index(table_name):
print ('%s, %s' % (table, index_name))
def describe_search_index():
index_meta, sync_stat = client.describe_search_index(table_name, index_name)
print ('sync stat: %s, %d' % (str(sync_stat.sync_phase), sync_stat.current_sync_timestamp))
print ('index name: %s' % index_name)
print ('index fields:')
for field in index_meta.fields:
print (' field name: %s' % field.field_name)
print (' field type: %s' % str(field.field_type))
print (' field indexed: %s' % str(field.index))
print (' field stored: %s' % str(field.store))
print (' field is array: %s' % str(field.is_array))
print (' field allow sort and aggregate: %s' % str(field.enable_sort_and_agg))
print ('index routing keys: %s' % str(index_meta.index_setting.routing_fields))
print ('index sort: %s' % (index_meta.index_sort.sorters))
def delete_table():
try:
client.delete_table(table_name)
except:
pass
def delete_search_index(index_name):
try:
client.delete_search_index(table_name, index_name)
except:
pass
if __name__ == '__main__':
client = OTSClient(OTS_ENDPOINT, OTS_ID, OTS_SECRET, OTS_INSTANCE)
delete_search_index(index_name)
delete_search_index(nested_index_name)
delete_table()
prepare_table()
prepare_index(index_name, with_nested=False)
prepare_index(nested_index_name, with_nested=True)
prepare_data(1000)
list_search_index()
describe_search_index()
# perform queries
term_query_with_multiple_version_response(table_name, index_name)
match_all_query(table_name, index_name)
match_query(table_name, index_name)
match_phrase_query(table_name, index_name)
term_query(table_name, index_name)
range_query(table_name, index_name)
prefix_query(table_name, index_name)
terms_query(table_name, index_name)
bool_query(table_name, index_name)
wildcard_query(table_name, index_name)
geo_distance_query(table_name, index_name)
geo_bounding_box_query(table_name, index_name)
geo_polygon_query(table_name, index_name)
nested_query(table_name, nested_index_name)
function_score_query(table_name, nested_index_name)
delete_search_index(index_name)
delete_search_index(nested_index_name)
delete_table()
| 2.65625
| 3
|
pagination/page.py
|
pmaigutyak/mp-pagination
| 0
|
12782189
|
import functools
import collections.abc
from django.template.loader import render_to_string
from pagination.settings import MARGIN_PAGES_DISPLAYED, PAGE_RANGE_DISPLAYED
class PageRepresentation(int):
def __new__(cls, x, querystring):
obj = int.__new__(cls, x)
obj.querystring = querystring
return obj
def add_page_querystring(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if isinstance(result, int):
querystring = self._other_page_querystring(result)
return PageRepresentation(result, querystring)
        elif isinstance(result, collections.abc.Iterable):
new_result = []
for number in result:
if isinstance(number, int):
querystring = self._other_page_querystring(number)
new_result.append(PageRepresentation(number, querystring))
else:
new_result.append(number)
return new_result
return result
return wrapper
class Page(object):
template = 'pagination.html'
def __init__(self, object_list, number, paginator):
self.object_list = object_list
self.paginator = paginator
if paginator.request:
self.base_queryset = self.paginator.request.GET.copy()
self.number = PageRepresentation(number, self._other_page_querystring(number))
def __repr__(self):
return '<Page %s of %s>' % (self.number, self.paginator.num_pages)
def has_next(self):
return self.number < self.paginator.num_pages
def has_previous(self):
return self.number > 1
def has_other_pages(self):
return self.has_previous() or self.has_next()
@add_page_querystring
def next_page_number(self):
return self.number + 1
@add_page_querystring
def previous_page_number(self):
return self.number - 1
def start_index(self):
"""
Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
# Special case, return zero if no items.
if self.paginator.count == 0:
return 0
return (self.paginator.per_page * (self.number - 1)) + 1
def end_index(self):
"""
Returns the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
# Special case for the last page because there can be orphans.
if self.number == self.paginator.num_pages:
return self.paginator.count
return self.number * self.paginator.per_page
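    # Illustrative example (not from the original source): with per_page=10 and a
    # paginator counting 25 objects, page 3 gives start_index() == 21 and
    # end_index() == 25, since the last page keeps the orphaned items.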
@add_page_querystring
def pages(self):
if self.paginator.num_pages <= PAGE_RANGE_DISPLAYED:
return range(1, self.paginator.num_pages + 1)
result = []
        left_side = PAGE_RANGE_DISPLAYED // 2  # integer division keeps the window bounds as ints
        right_side = PAGE_RANGE_DISPLAYED - left_side
        if self.number > self.paginator.num_pages - PAGE_RANGE_DISPLAYED // 2:
            right_side = self.paginator.num_pages - self.number
            left_side = PAGE_RANGE_DISPLAYED - right_side
        elif self.number < PAGE_RANGE_DISPLAYED // 2:
            left_side = self.number
            right_side = PAGE_RANGE_DISPLAYED - left_side
for page in range(1, self.paginator.num_pages + 1):
if page <= MARGIN_PAGES_DISPLAYED:
result.append(page)
continue
if page > self.paginator.num_pages - MARGIN_PAGES_DISPLAYED:
result.append(page)
continue
if (page >= self.number - left_side) and (page <= self.number + right_side):
result.append(page)
continue
if result[-1]:
result.append(None)
return result
def _other_page_querystring(self, page_number):
"""
Returns a query string for the given page, preserving any
GET parameters present.
"""
if self.paginator.request:
self.base_queryset['page'] = page_number
return self.base_queryset.urlencode()
# raise Warning("You must supply Paginator() with the request object for a proper querystring.")
return 'page=%s' % page_number
def render(self):
return render_to_string(self.template, {
'current_page': self,
'page_obj': self, # Issue 9 https://github.com/jamespacileo/django-pure-pagination/issues/9
# Use same naming conventions as Django
})
| 2.28125
| 2
|
molecule/common/tests/test_containerd.py
|
incubateur-pe/containerd
| 1
|
12782190
|
<reponame>incubateur-pe/containerd<gh_stars>1-10
"""Role testing files using testinfra."""
def test_containerd_installed(host):
containerd = host.file("/usr/bin/containerd")
assert containerd.exists
assert containerd.user == "root"
assert containerd.group == "root"
assert containerd.mode == 0o755
def test_containerd_service(host):
containerd = host.service("containerd")
assert containerd.is_running
assert containerd.is_enabled
def test_containerd_config(host):
config = host.file("/etc/containerd/config.toml")
assert config.exists
assert config.contains("http://10.0.4.40:5005")
assert config.contains("insecure_skip_verify")
| 1.75
| 2
|
day16/part1.py
|
tatoonz/advent-of-code-2021
| 0
|
12782191
|
import sys
input = bin(int(sys.stdin.readline().strip(), base=16))[2:]
# filling missing leading 0
input = input.zfill(-(-len(input)//4)*4)
def decode(msg):
if msg == '' or int(msg) == 0:
return 0
version = int(msg[0:3], 2)
type_id = int(msg[3:6], 2)
if type_id == 4:
last_group = False
cursor = 6
while not last_group:
if msg[cursor] == '0':
last_group = True
cursor += 5
return version + decode(msg[cursor:])
length_type_id = msg[6]
if length_type_id == '0':
total_sub_packets_len = int(msg[7:22], 2)
return version + decode(msg[22:22+total_sub_packets_len]) + decode(msg[22+total_sub_packets_len:])
return version + decode(msg[18:])
# result
# sample1: 16
# sample2: 12
# sample3: 23
# sample4: 31
# puzzle: 971
print(decode(input))
| 2.765625
| 3
|
Server/Python/src/dbs/dao/MySQL/BlockParent/Insert.py
|
vkuznet/DBS
| 8
|
12782192
|
#!/usr/bin/env python
""" DAO Object for BlockParents table """
from dbs.dao.Oracle.BlockParent.Insert import Insert as OraBlockParentInsert
class Insert(OraBlockParentInsert):
pass
| 1.78125
| 2
|
arm_prosthesis/external_communication/models/dto/save_gesture_dto.py
|
paulrozhkin/arm_prosthesis_raspberry
| 2
|
12782193
|
from arm_prosthesis.external_communication.models.dto.entity_dto import EntityDto
from arm_prosthesis.external_communication.models.dto.gesture_dto import GestureDto
from gestures_pb2 import SaveGesture
class SaveGestureDto(EntityDto):
def __init__(self):
self._time_sync = 0
self._gesture_dto = None
@property
def time_sync(self) -> int:
return self._time_sync
@property
def gesture_dto(self) -> GestureDto:
return self._gesture_dto
def serialize(self) -> bytes:
raise NotImplementedError
def deserialize(self, byte_array: bytes):
save_gesture_protobuf = SaveGesture()
save_gesture_protobuf.ParseFromString(byte_array)
self._time_sync = save_gesture_protobuf.time_sync
self._gesture_dto = GestureDto()
self._gesture_dto.create_from_protobuf_gesture(save_gesture_protobuf.gesture)
| 2.265625
| 2
|
soke-scripts/process/load.py
|
Wilfongjt/soke
| 0
|
12782194
|
from process.process import Process
class Load(Process):
def __init__(self, settings=None):
Process.__init__(self, settings=settings)
# import_file_name is full local file name or url to source
#self.import_file_list=import_file_list
#self.dataframe=None
#self.dictionary={}
#self.list={}
#print('Load')
'''
def get_dictionary(self):
return self.dictionary
def get_dataframe(self):
return self.dataframe
def get_list(self):
return self.list
'''
| 2.640625
| 3
|
es_dumpvec.py
|
eugene-yang/DESIRES18-QBD-Experiments
| 0
|
12782195
|
<filename>es_dumpvec.py<gh_stars>0
import argparse
from pathlib import Path
import pandas as pd
import numpy as np
import scipy.sparse as sp
import json, warnings, socket, pickle, sys, re
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
from helpers import pbar
parser = argparse.ArgumentParser(description='Dump Elasticsearch index as a document-term '
'sparse matrix that can be used in `sklearn_exp.py`')
parser.add_argument('server', type=str, help='Path to the Elasticsearch server, including '
'hostname/IP and port')
parser.add_argument('index', type=str, help='Name of the Elasticsearch index to be dumped.')
parser.add_argument('output_name', type=str, help='Output file name (.pkl)')
parser.add_argument('--ingested_text', type=str, default="./raw_text.csv",
help='The ingested .csv file created by `helper.py ingest`')
args = parser.parse_args()
if __name__ == '__main__':
es = Elasticsearch([ args.server ])
print("Loading text cache...")
raw_text_index = pd.read_csv( args.ingested_text ).index
finalfn = Path( args.output_name + ".pkl" )
if finalfn.exists():
raise FileExistsError("%s already exists."%finalfn)
print("Start dumping from Elasticsearch")
doc_terms = []
vocab = set()
for i in pbar(raw_text_index.shape[0])(raw_text_index):
d = es.termvectors(index=args.index, doc_type='document', id=i, fields='raw')
vocab |= set( d['term_vectors']['raw']['terms'].keys() )
doc_terms.append({ v: d['term_vectors']['raw']['terms'][v]['term_freq']
for v in d['term_vectors']['raw']['terms'] })
vocab = sorted(list(vocab))
print("Vectorizing...")
n_samples = raw_text_index.shape[0]
n_features = len( vocab )
inv_vocab = { v:i for i,v in enumerate(vocab) }
row = []
col = []
data = []
for i, doc in pbar(n_samples)(enumerate( doc_terms )):
for term in doc:
row.append(i)
col.append( inv_vocab[term] )
data.append( doc[term] )
print("Transforming...")
X = sp.csr_matrix(( data, (row,col) ), shape=(n_samples, n_features))
print("Saving...")
pickle.dump({ "vec": X }, finalfn.open("wb") )
| 2.8125
| 3
|
pulumi/infra/fargate_service.py
|
ebcarty/grapl
| 0
|
12782196
|
import json
from typing import List, Mapping, Optional, Sequence, Tuple, Union, cast
import pulumi_aws as aws
import pulumi_docker as docker
from infra.cache import Cache
from infra.config import (
DEPLOYMENT_NAME,
REAL_DEPLOYMENT,
SERVICE_LOG_RETENTION_DAYS,
configured_version_for,
)
from infra.ec2 import Ec2Port
from infra.emitter import EventEmitter
from infra.metric_forwarder import MetricForwarder
from infra.network import Network
from infra.policies import ECR_TOKEN_POLICY, attach_policy
from infra.repository import Repository, registry_credentials
from infra.service_queue import ServiceQueue
import pulumi
class GraplDockerBuild(docker.DockerBuild):
def __init__(
self,
dockerfile: str,
target: str,
context: Optional[str] = None,
args: Optional[Mapping[str, pulumi.Input[str]]] = None,
env: Optional[Mapping[str, str]] = None,
):
super().__init__(
context=context,
dockerfile=dockerfile,
env={**(env or {}), "DOCKER_BUILDKIT": 1},
args={**(args or {}), "RUST_BUILD": "debug"},
target=target,
# Quiet the Docker builds at `pulumi up` time
# ...except it doesn't work with `buildx` yet
# https://github.com/docker/buildx/issues/401
# extra_options=("--quiet",),
)
class FargateTaskRole(aws.iam.Role):
def __init__(
self, name: str, opts: Optional[pulumi.ResourceOptions] = None
) -> None:
super().__init__(
f"{name}-task-role",
description=f"Fargate task role for {name}",
assume_role_policy=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {"Service": "ecs-tasks.amazonaws.com"},
}
],
}
),
opts=opts,
)
class FargateExecutionRole(aws.iam.Role):
def __init__(
self, name: str, opts: Optional[pulumi.ResourceOptions] = None
) -> None:
super().__init__(
f"{name}-execution-role",
description=f"Fargate execution role for {name}",
assume_role_policy=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {"Service": "ecs-tasks.amazonaws.com"},
}
],
}
),
opts=opts,
)
class _AWSFargateService(pulumi.ComponentResource):
def __init__(
self,
name: str,
cluster: aws.ecs.Cluster,
queue: ServiceQueue,
input_emitter: EventEmitter,
output_emitter: EventEmitter,
network: Network,
image: pulumi.Output[str],
env: Mapping[str, Union[str, pulumi.Output[str]]],
forwarder: MetricForwarder,
entrypoint: Optional[List[str]] = None,
command: Optional[List[str]] = None,
opts: Optional[pulumi.ResourceOptions] = None,
) -> None:
"""
:param command: supply an override to the CMD defined in the Dockerfile.
"""
super().__init__("grapl:AWSFargateService", name, None, opts)
self.task_role = FargateTaskRole(name, opts=pulumi.ResourceOptions(parent=self))
########################################################################
# TODO: CDK code has us consuming from all queues, but that's
# likely excessive. The default service probably just needs to
# consume from the main queue; similarly for the retry service
# and retry queue
#
# We should probably bundle this concept up into a single
# policy (one for the "default" case and one for the "retry"
# case), and then put this into the ServiceQueue object. Then,
# anything that needs to behave as a "default service" can
# just attach the appropriate policy; similarly for things
# that behave like "retry services".
#
# That would likely allow us to unify the Fargate- and
# Lambda-based services, too.
queue.grant_main_queue_consumption_to(self.task_role)
queue.grant_retry_queue_consumption_to(self.task_role)
queue.grant_dead_letter_queue_consumption_to(self.task_role)
########################################################################
########################################################################
# TODO: As above, we don't need everything to be able to send
# to all our queues.
#
# If we take the approach advocated above with a single policy
# laying out the behavior we want, then these attachments can
# go away, since they will have been subsumed into the ones
# above.
queue.grant_main_queue_send_to(self.task_role)
queue.grant_retry_queue_send_to(self.task_role)
queue.grant_dead_letter_queue_send_to(self.task_role)
########################################################################
input_emitter.grant_read_to(self.task_role)
output_emitter.grant_write_to(self.task_role)
self.execution_role = FargateExecutionRole(
name, opts=pulumi.ResourceOptions(parent=self)
)
# Incorporating the stack name into this log group name;
# otherwise we'll end up dumping logs from different stacks
# together.
#
# TODO: Consider a helper function for generating log group
# names that adheres to this convention for all our services
# (though this will be less of an issue once we migrate to
# Kafka)
self.log_group = aws.cloudwatch.LogGroup(
f"{name}-log-group",
name=f"/grapl/{DEPLOYMENT_NAME}/{name}",
retention_in_days=SERVICE_LOG_RETENTION_DAYS,
opts=pulumi.ResourceOptions(parent=self),
)
aws.iam.RolePolicy(
f"{name}-write-log-events",
role=self.execution_role.name,
policy=self.log_group.arn.apply(
lambda arn: json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["logs:CreateLogStream", "logs:PutLogEvents"],
"Resource": f"{arn}:*",
}
],
}
)
),
opts=pulumi.ResourceOptions(parent=self.execution_role),
)
# This is only needed if we're actually pulling from ECR,
# which we don't do in production (because we're pulling from
# Cloudsmith). The only time we use ECR is when we build a
# Docker container locally, and that'll only happen for
# individual developer sandbox deployments.
# TODO: This feels hacky; consider other ways to model this.
if not REAL_DEPLOYMENT:
attach_policy(ECR_TOKEN_POLICY, self.execution_role)
forwarder.subscribe_to_log_group(name, self.log_group)
self.task = aws.ecs.TaskDefinition( # type: ignore[call-overload]
f"{name}-task",
family=f"{DEPLOYMENT_NAME}-{name}-task",
container_definitions=pulumi.Output.all(
queue_url=queue.main_queue_url,
retry_url=queue.retry_queue_url,
dead_letter_url=queue.dead_letter_queue_url,
log_group=self.log_group.name,
bucket=output_emitter.bucket.bucket,
image=image,
env=env,
).apply(
lambda inputs: json.dumps(
[
{
# NOTE: it seems that *all* our containers
# are named this. Perhaps due to CDK's
# QueueProcessingFargateService abstraction?
"name": "QueueProcessingContainer",
"image": inputs["image"],
"environment": _environment_from_map(
{
"QUEUE_URL": inputs["queue_url"],
"SOURCE_QUEUE_URL": inputs["queue_url"],
"DEST_BUCKET_NAME": inputs["bucket"],
"DEPLOYMENT_NAME": DEPLOYMENT_NAME,
"DEAD_LETTER_QUEUE_URL": inputs["dead_letter_url"],
"RETRY_QUEUE_URL": inputs["retry_url"],
**inputs["env"],
}
),
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-stream-prefix": "logs",
"awslogs-region": aws.get_region().name,
"awslogs-group": inputs["log_group"],
},
},
**({"entryPoint": entrypoint} if entrypoint else {}),
**({"command": command} if command else {}),
},
]
)
),
requires_compatibilities=["FARGATE"],
cpu=256,
memory=512,
network_mode="awsvpc", # only option for Fargate
task_role_arn=self.task_role.arn,
execution_role_arn=self.execution_role.arn,
opts=pulumi.ResourceOptions(
parent=self,
),
)
self.security_group = aws.ec2.SecurityGroup(
f"{name}-security-group",
vpc_id=network.vpc.id,
opts=pulumi.ResourceOptions(parent=self),
)
self.service = aws.ecs.Service(
f"{name}-service",
cluster=cluster.arn,
network_configuration=aws.ecs.ServiceNetworkConfigurationArgs(
assign_public_ip=False,
subnets=[net.id for net in network.private_subnets],
security_groups=[self.security_group.id],
),
launch_type="FARGATE",
desired_count=1, # TODO: Set this to 1 or 0 depending on default vs. retry
deployment_minimum_healthy_percent=50,
task_definition=self.task.arn,
opts=pulumi.ResourceOptions(
parent=self,
),
)
self.register_outputs({})
class FargateService(pulumi.ComponentResource):
def __init__(
self,
name: str,
input_emitter: EventEmitter,
output_emitter: EventEmitter,
network: Network,
image: docker.DockerBuild,
env: Mapping[str, Union[str, pulumi.Output[str]]],
forwarder: MetricForwarder,
entrypoint: Optional[List[str]] = None,
command: Optional[List[str]] = None,
retry_image: Optional[docker.DockerBuild] = None,
retry_entrypoint: Optional[List[str]] = None,
retry_command: Optional[List[str]] = None,
opts: Optional[pulumi.ResourceOptions] = None,
) -> None:
super().__init__("grapl:FargateService", name, None, opts)
self.queue = ServiceQueue(name, opts=pulumi.ResourceOptions(parent=self))
self.queue.subscribe_to_emitter(input_emitter)
self.ecs_cluster = aws.ecs.Cluster(
f"{name}-cluster",
opts=pulumi.ResourceOptions(parent=self),
)
# We're not calling this image, e.g., "foo-default" to account
# for the (common) case that the corresponding retry service
# uses the same image.
(repository, image_name) = self._repository_and_image(name, image)
self.default_service = _AWSFargateService(
f"{name}-default",
cluster=self.ecs_cluster,
queue=self.queue,
input_emitter=input_emitter,
output_emitter=output_emitter,
network=network,
image=image_name,
entrypoint=entrypoint,
command=command,
env=env,
forwarder=forwarder,
opts=pulumi.ResourceOptions(parent=self),
)
if repository:
repository.grant_access_to(self.default_service.execution_role)
# If a separate retry image was provided, create a separate
# repository for it; otherwise, reuse the existing repository
# and image.
retry_name = f"{name}-retry"
(retry_repository, retry_image_name) = (
self._repository_and_image(retry_name, retry_image)
if retry_image
else (repository, image_name)
)
self.retry_service = _AWSFargateService(
retry_name,
cluster=self.ecs_cluster,
queue=self.queue,
input_emitter=input_emitter,
output_emitter=output_emitter,
network=network,
image=retry_image_name,
entrypoint=retry_entrypoint or entrypoint,
command=retry_command or command,
env=env,
forwarder=forwarder,
opts=pulumi.ResourceOptions(parent=self),
)
if retry_repository:
retry_repository.grant_access_to(self.retry_service.execution_role)
self.services = (self.default_service, self.retry_service)
self._setup_default_ports()
self.register_outputs({})
def _setup_default_ports(self) -> None:
"""
Can be overridden by subclasses. Most services are fine having an outbound 443.
Has a cognate in service.py.
"""
for svc in self.services:
Ec2Port("tcp", 443).allow_outbound_any_ip(svc.security_group)
def allow_egress_to_cache(self, cache: Cache) -> None:
"""
Allow both the default and retry services to connect to the `cache`.
"""
for svc in self.services:
cache.allow_egress_to_cache_for(svc._name, svc.security_group)
def _repository_and_image(
self, name: str, build: docker.DockerBuild
) -> Tuple[Optional[Repository], pulumi.Output[str]]:
version = configured_version_for(name)
if version:
image_name = f"docker.cloudsmith.io/grapl/raw/{name}:{version}"
pulumi.info(f"Version found for {name}: {version} ({image_name})")
# It's a bit of a bummer to need this cast :/
return (None, cast(pulumi.Output[str], image_name))
else:
# create ECR, build image, push to ECR, return output
pulumi.info(
f"Version NOT found for {name}; performing local container image build"
)
repository = Repository(name, opts=pulumi.ResourceOptions(parent=self))
image = docker.Image(
name,
image_name=repository.registry_qualified_name,
build=build,
registry=registry_credentials(),
opts=pulumi.ResourceOptions(parent=self),
)
# The built image name will have a checksum appended to it,
# thus eliminating the need to use tags.
return (repository, image.image_name)
def _environment_from_map(env: Mapping[str, str]) -> Sequence[Mapping[str, str]]:
"""
Generate a list of environment variable dictionaries for an ECS task container definition from a standard dictionary.
"""
return [{"name": k, "value": v} for (k, v) in env.items()]
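# Illustrative example (not from the original source):
#   _environment_from_map({"QUEUE_URL": "https://sqs.example/queue"})
#   == [{"name": "QUEUE_URL", "value": "https://sqs.example/queue"}]
# which is the shape ECS expects for a container definition's "environment" list.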
| 1.789063
| 2
|
src/python/WMCore/WMBS/MySQL/Fileset/ListFilesetByTask.py
|
khurtado/WMCore
| 21
|
12782197
|
<gh_stars>10-100
#!/usr/bin/env python
"""
_ListFilesetByTask_
MySQL implementation of Fileset.ListFilesetByTask
"""
__all__ = []
from WMCore.Database.DBFormatter import DBFormatter
class ListFilesetByTask(DBFormatter):
sql = """SELECT id, name, open, last_update FROM wmbs_fileset
WHERE id IN (SELECT fileset FROM wmbs_subscription
WHERE workflow IN (SELECT id FROM wmbs_workflow
WHERE task = :task))"""
def formatDict(self, result):
"""
_formatDict_
Cast the id attribute to an int because the DBFormatter turns everything
into strings.
"""
tempResults = DBFormatter.formatDict(self, result)
formattedResults = []
for tempResult in tempResults:
tempResult["id"] = int(tempResult["id"])
formattedResults.append(tempResult)
return formattedResults
def execute(self, task = None, conn = None, transaction = False):
result = self.dbi.processData(self.sql, {"task": task},
conn = conn, transaction = transaction)
return self.formatDict(result)
| 2.515625
| 3
|
Interpolation/TridiagonalSol.py
|
ssklykov/collection_numCalc
| 0
|
12782198
|
# -*- coding: utf-8 -*-
"""
Solution of a system of linear equations with tridiagonal matrix
Developed in the Spyder IDE
@author: ssklykov
"""
def Solution(a, b, c, d):
    for i in range(1, len(d)):  # range() never includes the stop value
        # Below is the literal implementation from the book
        a[i] /= b[i-1]  # substitute a[i] with alpha[i] = a[i]/b[i-1]
        b[i] -= a[i]*c[i-1]  # -//- b[i] with b[i] - alpha[i]*c[i-1]
        d[i] -= a[i]*d[i-1]  # the "y" values - solution of the system Ly = d, where L is composed from a, b, c
    d[len(d)-1] /= b[len(b)-1]  # backward substitution starts with the last row
    for i in range(len(d)-2, -1, -1):  # step backwards; without the explicit -1 step the loop body never runs
        d[i] = (d[i] - c[i]*d[i+1])/b[i]
return d # solution to the input system Gx = d
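# Illustrative usage (not part of the original file): a minimal check of the Thomas
# algorithm above on the 3x3 system [[2,1,0],[1,2,1],[0,1,2]] x = [4,8,8], whose
# solution is [1, 2, 3]. Array conventions follow Solution(): a is the sub-diagonal
# (a[0] unused), b the main diagonal, c the super-diagonal (c[-1] unused), d the RHS.
if __name__ == "__main__":
    a = [0.0, 1.0, 1.0]
    b = [2.0, 2.0, 2.0]
    c = [1.0, 1.0, 0.0]
    d = [4.0, 8.0, 8.0]
    print(Solution(a, b, c, d))  # prints [1.0, 2.0, 3.0] up to floating-point rounding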
| 3.734375
| 4
|
dungeonMaster.py
|
p0l0satik/pyprak
| 0
|
12782199
|
d = {}
nvis = set()
s = input()
while " " in s:
k, v = s.split()
if k == v:
continue
nvis.add(k)
nvis.add(v)
d.setdefault(v, [])
d.setdefault(k, [])
if v not in d[k]:
d[k].append(v)
if k not in d[v]:
d[v].append(k)
s = input()
now = [s,]
out = input()
while len(now) and len(s):
proc = now.pop()
if proc == out:
print("YES")
break
if proc in nvis:
nvis.remove(proc)
        for t in d.get(proc, []):  # a node that never appeared in any pair has no neighbours
if t in nvis:
now.append(t)
else:
print("NO")
| 3.28125
| 3
|
colvar/compilers.py
|
kzinovjev/colvar
| 0
|
12782200
|
X_INDEX_INCREMENTS = {"x": 0, "y": 1, "z": 2}
def compile_constant(schema):
return {"type": "x",
"params": {"value": schema["value"]}}
def compile_cartesian(schema):
return {"type": "x",
"params": {
"index": schema["atom"] * 3 + X_INDEX_INCREMENTS[schema["type"]]
}}
def compile_weight(weight):
if isinstance(weight, dict):
return compile_schema(weight)
return {"type": "constant", "params": {"value": weight}}
def compile_weights(weights):
return list(map(compile_weight, weights))
def compile_center_cartesian(x, atoms, weights):
return {
"type": "linear",
"params": {
"colvars": [compile_cartesian({"type": x, "atom": atom})
for atom in atoms],
"weights": compile_weights(weights),
"normalize": True
}
}
def compile_atom(atom):
return [compile_cartesian({"type": _type, "atom": atom})
for _type in ("x", "y", "z")]
def compile_center(center):
if isinstance(center, list):
return compile_schema(center)
weights = center.get("weights", [1 for _ in center["atoms"]])
return [compile_center_cartesian(_type, center["atoms"], weights)
for _type in ("x", "y", "z")]
def compile_geometric(schema):
if "centers" in schema:
centers = list(map(compile_center, schema["centers"]))
else:
centers = list(map(compile_atom, schema["atoms"]))
return {"type": schema["type"],
"params": {"centers": centers}}
def compile_sigmoid(schema):
return {"type": "sigmoid",
"params": {"colvar": compile_schema(schema["colvar"]),
"L": schema.get("L", 1),
"k": schema.get("k", 1),
"x0": schema.get("x0", 0)}
}
def compile_linear(schema):
return {
"type": "linear",
"params": {"colvars": compile_schema(schema["colvars"]),
"weights": compile_weights(schema["weights"]),
"normalize": schema.get("normalize", False)}
}
COMPILERS = {"constant": compile_constant,
"x": compile_cartesian,
"y": compile_cartesian,
"z": compile_cartesian,
"distance": compile_geometric,
"angle": compile_geometric,
"dihedral": compile_geometric,
"point_plane": compile_geometric,
"sigmoid": compile_sigmoid,
"linear": compile_linear}
def compile_schema(schema):
if isinstance(schema, list):
return list(map(compile_schema, schema))
if "params" in schema:
return schema
return COMPILERS.get(schema["type"], lambda _: _)(schema)
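# Illustrative usage (not part of the original module); the atom indices are made up.
if __name__ == "__main__":
    import pprint
    # Distance between atoms 0 and 1, expanded into their cartesian components.
    pprint.pprint(compile_schema({"type": "distance", "atoms": [0, 1]}))
    # A sigmoid wrapped around that distance, with the default L, k and x0.
    pprint.pprint(compile_schema({"type": "sigmoid",
                                  "colvar": {"type": "distance", "atoms": [0, 1]}}))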
| 2.4375
| 2
|
Solutions/Correlation does not imply causation.py
|
GuardsmanPanda/ProjectLovelace
| 0
|
12782201
|
<filename>Solutions/Correlation does not imply causation.py
def mean(a): return sum(a)/len(a)
def std(a):
m = mean(a)
return (sum((x - m)**2 for x in a)/len(a))**0.5
def correlation_coefficient(x, y):
xm, ym = mean(x), mean(y)
return sum((x[i]-xm)*(y[i]-ym) for i in range(len(x)))/std(x)/std(y)/len(x)
print(correlation_coefficient([ 5427, 5688, 6198, 6462, 6635, 7336, 7248, 7491, 8161, 8578, 9000], [18.079, 18.594, 19.753, 20.734, 20.831, 23.029, 23.597, 23.584, 22.525, 27.731, 29.449]))
| 2.859375
| 3
|
testing/micro-bench/server/app.py
|
jgrunert/Microservice-Fault-Injection
| 5
|
12782202
|
import random
import string
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/')
def root():
return jsonify(result=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(2 ** 10)))
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
| 2.625
| 3
|
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Edno/035.py
|
moacirsouza/nadas
| 1
|
12782203
|
# (01-Gabarito/035.py) Develop a program that reads the lengths of three line segments and tells
# the user whether or not they can form a triangle.
hipotenusa = float(input('Digite o valor do que será a hipotenusa\t: '))
cateto_a = float(input('Digite o valor do que será o cateto adjacente\t: '))
cateto_o = float(input('Digite o valor do que será o cateto oposto\t: '))
if pow(hipotenusa,2) == (pow(cateto_a,2)+pow(cateto_o,2)):
print('Dá pra ser triângulo.')
else:
print('Não dá pra ser triângulo.')
# I was way off. I didn't know that this was how you calculated whether a triangle could be formed.
| 4.21875
| 4
|
desafio042.py
|
RickChaves29/Desafios-Python
| 0
|
12782204
|
print('<>'*20)
print('Analisador de triângulo')
print('<>'*20)
r1 = float(input('Primeiro valor: '))
r2 = float(input('Segundo valor: '))
r3 = float(input('Terceiro valor: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('Os valores podem formar um triângulo')
if r1 == r2 == r3:
print('Tipo: EQUILÁTERO')
elif r1 != r2 != r3 != r1:
print('Tipo: ESCALENO')
else:
print('Tipo: ISÓSCELES')
else:
    print('Os valores não formam um triângulo')
| 4.09375
| 4
|
fairseq/modules/multibranch.py
|
ishine/lite-transformer
| 543
|
12782205
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from . import MultiheadAttention
class MultiBranch(nn.Module):
def __init__(self, branches, embed_dim_list):
super().__init__()
self.branches = nn.ModuleList(branches)
self.embed_dim_list = embed_dim_list
def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, need_weights=True, static_kv=False, attn_mask=None):
tgt_len, bsz, embed_size = query.size()
assert sum(self.embed_dim_list) == embed_size
out = []
attn = None
start = 0
for idx, embed_dim in enumerate(self.embed_dim_list):
branch = self.branches[idx]
branch_type = type(branch)
q = query[...,start:start+embed_dim]
if key is not None:
assert value is not None
k, v = key[..., start:start+embed_dim], value[..., start:start+embed_dim]
start += embed_dim
if branch_type == MultiheadAttention:
x, attn = branch(q, k, v, key_padding_mask, incremental_state, need_weights, static_kv, attn_mask)
else:
mask = key_padding_mask
if mask is not None:
q = q.masked_fill(mask.transpose(0, 1).unsqueeze(2), 0)
x = branch(q.contiguous(), incremental_state=incremental_state)
out.append(x)
out = torch.cat(out, dim=-1)
return out, attn
| 2.28125
| 2
|
ComputerVision/AruCO_Board.py
|
mateusribs/DissertacaoMestrado
| 0
|
12782206
|
<reponame>mateusribs/DissertacaoMestrado<filename>ComputerVision/AruCO_Board.py
import numpy as np
import cv2 as cv
dictionar = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_4X4_50)
board = cv.aruco.GridBoard_create(2, 2, 0.05, 0.005, dictionar)
img = board.draw((680,500), 10, 1)
cv.imwrite('aruco_board.png', img)
# Display the image to the user
cv.imshow('Gridboard', img)
# Exit on any key
cv.waitKey(0)
cv.destroyAllWindows()
| 2.796875
| 3
|
src/infrastructure/translators/holiday_translator.py
|
gabrielleandro0801/holidays-importer
| 0
|
12782207
|
<reponame>gabrielleandro0801/holidays-importer
from datetime import datetime
from typing import Any, Optional
from src.domain.holiday import Holiday
DATE: dict = {
'FROM': '%Y-%m-%d',
'TO': '%Y/%m/%d'
}
def format_date(date: str) -> str:
formatted_date: datetime = datetime.strptime(date, DATE["FROM"])
return formatted_date.strftime(DATE["TO"])
class HolidayTranslator:
def __init__(self):
pass
    def translate(self, holiday: dict) -> Optional[Holiday]:
        category: str = holiday["type"].upper()
return Holiday(
date=format_date(holiday["date"]),
name=holiday["name"],
category=category
)
    def clean(self, holiday: Optional[Holiday]) -> Optional[Holiday]:
if holiday is not None:
return holiday
def create_holiday_translator() -> Any:
return lambda: HolidayTranslator()
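# Illustrative usage (not part of the original module); the payload mirrors the keys
# translate() reads above (date, name, type), with made-up values.
if __name__ == "__main__":
    print(format_date("2025-12-25"))  # -> "2025/12/25"
    translator = create_holiday_translator()()
    # Returns a Holiday with the date reformatted and the category upper-cased.
    christmas = translator.translate({"date": "2025-12-25", "name": "Christmas", "type": "national"})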
| 3.40625
| 3
|
setup.py
|
alekbuza/python-sgetopt
| 1
|
12782208
|
<filename>setup.py
#!/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
project_long_description = f.read()
with open(path.join(here, "LICENSE"), encoding="utf-8") as f:
project_license = f.read()
setup(
name="sgetopt",
version="0.0.1",
packages=["sgetopt"],
python_requires=">=3.6",
url="https://github.com/alekbuza/python-sgetopt",
license=project_license,
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Operating System :: POSIX",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries",
],
description="Simplified implementation of getopt (Unix C-style) "
"command-line arguments parser as Python decorator",
long_description=project_long_description,
long_description_content_type="text/markdown",
)
| 1.351563
| 1
|
tests/repository/test_memory.py
|
kobibleu/easyrepo
| 0
|
12782209
|
<filename>tests/repository/test_memory.py
from typing import Optional
import pytest
from pydantic import BaseModel
from easyrepo.model.paging import PageRequest
from easyrepo.repository.memory import MemoryRepository
class TestModel(BaseModel):
id: Optional[int]
name: str
class DictRepo(MemoryRepository[dict]):
pass
class ModelRepo(MemoryRepository[TestModel]):
pass
@pytest.fixture
def dict_repo():
repo = DictRepo()
repo._data = {
1: {"id": 1, "name": "entity1"},
2: {"id": 2, "name": "entity2"},
3: {"id": 3, "name": "entity3"}
}
yield repo
@pytest.fixture
def model_repo():
repo = ModelRepo()
yield repo
def test_count(dict_repo):
assert dict_repo.count() == 3
def test_delete_all(dict_repo):
dict_repo.delete_all()
assert dict_repo.count() == 0
def test_delete_all_by_id(dict_repo):
dict_repo.delete_all_by_id([1, 2])
assert dict_repo.count() == 1
def test_delete_by_id(dict_repo):
dict_repo.delete_by_id(1)
assert dict_repo.count() == 2
def test_exists_by_id(dict_repo):
assert dict_repo.exists_by_id(1)
assert not dict_repo.exists_by_id(4)
def test_find_all(dict_repo):
assert len(dict_repo.find_all()) == 3
def test_find_page(dict_repo):
res = dict_repo.find_page(PageRequest.of_size(2))
assert len(res.content) == 2
assert res.total_elements == 3
def test_find_all_by_id(dict_repo):
assert len(dict_repo.find_all_by_id([1, 2])) == 2
def test_find_by_id(dict_repo):
assert dict_repo.find_by_id(1)["name"] == "entity1"
assert dict_repo.find_by_id(4) is None
def test_save_dict_type(dict_repo):
res = dict_repo.save({"name": "entity4"})
assert res["id"] == 4
res["name"] = "entity4bis"
res = dict_repo.save(res)
assert res["name"] == "entity4bis"
def test_save_pydantic_model_type(model_repo):
res = model_repo.save(TestModel(name="entity1"))
assert res.id == 1
res.name = "entity1bis"
res = model_repo.save(res)
assert res.name == "entity1bis"
def test_save_unexpected_type(dict_repo):
with pytest.raises(ValueError):
dict_repo.save(1)
def test_save_dict_type_list(dict_repo):
res = dict_repo.save_all([
{"id": 3, "name": "entity3bis"},
{"name": "entity4"},
{"name": "entity5"}
])
assert len(res) == 3
assert len(dict_repo.find_all()) == 5
def test_save_pydantic_model_type_list(model_repo):
res = model_repo.save_all([
TestModel(name="entity1"),
TestModel(name="entity2"),
TestModel(name="entity3")
])
assert len(res) == 3
assert len(model_repo.find_all()) == 3
| 2.234375
| 2
|
pie4t/assist.py
|
beardad1975/pie4t
| 1
|
12782210
|
<reponame>beardad1975/pie4t
from time import time
import arcade
import pymunk
from pymunk.vec2d import Vec2d
from . import common
class DotMark:
def __init__(self, x=0, y=0):
self.enabled = False
self.x = x
self.y = y
self.timestamp = time()
def lazy_setup(self):
self.shape_list = arcade.ShapeElementList()
s = arcade.create_ellipse(0, 0 , 20, 20, (255,255,255,120))
self.shape_list.append(s)
vs = arcade.create_rectangle(0,0, 3, 20, arcade.color.BLACK )
hs = arcade.create_rectangle(0,0, 20, 3, arcade.color.BLACK )
self.shape_list.append(vs)
self.shape_list.append(hs)
def update_pos(self, x, y):
self.x = x
self.y = y
self.timestamp = time()
self.enabled = True
def draw(self):
if self.enabled:
if time() - self.timestamp < common.ASSIST_MARK_PERIOD:
self.shape_list.center_x = self.x
self.shape_list.center_y = self.y
self.shape_list.angle = 0 # no rotation
self.shape_list.draw()
else: # expired
self.enabled = False
class SegmentAddAssist:
def __init__(self):
self._enabled = False
self.first_clicked = False
self.first_x = 0
self.first_y = 0
self.second_x = 0
self.second_y = 0
self.dirty = False
def enable(self):
self._enabled = True
self.first_clicked = False
self.first_x = 0
self.first_y = 0
self.second_x = 0
self.second_y = 0
self.dirty = False
cur = common.stage.get_system_mouse_cursor('crosshair')
common.stage.set_mouse_cursor(cur)
common.stage.模擬暫停 = True
def disable(self):
self._enabled = False
common.stage.set_mouse_cursor(None)
common.stage.模擬暫停 = False
if self.dirty:
common.stage.save_terrain()
@property
def enabled(self):
return self._enabled
# def lazy_setup(self):
# #self.shape_list = arcade.ShapeElementList()
# pass
def click(self, x, y):
if not self.first_clicked:
# first click
self.first_x = x
self.first_y = y
self.first_clicked = True
else: # second click
self.second_x = x
self.second_y = y
self.first_clicked = False
if not (self.first_x == self.second_x and self.first_y == self.second_y):
common.stage.新增線段((self.first_x,self.first_y),
(self.second_x, self.second_y),
common.SEG_THICKNESS)
self.dirty = True
def draw(self):
if self._enabled and self.first_clicked:
fx = self.first_x
fy = self.first_y
mx = common.stage.mouse_x
my = common.stage.mouse_y
arcade.draw_line(fx, fy, mx, my,arcade.color.GREEN ,common.SEG_THICKNESS)
class SegmentRemoveAssist:
def __init__(self):
self._enabled = False
self.dirty = False
self.hover_segment = None
self.seg_filter = pymunk.ShapeFilter(mask=common.CATE_SEGMENT)
def enable(self):
self._enabled = True
self.dirty = False
self.hover_segment = None
cur = common.stage.get_system_mouse_cursor('help')
common.stage.set_mouse_cursor(cur)
common.stage.模擬暫停 = True
def disable(self):
self._enabled = False
common.stage.set_mouse_cursor(None)
common.stage.模擬暫停 = False
if self.dirty:
common.stage.save_terrain()
@property
def enabled(self):
return self._enabled
# def lazy_setup(self):
# #self.shape_list = arcade.ShapeElementList()
# pass
def draw(self):
if self._enabled:
if self.hover_segment:
a = self.hover_segment.a
b = self.hover_segment.b
thickness = self.hover_segment.thickness
arcade.draw_line(a[0], a[1], b[0], b[1],
arcade.color.RED, thickness)
def click(self, x, y):
if self._enabled:
if self.hover_segment:
common.stage.移除(self.hover_segment)
self.hover_segment = None
self.dirty = True
def update_hover(self,x, y):
if self._enabled:
query = common.stage.space.point_query_nearest((x,y), 3, self.seg_filter)
if query:
self.hover_segment = query.shape.obj
else:
self.hover_segment = None
class ArrowAssist:
def __init__(self):
#self.mode_turn_on = False # turn on by user
self.enabled = False # drawing
#self.vector = 0
self.start_x = 0
self.start_y = 0
# self.mouse_x = 0
# self.mouse_y = 0
def start(self, pos=None):
#if self.mode_turn_on :
if pos is None :
self.start_x = common.stage.mouse_x
self.start_y = common.stage.mouse_y
else:
pos = Vec2d(pos)
self.start_x = pos.x
self.start_y = pos.y
self.enabled = True
# def update_mouse_pos(self, x, y):
# if self.mode_turn_on and self.enabled:
# self.mouse_x = x
# self.mouse_y = y
def launch(self):
if self.enabled:
self.enabled = False
# def lazy_setup(self):
# pass
def draw(self):
if self.enabled :
sx = self.start_x
sy = self.start_y
mx = common.stage.mouse_x
my = common.stage.mouse_y
length = self.vector.length
#print("length: ",self.vector.length )
if length > 40:
line_v = self.vector
line_v.length -= 30
# line
arcade.draw_line(sx, sy, sx + line_v.x, sy + line_v.y,
arcade.color.RED ,common.SEG_THICKNESS*3)
# triangle
#if length
if length < 500:
degree_delta = 15 - length / 50
else:
degree_delta = 5
left_v = self.vector
left_v.length -= 30
left_v.angle_degrees += degree_delta
right_v = self.vector
right_v.length -= 30
right_v.angle_degrees -= degree_delta
arcade.draw_triangle_filled(mx, my,
sx + left_v.x, sy + left_v.y,
sx + right_v.x, sy + right_v.y,
arcade.color.RED
)
@property
def vector(self):
sx = self.start_x
sy = self.start_y
mx = common.stage.mouse_x
my = common.stage.mouse_y
return Vec2d(mx - sx, my - sy)
@property
def start_pos(self):
return Vec2d(self.start_x, self.start_y)
class CoordinateAssist:
def __init__(self):
self._enabled = False
self.shape_element = None
self.text_list = []
self.coor_start = 0
self.win_width = common.stage.win_width
self.win_height = common.stage.win_height
self.font = ('C:/Windows/Fonts/msjh.ttc','arial')
upper_bound = max(self.win_width, self.win_height)
upper_bound = (upper_bound + 99) // 100 * 100
self.coor_end = upper_bound
self.coor_step = 50
self.label_step = 100
def enable(self):
self._enabled = True
def disable(self):
self._enabled = False
@property
def enabled(self):
return self._enabled
def lazy_setup(self):
self.shape_element = arcade.ShapeElementList()
i = 0
for y in range(self.coor_start, self.coor_end + self.coor_step, self.coor_step):
l = arcade.create_line(0, y, self.win_width ,y , arcade.color.ANTIQUE_BRONZE , 1)
self.shape_element.append(l)
r = arcade.create_rectangle(0, y, 8, 3, arcade.color.WHITE_SMOKE)
self.shape_element.append(r)
# Draw the x labels.
i = 0
for x in range(self.coor_start, self.coor_end + self.coor_step, self.coor_step):
l = arcade.create_line(x, 0, x ,self.win_height , arcade.color.ANTIQUE_BRONZE , 1)
self.shape_element.append(l)
r = arcade.create_rectangle(x, 0, 3, 8, arcade.color.WHITE_SMOKE)
self.shape_element.append(r)
# ---------------------------
for y in range(self.coor_start, self.coor_end + self.label_step, self.label_step):
t = arcade.Text(f"{y}", 5, y+3, arcade.color.WHITE_SMOKE, 12, anchor_x="left",
anchor_y="center", font_name=self.font)
self.text_list.append(t)
i += 1
i = 1
for x in range(self.coor_start + self.label_step, self.coor_end + self.label_step, self.label_step):
t = arcade.Text( f"{x}" , x, 5, arcade.color.WHITE_SMOKE, 12, anchor_x="center",
anchor_y="bottom", font_name=self.font)
self.text_list.append(t)
i += 1
t = arcade.Text("Y", 25, common.stage.win_height-25, arcade.color.WHITE_SMOKE, 18,
anchor_x="center", anchor_y="center", font_name=self.font)
self.text_list.append(t)
t = arcade.Text("X", common.stage.win_width-25, 25, arcade.color.WHITE_SMOKE, 18,
anchor_x="center", anchor_y="center", font_name=self.font)
self.text_list.append(t)
def draw(self):
if self._enabled:
self.shape_element.center_x = 0
self.shape_element.center_y = 0
self.shape_element.angle = 0
self.shape_element.draw()
for t in self.text_list:
t.draw()
# i = 0
# for y in range(self.coor_start, self.coor_end + self.label_step, self.label_step):
# arcade.draw_text(f"{y}", 5, y+3, arcade.color.WHITE_SMOKE, 12, anchor_x="left",
# anchor_y="center", font_name=self.font)
# i += 1
# i = 1
# for x in range(self.coor_start + self.label_step, self.coor_end + self.label_step, self.label_step):
# arcade.draw_text( f"{x}" , x, 5, arcade.color.WHITE_SMOKE, 12, anchor_x="center",
# anchor_y="bottom", font_name=self.font)
# i += 1
# arcade.draw_text("Y", 25, common.stage.win_height-25, arcade.color.WHITE_SMOKE, 18,
# anchor_x="center", anchor_y="center", font_name=self.font)
# arcade.draw_text("X", common.stage.win_width-25, 25, arcade.color.WHITE_SMOKE, 18,
# anchor_x="center", anchor_y="center", font_name=self.font)
| 2.46875
| 2
|
api/rules.py
|
poldracklab/bids-core
| 1
|
12782211
|
import fnmatch
from . import jobs
from . import config
log = config.log
#
# {
# At least one match from this array must succeed, or array must be empty
# "any": [
# ["file.type", "dicom" ] # Match the file's type
# ["file.name", "*.dcm" ] # Match a shell glob for the file name
# ["file.measurements", "diffusion" ] # Match any of the file's measurements
# ["container.measurement", "diffusion" ] # Match the container's primary measurement
# ["container.has-type", "bvec" ] # Match the container having any file (including this one) with this type
# ]
#
# All matches from array must succeed, or array must be empty
# "all": [
# ]
#
# Algorithm to run if both sets of rules match
# "alg": "dcm2nii"
# }
#
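# Illustrative rule (not one of the defaults below; the algorithm name is made up):
# run "nifti-qc" on any nifti file whose name matches *.nii.gz or whose
# measurements include "functional".
#
# {
#     'alg': 'nifti-qc',
#     'any': [
#         ['file.name', '*.nii.gz'],
#         ['file.measurements', 'functional']
#     ],
#     'all': [
#         ['file.type', 'nifti']
#     ]
# }
#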
MATCH_TYPES = [
'file.type',
'file.name',
'file.measurements',
'container.measurement',
'container.has-type'
]
# TODO: replace with default rules, which get persisted, maintained, upgraded, and reasoned intelligently
HARDCODED_RULES = [
{
'alg': 'dicom_mr_classifier',
'all': [
['file.type', 'dicom']
]
},
{
'alg': 'dcm_convert',
'all': [
['file.type', 'dicom']
]
},
{
'alg': 'qa-report-fmri',
'all': [
['file.type', 'nifti']
]
}
]
def _log_file_key_error(file_, container, error):
log.warning('file ' + file_.get('name', '?') + ' in container ' + str(container.get('_id', '?')) + ' ' + error)
def eval_match(match_type, match_param, file_, container):
"""
Given a match entry, return if the match succeeded.
"""
# Match the file's type
if match_type == 'file.type':
try:
return file_['type'] == match_param
except KeyError:
_log_file_key_error(file_, container, 'has no type key')
return False
# Match a shell glob for the file name
elif match_type == 'file.name':
return fnmatch.fnmatch(file_['name'], match_param)
# Match any of the file's measurements
elif match_type == 'file.measurements':
try:
return match_param in file_['measurements']
except KeyError:
_log_file_key_error(file_, container, 'has no measurements key')
return False
    # Match the container's primary measurement
elif match_type == 'container.measurement':
return container['measurement'] == match_param
# Match the container having any file (including this one) with this type
elif match_type == 'container.has-type':
for c_file in container['files']:
if match_param in c_file['measurements']:
return True
return False
raise Exception('Unimplemented match type ' + match_type)
def eval_rule(rule, file_, container):
"""
Decide if a rule should spawn a job.
"""
# Are there matches in the 'any' set?
must_match = len(rule.get('any', [])) > 0
has_match = False
for match in rule.get('any', []):
if eval_match(match[0], match[1], file_, container):
has_match = True
break
# If there were matches in the 'any' array and none of them succeeded
if must_match and not has_match:
return False
# Are there matches in the 'all' set?
for match in rule.get('all', []):
if not eval_match(match[0], match[1], file_, container):
return False
return True
def create_jobs(db, container, container_type, file_):
"""
Check all rules that apply to this file, and enqueue the jobs that should be run.
Returns the algorithm names that were queued.
"""
job_list = []
# Get configured rules for this project
rules = get_rules_for_container(db, container)
# Add hardcoded rules that cannot be removed or changed
for hardcoded_rule in HARDCODED_RULES:
rules.append(hardcoded_rule)
for rule in rules:
if eval_rule(rule, file_, container):
alg_name = rule['alg']
input = jobs.create_fileinput_from_reference(container, container_type, file_)
jobs.queue_job(db, alg_name, input)
job_list.append(alg_name)
return job_list
# TODO: consider moving to a module that has a variety of hierarchy-management helper functions
def get_rules_for_container(db, container):
"""
Recursively walk the hierarchy until the project object is found.
"""
if 'session' in container:
session = db.sessions.find_one({'_id': container['session']})
return get_rules_for_container(db, session)
elif 'project' in container:
project = db.projects.find_one({'_id': container['project']})
return get_rules_for_container(db, project)
else:
# Assume container is a project, or a collection (which currently cannot have a rules property)
return container.get('rules', [])
| 2.1875
| 2
|
experiments/webcam_pose2csv.py
|
Nao-Y1996/situation_recognition
| 0
|
12782212
|
<filename>experiments/webcam_pose2csv.py
import argparse
import logging
import time
import os
import csv
import datetime
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
import sys
# sys.path.remove('/opt/ros/melodic/lib/python2.7/dist-packages')
import cv2
logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='tf-pose-estimation realtime webcam')
parser.add_argument('--camera', type=int, default=0)
parser.add_argument('--resize', type=str, default='0x0',
help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
parser.add_argument('--model', type=str, default='mobilenet_thin',
help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
parser.add_argument('--tensorrt', type=str, default="False",
help='for tensorrt process.')
args = parser.parse_args()
logger.debug('initialization %s : %s' %
(args.model, get_graph_path(args.model)))
w, h = model_wh(args.resize)
if w > 0 and h > 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(
w, h), trt_bool=str2bool(args.tensorrt))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(
432, 368), trt_bool=str2bool(args.tensorrt))
logger.debug('cam read+')
cam = cv2.VideoCapture(args.camera)
ret_val, image = cam.read()
logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))
    # Settings for saving pose data to CSV
dir_here = os.path.dirname(os.path.abspath(__file__))
# base_dir = '/home/kubotalab-hsr/Desktop/webcamera_pose_data'
base_dir = dir_here + '/data/'
dt_now = datetime.datetime.now()
new_dir_path = str(dt_now)[0:16].replace(' ', '-').replace(':', '-')
save_dir = base_dir + new_dir_path
os.makedirs(save_dir+'/images/')
pose_par_second_path = save_dir + '/index_per_second.csv'
f = open(pose_par_second_path, 'w')
    f.close()
pose_path = save_dir + '/pose.csv'
f = open(pose_path, 'w')
    f.close()
    # Settings for saving the video file
camera_w = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
camera_h = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
# print(camera_h, camera_w)
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# video = cv2.VideoWriter(base_dir + '/output.mov', fourcc, 30, (camera_w,camera_h))
    elapsed_time = 0
    frame_num = 0  # current frame index
    index_pose_par_second = []  # frame indices recorded once per second
while True:
processing_start = time.time()
ret_val, image = cam.read()
        # Run pose estimation
humans = e.inference(image, resize_to_default=(
w > 0 and h > 0), upsample_size=args.resize_out_ratio)
all_pose_data = []
        # When at least one person is detected
if len(humans) != 0:
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
            # Extract the pose keypoints
for human in humans:
pose_data = []
for part_index in range(18):
try:
part = human.body_parts[part_index]
pose_data.extend(
# [int(part.x*camera_w), int(part.y*camera_h), round(part.score, 4)])
[round(part.x, 4), round(part.y,4), round(part.score, 4)])
except:
pose_data.extend([0.0, 0.0, 0.0])
all_pose_data.extend(pose_data)
            # Record the pose every frame
with open(pose_path, 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(all_pose_data)
        # When no person is detected
else:
all_pose_data = np.zeros(18*3)
with open(pose_path, 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(all_pose_data)
        # Record the current frame index once per second
        if elapsed_time > 1.0:
            with open(pose_par_second_path, 'a') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow([frame_num])
            elapsed_time = 0
cv2.imwrite(save_dir + '/images/' + str(frame_num) + '.png', image)
# cv2.putText(image,"frame: : %f" % frame,(5, 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
        # Show the frame
cv2.imshow('tf-pose-estimation result', image)
# image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
# cv2.putText(image,"FPS: %f" % (1.0 / processing_time),(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
# cv2.imshow('tf-pose-estimation result', image)
# cv2.imwrite(save_dir + '/images/' + str(count) + '.png',image)
processing_time = time.time() - processing_start
        elapsed_time += processing_time
# fps = 1.0 / processing_time
frame_num += 1
if cv2.waitKey(1) == 27:
break
cam.release()
cv2.destroyAllWindows()
| 2.4375
| 2
|
access-benty-fields.py
|
aibhleog/arXiv-order
| 0
|
12782213
|
'''
Script used to pull voting information from benty-fields.com.
Must be logged in to access the benty-fields.
NOTES:
1) benty-fields mostly organizes paper suggestions based upon voting
history and chosen preferences (machine learning involved), so these
voting totals can be considered as a control sample (in a way?).
2) Additionally, can only see the total votes for the "most popular"; the
total votes per paper is not information available through the search.
--> (so smaller sample than VoxCharta votes)
3) "last year" not an option
THOUGHT: how new is benty-fields?
'''
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from datetime import datetime
import threading, time, getpass, sys, subprocess
import pandas as pd
import numpy as np
from datetime import datetime as dt
from datetime import timedelta
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# amount of time to wait
timeit = 2 # seconds
# ------------------------ #
# -- creating dataframe -- #
# ------------------------ #
df_dtypes = {'id':str,'total_votes':int}
# will be creating them inside the outermost for loop
# ------------------------ #
logmein = True # option to log into benty-fields account
# opening browser & going to benty-fields
if logmein == True:
# pulling information to access arXiv account
username = '<EMAIL>'
password = '<PASSWORD>!'
#username = input('\nBenty-Fields username: ')
assert len(username) > 0, "Need to provide a username"
#password = input('Benty-Fields password: ')
assert len(password) > 0, "Need to provide account password"
driver = webdriver.Firefox()
driver.get("https://www.benty-fields.com/login")
assert "Login" in driver.title
# finding log in cells
usern = driver.find_element_by_id("email")
passw = driver.find_element_by_id("password")
usern.clear()
passw.clear()
# adding log in info
usern.send_keys(username)
passw.send_keys(password)
# locating "Log In" button
buttons = driver.find_element_by_class_name("modal-footer")
login = buttons.find_element_by_xpath("//button[@type='submit' and contains(.,'Login')]")
login.click()
else:
driver = webdriver.Firefox()
driver.get("https://www.benty-fields.com/most_popular")
# going to Most Popular page
driver.get("https://www.benty-fields.com/most_popular")
frequencies = ['last week','last month','last 6 months']
freq_days = [7,30,180]
for freq in frequencies: # running through frequencies
print(f'''\n---------------------------------------
Looking at Most Popular: {freq}
---------------------------------------\n''')
df = pd.DataFrame({'id':[],'total_votes':[]}) # dataframe for the frequency
idx = frequencies.index(freq)
if freq != 'last week': # have to change frequency
period = driver.find_element_by_xpath("//button[@data-toggle='dropdown']")
period.click() # this works
# THIS WORKS!!! Can I tell you how long this took me to figure out.......
loc = f"//ul[@class='dropdown-menu inner']/li[contains(.,'{freq}')]"
last_month = driver.find_element_by_xpath(loc)
last_month.click() # this works
time.sleep(5) # let it load
# ... just realized I could have just used the URL
# most_popular/1?period=180 # where period == number of days
# -- most popular votes -- #
# ------------------------ #
i = 0
    for page in range(1, 50): # scrape result pages 1 through 49
        # going page by page through the results
items = driver.find_elements_by_class_name("paper")
# running through the posts to pull out arXiv ID
for item in items:
print(f"{i}) ",end=' ')
arxiv_id = item.get_attribute('id') # "paper####.#####v#"
arxiv_id = arxiv_id.lstrip('paper')
arxiv_id = arxiv_id.rsplit('v')[0]
print(arxiv_id,end='\t')
# total votes
votes = item.find_element_by_tag_name("h3").text
votes = votes.rsplit('Votes ')[1].rsplit(')')[0] # pulling out just vote count
votes = int(votes) # just because
print(f"{votes} votes")
# adding value to dataframe
filler_df = pd.DataFrame({'id':[arxiv_id],'total_votes':[votes]})
df = df.append(filler_df,ignore_index=True)
i += 1
# going to the next page using the link (instead of clicking the buttons)
next_page = f"https://www.benty-fields.com/most_popular/{page+1}?period={freq_days[idx]}"
driver.get(next_page)
# saving dataframe
freq_dash = freq.replace(' ','-')
df_dtypes = {'id':str,'total_votes':int}
sub_df = pd.read_csv(f'votes_benty-fields/benty-fields_voting-{freq_dash}.txt',sep='\t',dtype=df_dtypes) # reading in to add
df = df.astype(df_dtypes) # to make sure column dtypes don't change
# appending on data
final_df = sub_df.append(df,ignore_index=True)
# checking for duplicates
ids = set(final_df.id.values) # creates 'set' of unique values
if len(ids) != len(final_df): # SO checking for duplicates added in to table
print(f'\nLength of sub_df: \t\t\t\t\t{len(sub_df)}')
print(f'Length of df: \t\t\t\t\t\t{len(df)}')
print(f'Length of combined df: \t\t\t\t\t{len(final_df)}')
final_df.drop_duplicates(inplace=True,subset='id',keep='last') # want most up-to-date #'s
print(f'Length of final_df after dropping id duplicates: \t{len(final_df)}')
else:
print(f'\nNo duplicates, check passed.')
final_df.to_csv(f'votes_benty-fields/benty-fields_voting-{freq_dash}.txt',sep='\t',index=False)
print(f"\nData saved to 'benty-fields_voting-{freq_dash}.txt'",end='\n\n')
# Wait briefly before closing the browser
time.sleep(timeit)
driver.close()
| 3.09375
| 3
|
src/traingame/__version__.py
|
timolesterhuis/traingame
| 0
|
12782214
|
<filename>src/traingame/__version__.py
__title__ = "traingame"
__description__ = "Train your own AI and race it!"
__url__ = ""
__version__ = "0.4.0"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
| 1.3125
| 1
|
Conditional/Lista 1/Thiago/11.py
|
Vitor-ORB/algorithms-and-programming-1-ufms
| 7
|
12782215
|
<reponame>Vitor-ORB/algorithms-and-programming-1-ufms
# Reads the time as h m s separated only by spaces.
def entrada():
h, m, s = map(int, input(
"Insira o horário separado apenas por espaço: ").split())
return h, m, s
# Computes the difference between the start and end times.
def calculo(h1, m1, s1, h2, m2, s2):
s = s2 - s1
m = m2 - m1
h = h2 - h1
return h, m, s
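# Worked example: start 10 30 50 and end 11 20 40 -> prints "O jogo durou: 0 49 50"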
h1, m1, s1 = entrada()
h2, m2, s2 = entrada()
if s1 > s2:
s2 = s2 + 60
m2 = m2 - 1
if m1 > m2:
m2 = m2 + 60
h2 = h2 - 1
if h1 > h2:
h2 = h2 + 24
hFinal, mFinal, sFinal = calculo(h1, m1, s1, h2, m2, s2)
print(f"O jogo durou: {hFinal} {mFinal} {sFinal}")
| 3.953125
| 4
|
exercise_report_slack/util/slack_api_util.py
|
yamap55/exercise_report_slack
| 0
|
12782216
|
<filename>exercise_report_slack/util/slack_api_util.py
"""Slack APIを操作する関数群"""
from time import sleep
from typing import Any, Callable, Dict, List, Optional
from exercise_report_slack.settings import client
from slack_sdk.web.slack_response import SlackResponse
def get_channel_id(name: str) -> str:
"""
指定されたチャンネルのチャンネルIDを取得
Parameters
----------
name : str
チャンネル名
Returns
-------
str
チャンネルID
Raises
-------
ValueError
存在しないチャンネル名の場合
"""
# https://api.slack.com/methods/conversations.list
try:
option = {}
next_cursor = "DUMMY" # whileを1度は回すためダミー値を設定
while next_cursor:
response = client.conversations_list(**option).data
target_channnels = [
channel["id"] for channel in response["channels"] if channel["name"] == name
]
if target_channnels:
return target_channnels[0]
            # When there are many channels they cannot all be fetched in one call.
            # Note: unlike the message-fetching APIs, this response has no "has_more" attribute.
next_cursor = response["response_metadata"]["next_cursor"]
option["cursor"] = next_cursor
sleep(1) # need to wait 1 sec before next call due to rate limits
raise ValueError("not exists channel name.")
except StopIteration:
raise ValueError("not exists channel name.")
def get_user_name(user_id: str) -> str:
"""
指定されたユーザIDのユーザ名を取得
Parameters
----------
user_id : str
ユーザID
Returns
-------
str
ユーザ名
Raises
-------
SlackApiError
存在しないユーザIDの場合
"""
# https://api.slack.com/methods/users.info
return client.users_info(user=user_id)["user"]["real_name"]
def post_message(
channel_id: str, text: str, thread_ts: Optional[str] = None, mention_users: List[str] = []
) -> Dict[str, Any]:
"""
指定されたチャンネルにメッセージをポスト
Parameters
----------
channel_id : str
チャンネルID
text : str
ポストする内容
thread_ts : Optional[str], optional
リプライとしたい場合に指定するタイムスタンプ, by default None
mention_users : List[str], optional
メンションを指定するユーザID
テキストの先頭に空白区切りで付与します
2人以上が指定されている場合はメンション後に改行を追加します, by default []
Returns
-------
Dict[str, Any]
ポストしたメッセージのデータ
"""
# https://api.slack.com/methods/chat.postMessage
mentions = [f"<@{u}>" for u in mention_users]
mentions_postfix = "\n" if len(mentions) > 1 else ""
send_message = " ".join(mentions) + mentions_postfix + text
res = client.chat_postMessage(channel=channel_id, text=send_message, thread_ts=thread_ts)
return res.data
def __get_all_message_by_iterating(
func: Callable[..., SlackResponse], option: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""繰り返し処理ですべてのメッセージを取得"""
response = func(**option).data
messages_all = response["messages"]
while response["has_more"]:
sleep(1) # need to wait 1 sec before next call due to rate limits
response = func(**option, cursor=response["response_metadata"]["next_cursor"]).data
messages = response["messages"]
messages_all = messages_all + messages
return messages_all
def get_channel_message(channel_id: str, oldest: float, latest: float) -> List[Dict[str, Any]]:
"""
指定されたチャンネルのメッセージを取得
Parameters
----------
channel_id : str
チャンネルID
oldest : float
取得を行う最初の時間
latest : float
取得を行う最後の時間
Returns
-------
List[Dict[str, Any]]
指定されたチャンネルのメッセージ
"""
# https://api.slack.com/methods/conversations.history
option = {"channel": channel_id, "oldest": oldest, "latest": latest}
return __get_all_message_by_iterating(client.conversations_history, option)
def get_replies(channel_id: str, message: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
指定されたメッセージのリプライを取得
Parameters
----------
channel_id : str
チャンネルID
message : Dict[str, Any]
リプライを取得する対象のメッセージ
Returns
-------
List[Dict[str, Any]]
リプライメッセージ
リプライがついていない場合は空のリスト
"""
# https://api.slack.com/methods/conversations.replies
if "thread_ts" not in message:
return []
option = {"channel": channel_id, "ts": message["thread_ts"]}
return __get_all_message_by_iterating(client.conversations_replies, option)
| 2.921875
| 3
|
src/models/board.py
|
hadizakialqattan/sudoku
| 6
|
12782217
|
import pygame
# local import
from base.base import GUIBase
from solver.solver import Solver
class Board(GUIBase):
"""Screen Board
:param board: Sudoku board represent as two dimensional array
:type board: list
:param size: screen dimensions (pixels) (width, height)
:type size: tuple
:param screen: pygame screen
:type screen: pygame.Surface
"""
def __init__(self, size: tuple, board: list, screen: pygame.Surface):
super().__init__((size[1], size[1], size[0] - size[1]), screen)
self.__board = board
self.__solver = Solver(self)
# create squares list
self.__squares = [
[
Square(
self.__board[c][r],
(r, c),
(self.size[0], self.size[2]),
self.screen,
True if self.__board[c][r] == 0 else False,
)
for r in range(9)
]
for c in range(9)
]
self.__selected = None
self.__wrong = None
@property
def wrong(self):
"""wrong property (getter)"""
return self.__wrong
@property
def squares(self) -> list:
"""squares property (getter)"""
return self.__squares
def update_squares(self):
"""squares property (updatter)"""
# iterate over all squares
for r in range(9):
for c in range(9):
# update values
self.__squares[r][c].value = self.__board[r][c]
self.__squares[r][c].pencil = 0
@property
def board(self) -> list:
"""board property (getter)"""
return self.__board
@board.setter
def board(self, board: list):
"""board property (setter) & update squares
:param board: Sudoku board represent as two dimensional array
:type board: list
"""
# set new board
self.__board = board
# reinit squares
self.__squares = [
[
Square(
self.__board[c][r],
(r, c),
(self.size[0], self.size[2]),
self.screen,
True if self.__board[c][r] == 0 else False,
)
for r in range(9)
]
for c in range(9)
]
@property
def selected(self) -> tuple:
"""selected property (getter)"""
return self.__selected
@selected.setter
def selected(self, pos: tuple):
"""selected property (setter) & refresh squares
:param pos: selected square position (row, column)
:type pos: tuple
"""
if not self.__wrong:
# clear previous selection
if self.__selected != None:
self.__squares[self.__selected[0]][self.__selected[1]].selected = False
if pos:
# select new square
self.__selected = pos
self.__squares[self.__selected[0]][self.__selected[1]].selected = True
else:
# set selected to None if pos out of board
self.__selected = None
@property
def get_pencil(self) -> int:
"""selected square pencil (getter)"""
# get selected square
r, c = self.__selected
return self.__squares[r][c].pencil
def set_pencil(self, value: int):
"""set pencil value
:param value: pencil value
:type value: int
"""
# get selected square
r, c = self.__selected
if self.__squares[r][c].value == 0:
self.__squares[r][c].pencil = value
@property
def get_value(self) -> int:
"""selected square value (getter)"""
# get selected square
r, c = self.__selected
return self.__squares[r][c].value
def set_value(self) -> str:
"""set square value
:returns: board state ('s' -> success, 'w' -> wrong, 'c' -> unsolvable board)
:rtype: str
"""
# get selected square
r, c = self.__selected
if self.get_value == 0:
            # check for a non-zero pencil value
pencil = self.get_pencil
if pencil != 0:
# check the number match Sudoku rules
w = self.__solver.exists(self.__board, pencil, (r, c))
if w:
# change squares state to wrong (red color)
self.__squares[r][c].wrong = True
self.__squares[w[0]][w[1]].wrong = True
self.__squares[r][c].value = pencil
self.__board[r][c] = pencil
self.__wrong = w
return "w"
else:
                # set the square value and report success
self.__squares[r][c].value = pencil
self.__board[r][c] = pencil
# copy board
# init copy as two dimensional array with 9 rows
copy = [[] for r in range(9)]
# iterate over all rows
for r in range(9):
# iterate over all columns
for c in range(9):
# append the num
copy[r].append(self.__board[r][c])
# check if the board unsolvable
if not self.__solver.solve(copy):
return "c"
return "s"
@property
def clear(self):
"""clear selected square value"""
# get selected square
r, c = self.__selected
# clear square value and pencil
self.__squares[r][c].value = 0
self.__squares[r][c].pencil = 0
self.__board[r][c] = 0
# change wrong state
if self.__wrong:
self.__squares[r][c].wrong = False
self.__squares[self.__wrong[0]][self.__wrong[1]].wrong = False
self.__wrong = None
@property
def isfinished(self):
"""return true if there's no more empty squares else false
:returns: true if there's no more empty squares else false
:rtype: bool
"""
return not self.__solver.nextpos(self.board)
def set_sq_value(self, value: int, pos: tuple):
"""change square value by position
:param value: new square value
:type value: int
:param pos: square position
:type pos: tuple
"""
self.__squares[pos[0]][pos[1]].value = value
def draw(self):
"""Draw the board on the screen"""
# Draw squares
# iterate over all rows
for r in range(9):
# iterate over all columns
for c in range(9):
# draw square value
self.__squares[c][r].draw()
# Draw grid
# set space between squares
space = self.size[0] // 9
        # draw 10 horizontal and 10 vertical lines
for r in range(10):
# set line weight (bold at the end of 3*3 area)
w = 4 if r % 3 == 0 and r != 0 else 1
# draw horizontal line (screen, (color), (start_pos), (end_pos), width)
pygame.draw.line(
self.screen,
(72, 234, 54),
(self.size[2], r * space),
(self.size[0] + self.size[2], r * space),
w,
)
# draw vertical line (screen, (color), (start_pos), (end_pos), width)
pygame.draw.line(
self.screen,
(72, 234, 54),
(r * space + self.size[2], 0),
(r * space + self.size[2], self.size[1]),
w,
)
class Square(GUIBase):
"""Board squeares
:param value: square display number
:type value: int
:param pos: square position (row, column)
:type pos: tuple
    :param widthpos: screen width and left gap (width, gap)
    :type widthpos: tuple
:param screen: pygame screen
:type screen: pygame.Surface
:param changeable: changeabllity
:type changeable: bool
"""
def __init__(
self,
value: int,
pos: tuple,
widthpos: tuple,
screen: pygame.Surface,
changeable: bool,
):
super().__init__(0, screen)
self.__value = value
self.__pos = pos
self.__widthpos = widthpos
self.__pencil = 0
self.__selected = False
self.__changeable = changeable
self.__wrong = False
@property
def changeable(self):
"""changeable property (getter)"""
return self.__changeable
@property
def selected(self) -> tuple:
"""selected property (getter)"""
return self.__selected
@selected.setter
def selected(self, v: bool):
"""selected property (setter)
:param v: selected value
:type v: bool
"""
self.__selected = v
@property
def value(self) -> int:
"""value property (getter)"""
return self.__value
@value.setter
def value(self, value: int):
"""value property (setter)
:param value: square value
:type value: int
"""
if self.__changeable:
self.__value = value
@property
def pencil(self) -> int:
"""pencil property (getter)"""
return self.__pencil
@pencil.setter
def pencil(self, value: int):
"""pencil property (setter)
:param value: pencil square value
:type value: int
"""
if self.__changeable:
self.__pencil = value
@property
def wrong(self):
"""wrong property (getter)"""
return self.__wrong
@wrong.setter
def wrong(self, w: bool):
"""wrong property (setter)
:param w: wrong value
:type w: bool
"""
self.__wrong = w
def draw(self):
"""Draw square value"""
# set space between squares
space = self.__widthpos[0] // 9
        # set actual square position on the screen
r, c = self.__pos[0] * space + self.__widthpos[1], self.__pos[1] * space
# fill unchangeable square background
if not self.__changeable:
sqsize = self.__widthpos[0] // 9
# draw rectangle (frame)
pygame.draw.rect(self.screen, (10, 30, 0), ((r, c), (sqsize, sqsize)))
        # check for non-zero squares
if self.__value != 0:
font = pygame.font.Font("../assets/Rubik-font/Rubik-Regular.ttf", 38)
# set color
rgb = (72, 234, 54) if not self.__wrong else (234, 72, 54)
            # create surface object
            v = font.render(str(self.__value), 1, rgb)
            # draw it on the screen
self.screen.blit(
v,
(
int(r + ((space / 2) - (v.get_width() / 2))),
int(c + ((space / 2) - (v.get_height() / 2))),
),
)
elif self.__pencil != 0:
font = pygame.font.Font("../assets/Rubik-font/Rubik-Regular.ttf", 30)
            # create surface object
            v = font.render(str(self.__pencil), 1, (2, 164, 0))
            # draw it on the screen
self.screen.blit(
v,
(
int(r + ((space / 2) - (v.get_width() / 2)) - 20),
int(c + ((space / 2) - (v.get_height() / 2)) - 15),
),
)
# draw bold outline around selected square
if self.__selected:
# draw rectangle (frame)
pygame.draw.rect(self.screen, (52, 214, 34), ((r, c), (space, space)), 3)
| 3.453125
| 3
|
web-app/src/models/quizgame/QuizGameQuizGameQuestionScore.py
|
philipp-mos/iubh-quiz-app
| 0
|
12782218
|
from ... import db
class QuizGameQuizGameQuestionScore(db.Model):
__tablename__ = 'quiz_game_quiz_game_question_scores'
id = db.Column(
db.Integer,
primary_key=True
)
quizgame_id = db.Column(
db.Integer(),
db.ForeignKey(
'quiz_games.id',
ondelete='CASCADE'
)
)
quizgamequestionscore_id = db.Column(
db.Integer(),
db.ForeignKey(
'quiz_game_question_scores.id',
ondelete='CASCADE'
)
)
| 2.40625
| 2
|
train_model.py
|
Yunodo/Seq2Seq
| 1
|
12782219
|
<gh_stars>1-10
import os
import numpy as np
import trax
import jax
import jax.numpy as jnp
import pandas as pd  # required by txt_reader below
from prepare_data import text_data_generator
from model import biLSTMwithAttn
from training_loop import create_training_loop
%load_ext tensorboard
vocab_file = 'en_8k.subword' # one of: 'subword', 'sentencepiece', 'char'
BATCH_SIZE = 64
n_training_steps = 30000
input_vocab_size = 8000
target_vocab_size = 8000
d_model = 512 # depth of inner layers of LSTM
def txt_reader(filepath):
# Reads a csv file and returns Python tuple generator
df = pd.read_csv(filepath)
while True:
try:
for row in df.itertuples(index = False):
yield(row[0], row[1]) # (input, target)
except StopIteration:
df = pd.read_csv(filepath)
train_data, eval_data = txt_reader('train.csv'), txt_reader('test.csv')
data_generator = text_data_generator(vocab_file, BATCH_SIZE)
train_stream, eval_stream = data_generator(train_data), data_generator(eval_data)
"""
Parameters for the model are automatically initialized with Trax training loop.
If there're some pretrained weights, use:
model.init_from_file(os.path.join(output_dir,'model.pkl.gz'),weights_only=True)
"""
Net = biLSTMwithAttn(input_vocab_size = input_vocab_size,
                     target_vocab_size = target_vocab_size, d_model = d_model, mode = 'train')
output_dir = "/content/drive/My Drive/paraphrase'" # weights, logs etc. stored
loop = create_training_loop(Net, train_stream,
eval_stream, output_dir)
%tensorboard --logdir output_dir # displaying TensorBoard training results
loop.run(n_training_steps)
"""
How to resume checkpoint if training broke down:
loop.load_checkpoint(directory=output_dir, filename="model.pkl.gz")
"""
| 2.390625
| 2
|
opencamlib-read-only/src/get_revision.py
|
play113/swer
| 0
|
12782220
|
import commands
# import subprocess
import sys
import os
f = open("revision.h", "w")
# change directory to src/
os.chdir(sys.argv[1])
# run svnversion to get the revision number
rev_number = commands.getoutput("svnversion")
print "get_revision.py: got revision: ",rev_number
# commands is deprecated, should use subprocess instead?
# rev_number = subprocess.Popen(["svnversion"], stdout=subprocess.PIPE).communicate()[0]
# current_dir = commands.getoutput("pwd")
# print "get_revision.py: writing revision.h to ", current_dir
rev_string = '#define OCL_REV_STRING "OpenCAMLib Revision ' + rev_number + '"'
f.write(rev_string)
f.close()
| 2.546875
| 3
|
siswrapper/__init__.py
|
mario33881/siswrapper
| 1
|
12782221
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ._version import __version__
from .siswrapper import Siswrapper
| 1.132813
| 1
|
asset-metadata/src/main.py
|
Screenly/playground
| 0
|
12782222
|
"""
Example headers
{
"X-Screenly-hostname": "srly-jmar75ko6xp651j",
"X-Screenly-screen-name": "dizzy cherry",
"X-Screenly-location-name": "Cape Town",
"X-Screenly-hardware": "x86",
"X-Screenly-version": "v2",
"X-Screenly-lat": "-33.925278",
"X-Screenly-lng": "18.423889",
"X-Screenly-tags": "srly-jmar75ko6xp651j,custom-label"
}"""
from os import environ
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def render_metadata_headers():
return render_template(
"metadata_headers.html",
headers=request.headers,
apiKey=environ.get("GOOGLE_MAPS_API_KEY"),
)
if __name__ == "__main__":
app.run(host="0.0.0.0")
| 2.546875
| 3
|
pages/themes/beginners/exceptions/Task_and_HW/get_user_data_exception_handling.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
| 0
|
12782223
|
<reponame>ProgressBG-Python-Course/ProgressBG-VC2-Python<filename>pages/themes/beginners/exceptions/Task_and_HW/get_user_data_exception_handling.py<gh_stars>0
def get_string_from_user(msg):
while True:
try:
user_name = input(msg)
except:
print("\n***Oops, something went wrong! Try again!\n")
else:
return user_name
def get_number_from_user(msg):
while True:
try:
number = int(input(msg))
except ValueError:
print("***Enter a number, please!")
except:
print("\n***Oops, something went wrong! Try again!\n")
else:
return number
user_data = {}
user_data["name"] = get_string_from_user("Enter your name, please: ")
user_data["height"] = get_number_from_user("I need to know your height in centimetres: ")
user_data["weight"] = get_number_from_user("And your weight in kilograms: ")
print(user_data)
| 3.21875
| 3
|
physicellToXYZ.py
|
hallba/PhysicellVMD
| 0
|
12782224
|
import numpy as np
from scipy.io import loadmat # this is the SciPy module that loads mat-files
import matplotlib.pyplot as plt
from datetime import datetime, date, time
import pandas as pd
import math
'''
#Contains "basic_agents"
cells = loadmat("output00000064_cells.mat")
#contains "multiscale_microenvironment"
microenvironment = loadmat("output00000064_microenvironment0.mat")
'''
def filenameFormat(n):
result = "data/output{:08d}_cells_physicell.mat".format(n)
return(result)
def writeXYZ(firstFrame,lastFrame):
with open("organoid.xyz",'w') as output:
for frame in range(firstFrame,lastFrame+1):
#Transposed so we can iterate
allCells = loadmat(filenameFormat(frame))['cells'].transpose()
#filter out absorbed organoids
cellData = allCells[(allCells[:,4] > 0)]
print(len(cellData),file=output)
print("Physicell organoid simulation",file=output)
for cell in cellData:
'''
Modified xyz format- could be extended further
x y z user2 user3 vx vy user
user is radius- important for size rendering
'''
x = cell[1]
y = cell[2]
z = cell[3]
#convert from the volume
radius = (3*cell[4]/(4*math.pi))**(1/3) #user
#why are the volumes in frame 64 mostly 2494? Dead, single cell organoids?
#suspect diffcells and stem cells are mixed up- 2494 organoids have 0 diffcells and 1 stemcell
#what are 0 volume organoids?
stemcells = cell[29] #user2
diffcells = cell[28] #user3
attached = cell[27]
fusions = cell[31]
#do not change the position of x,y,z and radius
result = "CA {x} {y} {z} {user} {user3} {user2} {vx} {vy}".format(x=x,y=y,z=z,user2=stemcells,vx=diffcells,user3=attached,vy=fusions,user=radius)
print(result,file=output)
writeXYZ(0,65)
| 2.59375
| 3
|
webdriver_test_tools/testcase/ie.py
|
connordelacruz/webdriver-test-tools
| 5
|
12782225
|
<filename>webdriver_test_tools/testcase/ie.py<gh_stars>1-10
from .webdriver import *
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class IETestCase(WebDriverTestCase):
"""Implementation of :class:`WebDriverTestCase
<webdriver_test_tools.testcase.webdriver.WebDriverTestCase>` using Internet
Explorer webdriver
`Driver info
<https://github.com/SeleniumHQ/selenium/wiki/InternetExplorerDriver>`__
.. note::
This driver is platform-specific, so it is disabled by default. It can
be enabled in ``<test_package>/config/browser.py`` by setting the
corresponding value in :attr:`BrowserConfig.ENABLED_BROWSERS
<webdriver_test_tools.config.browser.BrowserConfig.ENABLED_BROWSERS>`
to ``True``.
"""
DRIVER_NAME = 'Internet Explorer'
SHORT_NAME = 'ie'
CAPABILITIES = DesiredCapabilities.INTERNETEXPLORER.copy()
# Set version
CAPABILITIES['version'] = '11'
def driver_init(self):
return self.WebDriverConfig.get_ie_driver()
| 2.15625
| 2
|
news/tests.py
|
VanirLab/VOS
| 0
|
12782226
|
from django.core import mail
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User
from news.models import News
class NewsTest(TestCase):
def test_feed(self):
response = self.client.get('/feeds/news/')
self.assertEqual(response.status_code, 200)
def test_sitemap(self):
response = self.client.get('/sitemap-news.xml')
self.assertEqual(response.status_code, 200)
def test_news_sitemap(self):
response = self.client.get('/news-sitemap.xml')
self.assertEqual(response.status_code, 200)
def test_newsitem(self):
response = self.client.get('/news/404', follow=True)
self.assertEqual(response.status_code, 404)
class NewsCrud(TransactionTestCase):
def setUp(self):
password = '<PASSWORD>'
self.user = User.objects.create_superuser('admin',
'<EMAIL>',
password)
self.client.post('/login/', {
'username': self.user.username,
'password': password
})
def tearDown(self):
News.objects.all().delete()
self.user.delete()
def create(self, title='Bash broken', content='Broken in [testing]', announce=False):
data = {
'title': title,
'content': content,
}
if announce:
data['send_announce'] = 'on'
return self.client.post('/news/add/', data, follow=True)
def testCreateItem(self):
title = 'Bash broken'
response = self.create(title)
self.assertEqual(response.status_code, 200)
news = News.objects.first()
self.assertEqual(news.author, self.user)
self.assertEqual(news.title, title)
def testView(self):
self.create()
news = News.objects.first()
response = self.client.get(news.get_absolute_url())
self.assertEqual(response.status_code, 200)
def testRedirectId(self):
self.create()
news = News.objects.first()
response = self.client.get('/news/{}'.format(news.id), follow=True)
self.assertEqual(response.status_code, 200)
def testSendAnnounce(self):
title = 'New glibc'
self.create(title, announce=True)
self.assertEqual(len(mail.outbox), 1)
self.assertIn(title, mail.outbox[0].subject)
def testPreview(self):
response = self.client.post('/news/preview/', {'data': '**body**'}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual('<p><strong>body</strong></p>', response.content.decode())
| 2.265625
| 2
|
utils.py
|
ktho22/tacotron2
| 0
|
12782227
|
<reponame>ktho22/tacotron2
import numpy as np
from scipy.io.wavfile import read
import torch
import time
import os
from os.path import join, exists
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(filename, split="|"):
with open(filename, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
def set_savepath(message):
today = time.strftime('%y%m%d')
savepath = join('result', '{}-{}'.format(today, message))
if not exists(savepath):
os.makedirs(savepath)
os.system('cp -r *.py dataset {}'.format(savepath))
elif message=='test':
os.system("rm -rf {}/*".format(savepath))
else:
input("Path already exists, wish to continue?")
os.system("rm -rf {}/*".format(savepath))
os.system('cp -r *.py dataset {}'.format(savepath))
return savepath
| 2.21875
| 2
|
_pkg_Studios/pkgStudio_kuhq/mod_KuWrite.py
|
tianlunjiang/_NukeStudio_v2
| 6
|
12782228
|
<reponame>tianlunjiang/_NukeStudio_v2<filename>_pkg_Studios/pkgStudio_kuhq/mod_KuWrite.py
def _version_():
ver='''
version 1.1
- add more render types
- fix precomp render directory problem (`render/` to `elements/`)
- add render dialog UI with threading
- fixing naming error in render file, avoid `..._comp_comp_...`
- automatically check latest version
version 1.0
- auto set directory and type and versioning
'''
return ver
__VERSION__=1.1
#------------------------------------------------------------------------------
#-Import Modules
#------------------------------------------------------------------------------
import nuke, nukescripts, os
import slate
import shutil
import subprocess
from kputl import joinPath
#------------------------------------------------------------------------------
#-Global Variables
#------------------------------------------------------------------------------
RENDER_TYPE = ['comp', 'precomp', 'lookdev', 'backplate', 'lgtSlap', 'delivery']
NO_PASSNAME = ['comp', 'lookdev', 'backplate', 'lgtSlap', 'delivery']
TYPE_CONFIG = {
'comp': {'DIR': slate.RENDER_DIR, 'CH': 'rgb', 'EXT': 'exr', 'CS': 0},
'precomp': {'DIR': slate.ELEMENTS_DIR, 'CH': 'all', 'EXT': 'exr', 'CS': 0},
'lookdev': {'DIR': slate.RENDER_DIR, 'CH': 'rgb', 'EXT': 'exr', 'CS': 0},
'backplate': {'DIR': slate.ELEMENTS_DIR, 'CH': 'rgba', 'EXT': 'jpg', 'CS': 'Output - sRGB'},
'lgtSlap': {'DIR': slate.RENDER_DIR, 'CH': 'rgb', 'EXT': 'exr', 'CS': 0},
'delivery': {'DIR': slate.DELIVERY_DIR, 'CH': 'rgb', 'EXT': 'exr', 'CS': 'Output - sRGB'}
}
PADDING_VER, PADDING_FRAME = slate.SHOW_CONFIG['padding']
PADDING_FRAME = '%0{}d'.format(int(PADDING_FRAME))
VERSION_LABEL = {
'delivery': "<font color='Gold'>(version matching latest comp version)</>",
'new_version': "<font color='LimeGreen'>(new version)</>"
}
#------------------------------------------------------------------------------
#-Supporting Function
#------------------------------------------------------------------------------
def image_group_path(verType, ver):
'''create image group path, with scriptcopy
@verType: verType of the file (str)
@ver: file version (int)
return: [out_exr, out_scriptcopy] full file path (list)
'''
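    # Illustrative example (hypothetical show/shot names, 3-digit version padding):
    #   image_group_path('precomp_fg', 3) ->
    #   <ELEMENTS_DIR>/<show>_<shot>_precomp_fg_v003/precomp_fg/<show>_<shot>_precomp_fg_v003.%04d.exr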
# Type without passname
just_verType = verType.split('_')[0]
versionName, versionDir, out_file, out_scriptcopy = None,None,None,None
def _makeFile(versionName, subdir):
'''sets out values
@versionName: name of the version, format differs between delivery and non-delivery, ie. show_shot_type_pass_v###
@subdir: sub-directory under version directory
'''
# Version directory
versionDir = joinPath(TYPE_CONFIG[just_verType]['DIR'], versionName)
out_file = joinPath(
versionDir,
subdir,
'{versionName}.{frame}.{ext}'.format(versionName=versionName,frame=PADDING_FRAME,ext=TYPE_CONFIG[just_verType]['EXT'])
)
# Output file scriptcopy
out_scriptcopy = joinPath(versionDir,'%s.scriptcopy.nk' % versionName)
return out_file, out_scriptcopy
# Set values
if just_verType == 'delivery':
# Delivery version
versionName = '{show}_{shot}_comp_v{ver}.delivery'.format(
show=slate.SHOW_CONFIG['kp_show'],
shot=slate.SHOT_CONFIG['kp_shot'],
ver=str(ver).zfill(int(PADDING_VER))
)
out_file, out_scriptcopy = _makeFile(versionName, TYPE_CONFIG[just_verType]['EXT'])
else:
# Non-Delivery version
versionName = '{show}_{shot}_{verType}_v{ver}'.format(
show=slate.SHOW_CONFIG['kp_show'],
shot=slate.SHOT_CONFIG['kp_shot'],
verType=verType,
ver=str(ver).zfill(int(PADDING_VER))
)
out_file, out_scriptcopy = _makeFile(versionName, verType)
print("\n==========\n- version:\n%s\n\n- file:\n%s\n\n- scriptcopy:\n%s\n==========" % (versionName, out_file, out_scriptcopy))
return out_file, out_scriptcopy
def get_versions(verType):
'''get the latest version of selected version type
@verType: version type (str)
return: verLatest (int)
'''
just_verType = verType.split('_')[0]
if just_verType == 'delivery':
_dir2list = TYPE_CONFIG['comp']['DIR']
_type2look = '_comp_'
else:
_dir2list = TYPE_CONFIG[just_verType]['DIR']
_type2look = '_'+verType+'_'
ls_ver = [int(v.split('_v')[1]) for v in os.listdir(_dir2list) if _type2look in v]
ls_ver = [0] if len(ls_ver)==0 else ls_ver
return ls_ver
def get_type(node):
''' get the type of the node'''
out_type = node['mu_type'].value()
out_type = out_type+'_'+node['tx_passname'].value() if out_type not in NO_PASSNAME else out_type
return out_type
def set_versions(node, out_type):
'''sets the list of version and labels
@node: node (obj)
@out_type: ouput type (str)
'''
if out_type == 'delivery':
verNew = max(get_versions(out_type))
node['mu_ver'].setEnabled(False)
node['tx_versionLabel'].setValue(VERSION_LABEL['delivery'])
else:
verNew = max(get_versions(out_type))+1
node['mu_ver'].setEnabled(True)
node['tx_versionLabel'].setValue(VERSION_LABEL['new_version'])
ls_ver = [str(v) for v in range(1, verNew+1)]
node['mu_ver'].setValues(ls_ver)
# node['mu_ver'].setValue(max(ls_ver))
def set_file(node, out_type, ver):
'''set output file and settings
@node: node (obj)
@out_type: output type (str)
@ver: version to set (int)
'''
out_file, out_scriptcopy = image_group_path(out_type, ver)
node['file'].setValue(out_file)
node['tx_scriptcopy'].setValue(out_scriptcopy)
node['channels'].setValue(TYPE_CONFIG[out_type.split('_')[0]]['CH'])
node['colorspace'].setValue(TYPE_CONFIG[out_type.split('_')[0]]['CS'])
# print "==========\n\n%s\n\n%s-%s\n\n==========" % (
# file, nuke.Root()['first_frame'].value(), nuke.Root()['last_frame'].value()
# )
def set_write(node):
'''sets settings for given node
@node: (node) Write node
'''
out_type = get_type(node)
set_versions(node, out_type)
verLatest = max(get_versions(node['mu_type'].value()))+1
node['mu_ver'].setValue(verLatest)
set_file(node, out_type, verLatest)
#------------------------------------------------------------------------------
#-Main Function
#------------------------------------------------------------------------------
def KuWrite():
'''Adding inputs for auto generate output path'''
node = nuke.createNode('Write')
node.setName('KuWrite')
node.knob('file').setEnabled(False)
k_pipeline = nuke.Text_Knob('kupipeline', 'kuWrite', 'kuWrite') # Ku Pipeline Identifier
k_tab = nuke.Tab_Knob('tb_KuWrite', 'KuWrite')
k_title = nuke.Text_Knob('tx_title', '', '<h1><br>KuWrite</h1>')
k_show = nuke.Text_Knob('tx_show', '<h3>show</h3>', slate.SHOW_CONFIG['kp_show'])
k_shot = nuke.Text_Knob('tx_shot', '<h3>shot</h3>', slate.SHOT_CONFIG['kp_shot'])
k_type = nuke.Enumeration_Knob('mu_type', '<h3>type</h3>', RENDER_TYPE)
k_elements = nuke.String_Knob('tx_passname', '_', 'NewPass')
k_ver = nuke.Enumeration_Knob('mu_ver', '<h3>version</h3>', [' '])
k_latest = nuke.Text_Knob('tx_versionLabel', '', VERSION_LABEL['new_version'])
k_div_title = nuke.Text_Knob('divider','')
k_div = nuke.Text_Knob('divider',' ')
k_set = nuke.PyScript_Knob('bt_set', '<b>⭮ Check Versions</b>', 'mod_KuWrite.set_write(nuke.thisNode())')
k_render = nuke.PyScript_Knob('bt_render', '<b>Render</b>', 'mod_KuWrite.render_node(nuke.thisNode())')
k_scriptcopy = nuke.String_Knob('tx_scriptcopy', 'scriptcopy dir', '')
k_elements.clearFlag(nuke.STARTLINE)
k_elements.setVisible(False)
k_set.setFlag(nuke.STARTLINE)
k_latest.clearFlag(nuke.STARTLINE)
k_render.clearFlag(nuke.STARTLINE)
k_pipeline.setVisible(False)
k_scriptcopy.setVisible(False)
for k in [k_tab, k_pipeline, k_title, k_div_title, k_show, k_shot, k_type, k_elements, k_ver, k_latest, k_div, k_set, k_render, k_scriptcopy]:
node.addKnob(k)
mod = os.path.basename(__file__).split('.')[0]
node['knobChanged'].setValue('%s.onChange()' % mod)
set_write(node)
#------------------------------------------------------------------------------
#-Knob Changed
#------------------------------------------------------------------------------
def onChange():
'''knob change function to call'''
n = nuke.thisNode()
k = nuke.thisKnob()
if k.name() in ['mu_type','tx_passname']:
verType = get_type(n)
set_versions(n, verType)
if n['mu_type'].value() not in NO_PASSNAME:
n['tx_passname'].setVisible(True)
else:
n['tx_passname'].setVisible(False)
n['mu_ver'].setValue(max(n['mu_ver'].values()))
set_file(n, verType, n['mu_ver'].value())
if k.name() in ['mu_ver']:
verType = get_type(n)
if n['mu_ver'].value() == max(n['mu_ver'].values()):
n['tx_versionLabel'].setVisible(True)
else:
n['tx_versionLabel'].setVisible(False)
set_file(n, verType, n['mu_ver'].value())
if k.name() == 'mu_type' and k.value() == 'delivery':
n['tile_color'].setValue(12533759)
else:
n['tile_color'].setValue(0)
#------------------------------------------------------------------------------
#-Rendering Node
#------------------------------------------------------------------------------
def render_node(node):
'''launch render'''
out_path = node['file'].value()
out_scriptcopy = node['tx_scriptcopy'].value()
startFrame = int(nuke.Root()['first_frame'].value())
endFrame = int(nuke.Root()['last_frame'].value())
def _soloWrite(sel_node, all_enabled_write, mode='solo'):
if mode == 'solo':
for s in all_enabled_write:
if s != sel_node.name():
print('node disabled---' + s )
nuke.toNode(s)['disable'].setValue(True)
elif mode == 'reverse':
for s in all_enabled_write:
nuke.toNode(s)['disable'].setValue(False)
print('node enabled---' + s )
askMessage = "Render Node: %s\nFile: %s\nFramerage: %s-%s\n" % (
node.name(), os.path.basename(node['file'].value()), startFrame, endFrame)
c = nuke.ask(askMessage)
if c:
if not os.path.exists(os.path.dirname(out_path)):
p = os.path.dirname(out_path)
os.makedirs(p)
print("out path created --- %s" % p)
if not os.path.exists(os.path.dirname(out_scriptcopy)):
s = os.path.dirname(out_scriptcopy)
os.makedirs(s)
print("out scriptcopy created --- %s" % s)
all_enabled_write = [n.name() for n in nuke.allNodes('Write') if n['disable'].value() == False]
_soloWrite(node, all_enabled_write, mode='solo')
nuke.scriptSave()
thisScript_path = nuke.scriptName()
shutil.copy2(thisScript_path, out_scriptcopy)
# nuke.render(node, startFrame, endFrame)
exe = joinPath(nuke.EXE_PATH).replace('/', '\\')
cmd_str = """start cmd /k "{exe}" -t -m 22 -xi {script} {start}-{end}""".format(
exe=exe,
node=node.name(),
script=thisScript_path,
start=startFrame,
end=endFrame
)
subprocess.Popen(cmd_str, shell=True)
_soloWrite(node, all_enabled_write, mode='reverse')
else:
print("user cancelled")
# KuWrite()
# render_node(nuke.toNode('KuWrite1'))
| 1.484375
| 1
|
tests/functional/adapter/test_basic.py
|
dbt-labs/dbt-snowflake
| 51
|
12782229
|
<reponame>dbt-labs/dbt-snowflake<filename>tests/functional/adapter/test_basic.py
import pytest
from dbt.tests.adapter.basic.test_base import BaseSimpleMaterializations
from dbt.tests.adapter.basic.test_singular_tests import BaseSingularTests
from dbt.tests.adapter.basic.test_singular_tests_ephemeral import (
BaseSingularTestsEphemeral,
)
from dbt.tests.adapter.basic.test_empty import BaseEmpty
from dbt.tests.adapter.basic.test_ephemeral import BaseEphemeral
from dbt.tests.adapter.basic.test_incremental import BaseIncremental
from dbt.tests.adapter.basic.test_generic_tests import BaseGenericTests
from dbt.tests.adapter.basic.test_snapshot_check_cols import BaseSnapshotCheckCols
from dbt.tests.adapter.basic.test_snapshot_timestamp import BaseSnapshotTimestamp
from dbt.tests.adapter.basic.test_adapter_methods import BaseAdapterMethod
from dbt.tests.adapter.basic.test_docs_generate import BaseDocsGenerate
from dbt.tests.adapter.basic.expected_catalog import base_expected_catalog, no_stats
from tests.functional.adapter.expected_stats import snowflake_stats
class TestSimpleMaterializationsSnowflake(BaseSimpleMaterializations):
pass
class TestSingularTestsSnowflake(BaseSingularTests):
pass
class TestSingularTestsEphemeralSnowflake(BaseSingularTestsEphemeral):
pass
class TestEmptySnowflake(BaseEmpty):
pass
class TestEphemeralSnowflake(BaseEphemeral):
pass
class TestIncrementalSnowflake(BaseIncremental):
pass
class TestGenericTestsSnowflake(BaseGenericTests):
pass
class TestSnapshotCheckColsSnowflake(BaseSnapshotCheckCols):
pass
class TestSnapshotTimestampSnowflake(BaseSnapshotTimestamp):
pass
class TestBaseAdapterMethodSnowflake(BaseAdapterMethod):
@pytest.fixture(scope="class")
def equal_tables(self):
return ["MODEL", "EXPECTED"]
class TestDocsGenerateSnowflake(BaseDocsGenerate):
@pytest.fixture(scope="class")
def get_role(self, project):
return project.run_sql('select current_role()', fetch='one')[0]
@pytest.fixture(scope="class")
def expected_catalog(self, project, get_role):
return base_expected_catalog(
project,
role=get_role,
id_type="NUMBER",
text_type="TEXT",
time_type="TIMESTAMP_NTZ",
view_type="VIEW",
table_type="BASE TABLE",
model_stats=no_stats(),
seed_stats=snowflake_stats(),
case=lambda x: x.upper(),
case_columns=False,
)
| 1.757813
| 2
|
sim/lib/inference.py
|
MPI-SWS/simulator
| 0
|
12782230
|
import time
import bisect
import numpy as np
import pandas as pd
import networkx as nx
import scipy
import scipy.optimize
import scipy as sp
import os
import matplotlib.pyplot as plt
import random
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction, Colours
import asyncio
import threading
import json
import tornado.ioloop
import tornado.httpserver
from tornado.web import RequestHandler
import requests
from lib.priorityqueue import PriorityQueue
from lib.dynamics import DiseaseModel
from lib.mobilitysim import MobilitySimulator
from bayes_opt import BayesianOptimization
from lib.parallel import *
SIMPLIFIED_OPT = True
def format_opt_to_sim(opt_params, n_betas):
'''
Convert bayes_opt parameter format into our format
'''
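    # e.g. format_opt_to_sim({'beta': 0.5, 'alpha': 0.1, 'mu': 0.25}, n_betas=3)
    #      -> {'betas': [0.5, 0.5, 0.5], 'alpha': 0.1, 'mu': 0.25}   (with SIMPLIFIED_OPT)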
if SIMPLIFIED_OPT:
return {
'betas' : [opt_params['beta'] for _ in range(n_betas)],
'alpha': opt_params['alpha'],
'mu': opt_params['mu']
}
else:
sim_params = {
'betas' : [None for _ in range(n_betas)],
'alpha': None,
'mu': None
}
for k, v, in opt_params.items():
if 'betas' in k:
sim_params['betas'][int(k[5:])] = v
else:
sim_params[k] = v
return sim_params
def format_sim_to_opt(sim_params):
'''
Convert our format into bayes opt format
'''
if SIMPLIFIED_OPT:
return {
'beta' : sim_params['betas'][0],
'alpha': sim_params['alpha'],
            'mu': sim_params['mu']
}
else:
opt_params = {'betas' + str(i) : p for i, p in enumerate(sim_params['betas'])}
opt_params.update({
'alpha': sim_params['alpha'],
'mu': sim_params['mu']
})
return opt_params
def convert_timings_to_daily(timings, time_horizon):
'''
Converts batch of size N of timings of M individuals in a time horizon
of `time_horizon` in hours into daily aggregate cases
Argument:
timings : np.array of shape (N, M)
    Returns:
        np.array of shape (N, T / 24)
'''
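    # e.g. timings = np.array([[5., 30.]]) with time_horizon=48 -> [[1, 1]]
    #      (one case on day 0, one on day 1)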
if len(timings.shape) == 1:
timings = np.expand_dims(timings, axis=0)
arr = np.array([
np.sum((timings >= t * 24) &
(timings < (t + 1) * 24), axis=1)
for t in range(0, int(time_horizon // 24))]).T
return arr
def convert_timings_to_cumulative_daily(timings, time_horizon):
'''
Converts batch of size N of timings of M individuals in a time horizon
of `time_horizon` in hours into daily cumulative aggregate cases
Argument:
timings : np.array of shape (N, M)
    Returns:
        np.array of shape (N, T / 24)
'''
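    # e.g. timings = np.array([[5., 30.]]) with time_horizon=48 -> [[1, 2]]
    #      (cumulative counts: 1 case by end of day 0, 2 by end of day 1)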
if len(timings.shape) == 1:
timings = np.expand_dims(timings, axis=0)
cumulative = np.array([
np.sum((timings < (t + 1) * 24), axis=1)
for t in range(0, int(time_horizon // 24))]).T
return cumulative
def loss_daily(predicted_confirmed_times, targets_daily, time_horizon, power=2.0):
'''
Daily loss:
total squared error between average predicted daily cases and true daily cases
'''
# predicted_confirmed_daily = convert_timings_to_daily(predicted_confirmed_times, time_horizon)
predicted_confirmed_daily = convert_timings_to_cumulative_daily(predicted_confirmed_times, time_horizon)
ave_predicted_confirmed_daily = predicted_confirmed_daily.mean(axis=0)
loss = np.power(np.abs(ave_predicted_confirmed_daily - targets_daily), power).mean()
return loss
def multimodal_loss_daily(preds, weights, targets, time_horizon, power=2.0):
'''
Multimodal Daily loss:
Same as loss_daily but considering several weighted metrics (e.g. positive, recovered, deceased)
'''
loss = 0
for w, pred, target in zip(weights, preds, targets):
# pred = convert_timings_to_daily(pred, time_horizon)
pred = convert_timings_to_cumulative_daily(pred, time_horizon)
ave_pred = pred.mean(axis=0)
loss += w * np.power(np.abs(ave_pred - target), power).mean()
return loss
def make_loss_function(mob_settings, distributions, targets, time_horizon, param_bounds,
initial_seeds, testing_params, random_repeats, num_site_types,
cpu_count, measure_list, loss, num_people, site_loc, home_loc, c, extra_params=None):
'''
Returns function executable by optimizer with desired loss
'''
with open(f'logger_{c}.txt', 'w+') as logfile:
logfile.write(f'Log run: seed = {c}\n\n')
def f(opt_params):
# convert bayes_opt parameter format into our format
sim_params = format_opt_to_sim(opt_params, n_betas=num_site_types)
# launch in parallel
summary = launch_parallel_simulations(
mob_settings=mob_settings,
distributions=distributions,
random_repeats=random_repeats,
cpu_count=cpu_count,
params=sim_params,
initial_seeds=initial_seeds,
testing_params=testing_params,
measure_list=measure_list,
max_time=time_horizon,
num_people=num_people,
site_loc=site_loc,
home_loc=home_loc,
verbose=False)
if loss == 'loss_daily':
return summary.state_started_at['posi']
elif loss == 'multimodal_loss_daily':
return (summary.state_started_at['posi'], summary.state_started_at['resi'], summary.state_started_at['dead'])
else:
raise ValueError('Unknown loss function')
if loss == 'loss_daily':
def loss_function(**kwargv):
predicted_confirmed_times = f(kwargv)
l = loss_daily(
predicted_confirmed_times=predicted_confirmed_times,
targets_daily=targets,
time_horizon=time_horizon,
power=2.0)
ave_pred = convert_timings_to_cumulative_daily(
predicted_confirmed_times, time_horizon).mean(axis=0)
loginfo = f'{-l} ' + str(kwargv) + '\n'
with open(f'logger_{c}.txt', 'a') as logfile:
logfile.write(loginfo)
# bayes_opt maximizes
return - l
return loss_function
elif loss == 'multimodal_loss_daily':
# here `extra_params` are weights
if extra_params:
weights = extra_params['weights']
else:
weights = np.ones(len(targets))
def loss_function(**kwargv):
preds = f(kwargv)
l = multimodal_loss_daily(
preds=preds, weights=weights, targets=targets,
time_horizon=time_horizon, power=2.0)
# bayes_opt maximizes
return - l
return loss_function
else:
raise ValueError('Unknown loss function')
| 1.96875
| 2
|
example/todos/models.py
|
pawnhearts/django-reactive
| 21
|
12782231
|
<reponame>pawnhearts/django-reactive<gh_stars>10-100
from django.db import models
from django_reactive.fields import ReactJSONSchemaField
from .constants import TODO_SCHEMA, TODO_UI_SCHEMA, set_task_types
class Todo(models.Model):
"""
A collection of task lists for a todo.
"""
name = models.CharField(max_length=255)
task_lists = ReactJSONSchemaField(
help_text="Task lists",
schema=TODO_SCHEMA,
ui_schema=TODO_UI_SCHEMA,
on_render=set_task_types,
extra_css=["css/extra.css"],
extra_js=["js/extra.js"],
)
class TaskType(models.Model):
"""
A task type used to dynamically populate a todo list schema field dropdown.
"""
name = models.CharField(max_length=255)
| 2.0625
| 2
|
caelus/post/__init__.py
|
sayerhs/cpl
| 0
|
12782232
|
# -*- coding: utf-8 -*-
"""
Provides log analysis and plotting utilities
.. currentmodule:: caelus.post
.. autosummary::
:nosignatures:
~funcobj.functions.PostProcessing
~logs.SolverLog
~plots.CaelusPlot
"""
from .logs import SolverLog
from .funcobj import PostProcessing
| 1.09375
| 1
|
modules/info.py
|
merlinfuchs/clancy
| 0
|
12782233
|
<filename>modules/info.py
from dbots.cmd import *
from dbots import rest
class InfoModule(Module):
@Module.command()
async def avatar(self, ctx, user: CommandOptionType.USER):
"""
Get the avatar url for an user
"""
resolved = ctx.resolved.users.get(user)
if resolved is None:
try:
resolved = await ctx.bot.http.get_user(user)
except rest.HTTPNotFound:
await ctx.respond("I'm unable to find this user :(", ephemeral=True)
await ctx.respond(f"**{resolved.name}**s **Avatar**\n{resolved.avatar_url}", ephemeral=True)
# @dc.Module.command()
async def user(self, ctx, user: CommandOptionType.USER):
"""
Get information about a user
"""
resolved = ctx.resolved.users.get(user)
if resolved is None:
try:
resolved = await ctx.bot.http.get_user(user)
except rest.HTTPNotFound:
await ctx.respond("I'm unable to find this user :(", ephemeral=True)
await ctx.respond()
# @dc.Module.command()
async def role(self, ctx, role: CommandOptionType.ROLE):
"""
Get information about a role
"""
print(ctx.resolved)
# @dc.Module.command()
async def channel(self):
"""
Get information about a channel
"""
# @dc.Module.command()
async def server(self):
"""
Get information about a server
"""
| 2.515625
| 3
|
ctf/nsec/2018/reverse/MarsAnalytica-20/solveMA.py
|
Sylhare/Flag
| 0
|
12782234
|
<filename>ctf/nsec/2018/reverse/MarsAnalytica-20/solveMA.py
# https://blog.rpis.ec/2018/05/northsec-2018-marsanalytica.html
import angr
def constrain_stdin(st):
for _ in range(19):
k = st.posix.files[0].read_from(1)
st.solver.add(k > 0x20)
st.solver.add(k < 0x7f)
st.posix.files[0].seek(0)
st.posix.files[0].length = 19
p = angr.Project("./MarsAnalytica")
s = p.factory.entry_state(add_options=angr.options.unicorn)
constrain_stdin(s)
sm = p.factory.simulation_manager(s)
sm.step(until=lambda lpg: len(lpg.active) > 1)
while len(sm.deadended) == 0:
sm.drop(stash='active', filter_func=lambda s: s != sm.active[0])
print(sm.one_active.state.posix.dumps(0))
sm.step(until=lambda lpg: len(lpg.deadended) > 1 or len(lpg.active) > 1)
| 2.421875
| 2
|
test/downloadAckunMusic.py
|
1254517211/test-1
| 5
|
12782235
|
#!/bin/env python
# coding=utf8
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
import base64
import requests
import json
import urllib
import time
import random
import datetime
import hashlib
# Get the Content-Type field from the response headers
def urlOpenGetHeaders(url):
req = urllib.request.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')
page = urllib.request.urlopen(req)
html = page.getheader('Content-Type')
return html
# Fetch the page source of the given url
def urlOpen(url):
req = urllib.request.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')
if False:
proxies = ['192.168.127.12:8123', '172.16.31.10:8123', '192.168.3.11:8118']
proxy = random.choice(proxies)
proxy_support = urllib.request.ProxyHandler({'http':proxy})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')]
urllib.request.install_opener(opener)
page = urllib.request.urlopen(url)
else:
page = urllib.request.urlopen(req)
html = page.read()
return html
# Download to a local file, given a title, extension and url
def download(title,post,url):
filename = title + "." +post
with open(filename, 'wb') as f:
music = urlOpen(url)
f.write(music)
def getPostStr(pKey, song_id):
rsaKey = RSA.importKey(pKey)
cipher = Cipher_pkcs1_v1_5.new(rsaKey)
encry = cipher.encrypt(song_id)
return base64.b64encode(encry)
# Get the actual url of the song
def getSongRealUrl(songidVal,timeVal,md5Val):
url = 'http://www.aekun.com/api/getMusicbyid/'
r = requests.post(url, {
'songid': songidVal,
't':timeVal,
'sign':md5Val
})
return r.content
# Write the needed data to a local file
def writeStrToFile(writeStr):
print(writeStr)
with open("downurl.txt","a",encoding="UTF-8") as f:
f.write(writeStr)
f.write("\n")
# Get the id of the newest recommended song
def getMaxSongs():
url = "http://www.aekun.com/new/"
html = urlOpen(url).decode('utf-8')
a = html.find('<tr musicid=') + 13
b = html.find('"',a)
result = int(html[a:b])
return result
# Get the largest song ID fetched so far
def getNowSongId(songIdInt):
f = open("downurl.txt","r",encoding="UTF-8")
    lines = f.readlines()  # read the whole file
for line in lines:
if line.find('|')!=-1:
line = line.split("|")
line = int(line[0])
if line > songIdInt:
songIdInt = line
return songIdInt
# Main routine for downloading songs
def downloadMusicMain():
    # Load the RSA public key (pKey)
f = open('public.pem')
pKey = f.read()
f.close()
songIdInt = 3509719
songIdInt = getNowSongId(songIdInt)
songIdInt = songIdInt + 1
maxSong = getMaxSongs()
print("start from:%s,end with:%s"%(songIdInt,maxSong))
# 3505251 |10 |2015084685 |▌▌Chillout ▌▌Losing Ground Michael FK & Groundfold -----3505251.mp3
    while True:
if songIdInt > maxSong:
break
time.sleep(10)
try:
urlOpen("http://www.aekun.com/song/" + str(songIdInt))
except ConnectionResetError:
print("Error occur")
songId = str(songIdInt).encode('utf-8')
print(songId)
songidVal = getPostStr(pKey, songId)
songidVal = songidVal.decode('utf-8')
t = time.time()
t = int(round(t * 1000))
timeVal = getPostStr(pKey,str(t).encode('utf-8'))
timeVal = timeVal.decode('utf-8')
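        # The sign parameter is the MD5 hex digest of "<songid>|<timestamp>",
        # RSA-encrypted and base64-encoded like the other fields.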
m2 = hashlib.md5()
src = str(songIdInt) + "|" + str(t)
m2.update(src.encode("utf8"))
t = m2.hexdigest()
md5Val = getPostStr(pKey,str(t).encode('utf-8'))
md5Val = md5Val.decode('utf-8')
try:
print(songidVal)
print(timeVal)
print(md5Val)
ret = getSongRealUrl(songidVal,timeVal,md5Val)
except (ConnectionError , ConnectionResetError):
print("ConnectionError")
time.sleep(3)
continue
ret = ret.decode('utf-8')
#ret = '{"state":"success","message":"ok","action":null,"data":{"url":"http://us.aekun.com/upload/75AAB77BC2D16123F9F2E8B6C68FCB8E.mp3","song_name":"就算遇到挫折、受到嘲笑,也要勇敢的向前跑!","coll":0,"singername":"小哥","singerpic":"https://m4.aekun.com/user_l_5973822_20170513135220.png"}}'
print(ret)
ret = json.loads(ret)
print(ret)
status = ret['state']
if status != 'success':
print(status)
break
downUrl = ret['data']
if isinstance(downUrl,str):
if downUrl.strip() == '':
html = urlOpen("http://www.aekun.com/song/" + str(songIdInt)).decode('utf-8')
songIdInt = songIdInt + 1
continue
elif isinstance(downUrl,dict):
pass
else:
continue
downUrl = ret['data']['url']
if downUrl is None:
continue
if downUrl.strip() == "":
continue
post = downUrl[-3:]
post = post.lower()
if post != 'mp3' and post != 'm4a':
tmp = urlOpenGetHeaders(downUrl)
if tmp.find('mp3') != -1:
post = 'mp3'
songName = ret['data']['song_name']
writeStr = "%-10s|%-50s|%-5s|%s"%(songIdInt,songName,post,downUrl)
writeStrToFile(writeStr)
songIdInt = songIdInt + 1
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
writeStrToFile(str(now) + '\t\t\t' + str(maxSong))
if __name__ == '__main__':
downloadMusicMain()
| 2.609375
| 3
|
fiber/__init__.py
|
ehmadzubair/django-fiber
| 0
|
12782236
|
__version__ = '1.9.dev0'
| 1.070313
| 1
|
translations/migrations/0034_auto_20200423_1115.py
|
aniruddha-adhikary/translateforsg-backend
| 2
|
12782237
|
<filename>translations/migrations/0034_auto_20200423_1115.py
# Generated by Django 3.0.5 on 2020-04-23 03:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translations', '0033_populate_ordering_categories'),
]
operations = [
migrations.AddField(
model_name='language',
name='speech_code',
field=models.CharField(blank=True, db_index=True, help_text='Speech will not be generated if blank', max_length=10),
),
migrations.AddField(
model_name='language',
name='translation_code',
field=models.CharField(blank=True, db_index=True, help_text='Translation will not be generated if blank', max_length=10),
),
]
| 1.5625
| 2
|
VSR/DataLoader/Dataset.py
|
Kadantte/VideoSuperResolution
| 1,447
|
12782238
|
<reponame>Kadantte/VideoSuperResolution
# Copyright (c) 2017-2020 <NAME>.
# Author: <NAME>
# Email: <EMAIL>
# Update: 2020 - 2 - 7
import re
from concurrent import futures
from pathlib import Path
import copy
import yaml
from .VirtualFile import ImageFile, RawFile
from ..Util import Config, to_list
try:
from yaml import FullLoader as _Loader
except ImportError:
from yaml import Loader as _Loader
IMAGE_SUF = ('PNG', 'JPG', 'JPEG', 'BMP', 'TIFF', 'TIF', 'GIF')
VIDEO_SUF = {
'NV12': 'NV12',
'YUV': 'YV12',
'YV12': 'YV12',
'NV21': 'NV21',
'YV21': 'YV21',
'RGB': 'RGB'
}
def _supported_image(x: Path):
return x.suffix[1:].upper() in IMAGE_SUF
def _supported_video(x: Path):
return x.suffix[1:].upper() in VIDEO_SUF
def _supported_suffix(x: Path):
return _supported_image(x) or _supported_video(x)
class Dataset(object):
""" Make a `dataset` object
"""
def __init__(self, *folders):
self.dirs = list(map(Path, folders))
self.recursive = True
self.glob_patterns = ('*',)
self.inc_patterns = None
self.exc_patterns = None
self.as_video = False
self.compiled = None
def use_like_video_(self):
self.as_video = True
def use_like_video(self):
d = copy.copy(self)
d.compiled = None
d.use_like_video_()
return d
def include_(self, *pattern: str):
self.glob_patterns = list(pattern)
self.inc_patterns = None
def include(self, *pattern: str):
d = copy.copy(self)
d.compiled = None
d.include_(*pattern)
return d
def include_reg_(self, *reg: str):
self.inc_patterns = [re.compile(r) for r in reg]
self.glob_patterns = ('*',)
def include_reg(self, *reg: str):
d = copy.copy(self)
d.compiled = None
d.include_reg_(*reg)
return d
def exclude_(self, *reg: str):
self.exc_patterns = [re.compile(r) for r in reg]
def exclude(self, *reg: str):
d = copy.copy(self)
d.compiled = None
d.exclude_(*reg)
return d
def compile(self):
if self.compiled:
return self.compiled
files = []
def _exc(x: Path):
if self.exc_patterns:
for reg in self.exc_patterns:
if reg.search(str(x.absolute().as_posix())):
return False
return True
def _inc(x: Path):
if self.inc_patterns:
for reg in self.inc_patterns:
if reg.search(str(x.absolute().as_posix())):
return True
return False
for folder in self.dirs:
if not Path(folder).exists():
continue
nodes = []
if folder.is_file():
# if points to a file rather than a directory
nodes.append(folder)
fn_glob = Path.rglob if self.recursive else Path.glob
for pat in self.glob_patterns:
nodes += list(fn_glob(folder, pat))
if self.inc_patterns:
nodes = filter(_inc, nodes)
files += list(filter(_exc, filter(_supported_suffix, nodes)))
image_nodes = list(filter(_supported_image, files))
if not self.as_video:
self.compiled = Container(sorted(image_nodes), self.as_video)
return self.compiled
video_nodes = list(filter(_supported_video, files))
video_nodes += list(map(lambda x: x.parent, image_nodes))
video_nodes = list(set(video_nodes)) # remove duplicated nodes
self.compiled = Container(sorted(video_nodes), self.as_video)
return self.compiled
class Container(object):
"""Frames container
"""
def __init__(self, urls, is_video: bool):
assert isinstance(urls, (list, tuple))
pool = futures.ThreadPoolExecutor(4)
fs = []
self.nodes = []
def _parse_image_node(url: Path):
if url.is_dir():
for i in filter(_supported_image, url.glob('*')):
self.nodes.append(ImageFile(i, rewind=True))
elif _supported_image(url):
self.nodes.append(ImageFile(url, rewind=True))
def _parse_video_node(url: Path):
if _supported_video(url):
size = re.findall("\\d+x\\d+", url.stem)
if size:
size = [int(x) for x in size[0].split('x')]
self.nodes.append(
RawFile(url, VIDEO_SUF[url.suffix[1:].upper()], size,
rewind=True))
elif url.is_dir():
self.nodes.append(ImageFile(url))
for j in urls:
if is_video:
fs.append(pool.submit(_parse_video_node, j))
else:
fs.append(pool.submit(_parse_image_node, j))
futures.as_completed(fs)
pool.shutdown()
self.nodes = sorted(self.nodes, key=lambda x: x.path)
def __getitem__(self, item):
return self.nodes[item]
def __len__(self):
return len(self.nodes)
@property
def capacity(self):
if not self.nodes:
return 0
pos = 0
max_sz = 0
total_frames = 0
for i, n in enumerate(self.nodes):
total_frames += n.frames
if n.size() > max_sz:
max_sz = n.size()
pos = i
shape = self.nodes[pos].shape
max_bpp = 3
return shape[0] * shape[1] * max_bpp * total_frames
def load_datasets(describe_file, key=''):
"""load dataset described in YAML file"""
def _extend_pattern(url):
_url = root / Path(url)
url_p = _url
while True:
try:
if url_p.exists():
break
except OSError:
url_p = url_p.parent
continue
if url_p == url_p.parent:
break
url_p = url_p.parent
# retrieve glob pattern
url_r = str(_url.relative_to(url_p))
if url_r == '.' and url_p.is_dir():
return str(Path(url) / '**/*')
return url
def _get_dataset(desc, use_as_video=None, name=None):
dataset = Config(name=name)
for i in desc:
if i not in ('train', 'val', 'test'):
continue
if isinstance(desc[i], dict):
hr = to_list(desc[i].get('hr'))
lr = to_list(desc[i].get('lr'))
else:
hr = to_list(desc[i])
lr = []
if use_as_video:
hr_pattern = [
x if x not in all_path and x + '[video]' not in all_path else
all_path[x + '[video]'] for x in hr]
lr_pattern = [
x if x not in all_path and x + '[video]' not in all_path else
all_path[x + '[video]'] for x in lr]
else:
hr_pattern = [x if x not in all_path else all_path[x] for x in hr]
lr_pattern = [x if x not in all_path else all_path[x] for x in lr]
hr_data = Dataset(root).include(*(_extend_pattern(x) for x in hr_pattern))
lr_data = Dataset(root).include(
*(_extend_pattern(x) for x in lr_pattern)) if lr_pattern else None
hr_data.recursive = False
if lr_data is not None:
lr_data.recursive = False
if use_as_video:
hr_data.use_like_video_()
if lr_data is not None:
lr_data.use_like_video_()
setattr(dataset, i, Config(hr=hr_data, lr=lr_data))
return dataset
datasets = Config()
with open(describe_file, 'r') as fd:
config = yaml.load(fd, Loader=_Loader)
root = Path(config["Root"])
if not root.is_absolute():
# make `root` relative to the file
root = Path(describe_file).resolve().parent / root
root = root.resolve()
all_path = config["Path"]
if key.upper() in config["Dataset"]:
return _get_dataset(config["Dataset"][key.upper()], name=key)
elif key.upper() + '[video]' in config["Dataset"]:
return _get_dataset(config["Dataset"][key.upper() + '[video]'], True,
name=key)
elif key.upper() in all_path:
return _get_dataset(Config(test=all_path[key.upper()]), name=key)
elif key.upper() + '[video]' in all_path:
return _get_dataset(Config(test=all_path[key.upper() + '[video]']), True,
name=key)
for name, value in config["Dataset"].items():
if '[video]' in name:
name = name.replace('[video]', '')
datasets[name] = _get_dataset(value, True, name=name)
else:
datasets[name] = _get_dataset(value, name=name)
for name in all_path:
if '[video]' in name:
_name = name.replace('[video]', '')
datasets[_name] = _get_dataset(Config(test=all_path[name]), True,
name=_name)
else:
datasets[name] = _get_dataset(Config(test=all_path[name]), name=name)
return datasets
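# Example usage (the YAML path and dataset key below are illustrative and depend
# on the user's own datasets.yaml, they are not guaranteed by this module):
#   all_sets = load_datasets('datasets.yaml')
#   div2k = load_datasets('datasets.yaml', key='div2k')
#   train_hr_container = div2k.train.hr.compile()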
| 2.203125
| 2
|
2015/Day6.py
|
trajkan/AdventOfCode
| 0
|
12782239
|
<filename>2015/Day6.py
""" Day 6: Probably a fire hazard """
import json
import re
with open('input_d6.txt') as f:
data = f.readlines()
lights_grid = []
for x in range(1000):
for y in range(1000):
lights_grid.append([x, y, 0])
xy_regex = re.compile(r'([0-9,]*) through ([0-9,]*)')
# action = ''
for instructions in data:
if 'on' in instructions:
action = 'on'
elif 'off' in instructions:
action = 'off'
elif 'toggle' in instructions:
action = 'toggle'
first_xy = xy_regex.search(instructions).group(1)
second_xy = xy_regex.search(instructions).group(2)
x1 = int(first_xy.split(',')[0])
y1 = int(first_xy.split(',')[1])
x2 = int(second_xy.split(',')[0])
y2 = int(second_xy.split(',')[1])
delta_x = x2 - x1
delta_y = y2 - y1
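    # Apply the action to every light in the inclusive rectangle (x1..x2, y1..y2);
    # the grid is stored flat, so coordinate (x, y) lives at index x*1000 + y.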
for x in range(x1,x1+delta_x+1):
for y in range(y1, y1+delta_y+1):
pos = x*1000 + y
if action == 'on':
lights_grid[pos] = [x, y, 1]
elif action == 'off':
lights_grid[pos] = [x, y, 0]
elif action == 'toggle':
if lights_grid[pos][2] == 0:
lights_grid[pos] = [x, y, 1]
elif lights_grid[pos][2] == 1:
lights_grid[pos] =[x, y, 0]
on = 0
for light in lights_grid:
on += light[2]
print('number of light on: ', on)
""" part 2 """
lights_grid = []
for x in range(1000):
for y in range(1000):
lights_grid.append([x, y, 0])
xy_regex = re.compile(r'([0-9,]*) through ([0-9,]*)')
# action = ''
for instructions in data:
if 'on' in instructions:
action = 'on'
elif 'off' in instructions:
action = 'off'
elif 'toggle' in instructions:
action = 'toggle'
first_xy = xy_regex.search(instructions).group(1)
second_xy = xy_regex.search(instructions).group(2)
x1 = int(first_xy.split(',')[0])
y1 = int(first_xy.split(',')[1])
x2 = int(second_xy.split(',')[0])
y2 = int(second_xy.split(',')[1])
delta_x = x2 - x1
delta_y = y2 - y1
for x in range(x1,x1+delta_x+1):
for y in range(y1, y1+delta_y+1):
pos = x*1000 + y
if action == 'on':
brightness = lights_grid[pos][2]
lights_grid[pos] = [x, y, brightness + 1]
elif action == 'off':
brightness = lights_grid[pos][2]
if brightness > 0:
lights_grid[pos] = [x, y, brightness-1]
elif action == 'toggle':
brightness = lights_grid[pos][2]
lights_grid[pos] = [x, y, brightness+2]
brightness_total = 0
for light in lights_grid:
brightness_total += light[2]
print('Total brightness:', brightness_total)
| 3.140625
| 3
|
python/beginner/sum-earnings_ljsauer.py
|
saumyasingh048/hacktoberithms
| 16
|
12782240
|
<filename>python/beginner/sum-earnings_ljsauer.py
"""
Challenge: Write a function that accepts a comma-separated
string input of earning/spending activity and returns the sum
of earnings as a single int value.
Requirements: If the input is empty or invalid, return 0; if spending
(negative values) is greater than earnings during the cumulative
addition of values, the count should start over from the remaining
values in the input.
"""
def sum_earnings():
values = input("Enter a string of whole numbers separated by commas (e.g. 1,-3,0,-4): ").split(',')
earnings = 0
for i in values:
try:
earnings = max(0, earnings + int(i))
except ValueError:
earnings = 0
break
print(earnings)
return
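# A non-interactive variant of the same logic, added only as an illustrative
# sketch (the name sum_earnings_from_string is not part of the original
# challenge code): earnings never drop below zero, and any invalid token makes
# the whole result 0, mirroring the requirements above.
def sum_earnings_from_string(csv_values):
    earnings = 0
    for token in csv_values.split(','):
        try:
            earnings = max(0, earnings + int(token))
        except ValueError:
            return 0
    return earnings
# For example: sum_earnings_from_string("1,-3,0,-4") == 0, because the -3 wipes
# out the 1 and the -4 keeps the running total at 0, while "5,-2,7" gives 10.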
| 3.984375
| 4
|
src/single_byte_cipher.py
|
kaltsimon/crypto-challenges
| 0
|
12782241
|
<filename>src/single_byte_cipher.py
"""Decrypt a message that was XOR'd against a single character."""
from encode_decode import decode_hex
from fixed_xor import xor
import requests
from operator import itemgetter
def xor_with_char(bytes, char):
"""XOR a sequence of bytes with a single character."""
return xor(bytes, char * len(bytes))
def analyze_text(text):
"""Analyze the given text for the frequencies of its characters."""
# Initialize Dictionary
frequencies = {}
count = 0
for c in text:
c = c.lower()
if c in frequencies:
frequencies[c] += 1
else:
frequencies[c] = 1
count += 1
return {char: val / count for char, val in frequencies.items()}
def extract_wiki_text(wikipedia_title, language='en'):
"""Get the text of the given wikipedia article."""
base_url = ('https://' + language + '.wikipedia.org/w/api.php?action=query'
+ '&prop=extracts&format=json&explaintext='
+ '&exsectionformat=plain&titles=')
text = ''
r = requests.get(base_url + wikipedia_title)
if r.status_code == 200:
pages = r.json()['query']['pages']
for (id, page) in pages.items():
text += page['extract']
return text
def analyze_wiki_text(wikipedia_title, language='en'):
"""Analyze the text of the given Wikipedia article."""
return analyze_text(extract_wiki_text(wikipedia_title, language))
def decrypt(bytes_, guess=0):
"""Decrypt a sequence of bytes."""
wiki = analyze_wiki_text('Pineapple')
dec = bytes_.decode('utf-8')
freqs = analyze_text(dec)
item2 = itemgetter(1)
wiki = sorted(wiki.items(), key=item2, reverse=True)
freqs = sorted(freqs.items(), key=item2, reverse=True)
diffs = []
for i in range(min(len(wiki), len(freqs))):
diffs.append(abs(ord(wiki[i][0]) - ord(freqs[i][0])))
return xor_with_char(bytes_, bytes([diffs[guess]]))
def decrypt_hex(hex_string):
"""Decrypt the hex encoded string."""
return decrypt(decode_hex(hex_string))
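# Hypothetical alternative (not part of the original module): brute-force all
# 256 single-byte keys and keep the candidate whose plaintext contains the most
# common English characters. Included only as a sketch for comparison with the
# Wikipedia-frequency approach above.
def brute_force_single_byte(cipher_bytes):
    """Return (key, plaintext) found by scoring every single-byte XOR key."""
    common = set(b'etaoinshrdlu ETAOINSHRDLU')
    best_key, best_score = 0, -1
    for key in range(256):
        candidate = bytes(b ^ key for b in cipher_bytes)
        score = sum(byte in common for byte in candidate)
        if score > best_score:
            best_key, best_score = key, score
    return best_key, bytes(b ^ best_key for b in cipher_bytes)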
if __name__ == '__main__':
print(decrypt_hex('1b37373331363f78151b7f2b783431333d78'
+ '397828372d363c78373e783a393b3736'))
| 3.640625
| 4
|
itscsapp/research/apps.py
|
danyRivC/itscsapp
| 0
|
12782242
|
from django.apps import AppConfig
class ResearchConfig(AppConfig):
name = "itscsapp.research"
verbose_name = "Research"
| 1.148438
| 1
|
modules/losses.py
|
michael-iuzzolino/CascadedDistillation
| 0
|
12782243
|
<gh_stars>0
"""Custom losses."""
import torch
import torch.nn as nn
def categorical_cross_entropy(pred_logits, y_true_softmax):
"""Categorical cross entropy."""
log_softmax_pred = nn.LogSoftmax(dim=1)(pred_logits)
soft_targets = y_true_softmax.detach().clone() # Stop gradient
cce_loss = -(soft_targets * log_softmax_pred).sum(dim=1).mean()
return cce_loss
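# Example usage (shapes are illustrative): with pred_logits of shape
# [batch, classes] and y_true_softmax a distribution over classes along dim=1,
#   loss = categorical_cross_entropy(pred_logits, y_true_softmax)
# which reduces to ordinary cross entropy when the soft targets are one-hot.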
class DistillationLossHandler(object):
def __init__(self, alpha, temp):
self._alpha = alpha
self._temp = temp
self._kl = nn.KLDivLoss()
def __call__(self, outputs, labels, teacher_outputs, temp_pred=None):
kl_loss = self._kl(
nn.functional.log_softmax(outputs / self._temp, dim=1),
nn.functional.softmax(teacher_outputs / self._temp, dim=1)
) * (self._alpha * self._temp * self._temp)
target_loss = nn.functional.cross_entropy(outputs, labels) * (1. - self._alpha)
loss = kl_loss + target_loss
return loss
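# Example usage (tensor names are illustrative):
#   kd_loss_fn = DistillationLossHandler(alpha=0.9, temp=4.0)
#   loss = kd_loss_fn(student_logits, labels, teacher_logits)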
class TD_Loss(object):
def __init__(self, n_timesteps, tau_handler, flags):
self.n_timesteps = n_timesteps
self.tau_handler = tau_handler
self.flags = flags
def __call__(self, criterion, predicted_logits, y, targets):
loss = 0
timestep_losses = torch.zeros(self.n_timesteps)
timestep_accs = torch.zeros(self.n_timesteps)
for t in range(len(predicted_logits)):
logit_i = predicted_logits[t]
# First term
sum_term = torch.zeros_like(logit_i)
t_timesteps = list(range(t+1, self.n_timesteps))
for i, n in enumerate(t_timesteps, 1):
logit_k = predicted_logits[n].detach().clone()
softmax_i = nn.functional.softmax(logit_k, dim=1)
sum_term = sum_term + self.flags.lambda_val**(i - 1) * softmax_i
# Final terms
term_1 = (1 - self.flags.lambda_val) * sum_term
term_2 = self.flags.lambda_val**(self.n_timesteps - t - 1) * y
softmax_j = term_1 + term_2
# Temp scale
logit_i = logit_i / self.flags.distillation_temperature
softmax_j = softmax_j / self.flags.distillation_temperature
# Compute loss
loss_i = criterion(pred_logits=logit_i, y_true_softmax=softmax_j)
# Tau weighted
if self.flags.tau_weighted_loss and t < self.n_timesteps - 1:
tau_i = self.tau_handler(t-1, epoch_i)
loss_i = tau_i * loss_i
# Aggregate loss
if self.flags.tdl_mode == "EWS":
loss = loss + loss_i
else:
# Ignore first timestep loss (all 0's output)
if t > 0:
loss = loss + loss_i
# Log loss item
timestep_losses[t] = loss_i.item()
# Predictions
softmax_i = nn.functional.softmax(logit_i, dim=1)
y_pred = torch.argmax(softmax_i, dim=1)
# Updates running accuracy statistics
n_correct = torch.eq(targets, y_pred).sum()
acc_i = n_correct / float(targets.shape[0])
timestep_accs[t] = acc_i
# Normalize loss
if self.flags.normalize_loss:
loss = loss / float(self.n_timesteps)
return loss, timestep_losses, timestep_accs
def compute_distillation_loss(target, teacher, alpha, temperature):
teacher_term = teacher * (alpha * temperature * temperature)
target_term = (1 - alpha) * target
loss = teacher_term + target_term
return loss
class Distillation_TD_Loss(object):
def __init__(self, n_timesteps, tau_handler, flags):
self.n_timesteps = n_timesteps
self.tau_handler = tau_handler
self.flags = flags
def __call__(self, criterion, predicted_logits, teacher_y, y, targets):
loss = 0
timestep_losses = torch.zeros(self.n_timesteps)
timestep_accs = torch.zeros(self.n_timesteps)
for t in range(len(predicted_logits)):
logit_i = predicted_logits[t]
# First term
sum_term = torch.zeros_like(logit_i)
t_timesteps = list(range(t+1, self.n_timesteps))
for i, n in enumerate(t_timesteps, 1):
logit_k = predicted_logits[n].detach().clone()
softmax_i = nn.functional.softmax(logit_k, dim=1)
sum_term = sum_term + self.flags.lambda_val**(i - 1) * softmax_i
# Final terms
target_term_1 = (1 - self.flags.lambda_val) * sum_term
target_term_2 = self.flags.lambda_val**(self.n_timesteps - t - 1) * y
target_softmax_j = target_term_1 + target_term_2
teacher_term_1 = (1 - self.flags.lambda_val) * sum_term
teacher_term_2 = self.flags.lambda_val**(self.n_timesteps - t - 1) * teacher_y
teacher_softmax_j = teacher_term_1 + teacher_term_2
# Temp scale
logit_i = logit_i / self.flags.distillation_temperature
teacher_softmax_j = teacher_softmax_j / self.flags.distillation_temperature
# Compute target and teacher losses
target_loss_i = criterion(pred_logits=logit_i, y_true_softmax=target_softmax_j)
teacher_loss_i = criterion(pred_logits=logit_i, y_true_softmax=teacher_softmax_j)
# Compute distillation loss
loss_i = compute_distillation_loss(
target_loss_i,
teacher_loss_i,
alpha=self.flags.distillation_alpha,
temperature=self.flags.distillation_temperature
)
# Tau weighted
if self.flags.tau_weighted_loss and t < self.n_timesteps - 1:
tau_i = self.tau_handler(t-1, epoch_i)
loss_i = tau_i * loss_i
# Aggregate loss
if self.flags.tdl_mode == "EWS":
loss = loss + loss_i
else:
# Ignore first timestep loss (all 0's output)
if t > 0:
loss = loss + loss_i
# Log loss item
timestep_losses[t] = loss_i.item()
# Predictions
softmax_i = nn.functional.softmax(logit_i, dim=1)
y_pred = torch.argmax(softmax_i, dim=1)
# Updates running accuracy statistics
n_correct = torch.eq(targets, y_pred).sum()
acc_i = n_correct / float(targets.shape[0])
timestep_accs[t] = acc_i
# Normalize loss
if self.flags.normalize_loss:
loss = loss / float(self.n_timesteps)
return loss, timestep_losses, timestep_accs
| 2.328125
| 2
|
tests/continuous_integration/test_05_network.py
|
simonvh/ANANSE
| 0
|
12782244
|
import os
import numpy as np
import pytest
from ananse.network import Network
from .test_02_utils import write_file
@pytest.fixture
def binding_fname():
return "tests/example_data/binding2.tsv"
@pytest.fixture
def network():
genome = "tests/data/genome.fa"
if not os.path.exists(genome):
write_file(genome, [">chr1", "N"])
return Network(genome=genome, gene_bed="ananse/db/hg38.genes.bed")
def test_unique_enhancer(network, binding_fname):
regions = network.unique_enhancers(binding_fname)
regions = regions.as_df()
assert regions.shape[0] == 6
assert sorted(list(regions["Chromosome"].unique())) == ["chr1", "chr10", "chr17"]
assert sorted(list(regions["Start"].unique())) == [7677184, 7687827]
def test_distance_weight(network):
dw = network.distance_weight(
include_promoter=True,
promoter_region=20,
full_weight_region=50,
maximum_distance=100,
alpha=5,
)
assert list(dw.columns) == ["weight", "dist"]
dw = dw.set_index("dist")
assert dw.loc[0, "weight"] == 1
assert dw.loc[25, "weight"] == 1
assert dw.loc[50, "weight"] == 1
assert dw.loc[51, "weight"] < 1
assert np.isclose(dw.loc[100, "weight"], 0, atol=1e-4)
assert dw.shape[0] == 101
dw = network.distance_weight(
include_promoter=False,
promoter_region=20,
full_weight_region=50,
maximum_distance=100,
alpha=5,
)
assert list(dw.columns) == ["weight", "dist"]
dw = dw.set_index("dist")
assert dw.loc[0, "weight"] == 0
assert dw.loc[20, "weight"] == 0
assert dw.loc[21, "weight"] == 1
assert dw.shape[0] == 101
| 2.125
| 2
|
setup.py
|
zhouxiexuan/pDeep3
| 10
|
12782245
|
import setuptools
from configparser import ConfigParser
from pkg_resources import parse_version
from sys import platform
assert parse_version(setuptools.__version__) >= parse_version('36.2')
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
license_options = {
'apache2': (
'Apache Software License 2.0',
'OSI Approved :: Apache Software License'
),
'MIT': (
'MIT License',
'OSI Approved :: MIT License'
)
}
status_options = {
'1': 'Planning',
'2': 'Pre-Alpha',
'3': 'Alpha',
'4': 'Beta',
'5': 'Production/Stable',
'6': 'Mature',
'7': 'Inactive'
}
maximum_python3_available = 8
with open("requirements.txt") as requirements_file:
requirements = []
for line in requirements_file:
line = line.strip()
requirements.append(line)
setuptools.setup(
name=cfg["lib_name"],
license=license_options[cfg["license"]][0],
classifiers=[
f'Development Status :: {cfg["status"]} - {status_options[cfg["status"]]}',
f'Intended Audience :: {cfg["audience"]}',
f'License :: {license_options[cfg["license"]][1]}',
f'Natural Language :: {cfg["language"]}',
] + [
f'Programming Language :: Python :: 3.{i}' for i in range(
int(cfg["min_python"].split(".")[1]),
maximum_python3_available + 1
)
],
version=cfg["version"],
description=cfg["description"],
keywords=cfg["keywords"],
author=cfg["author"],
author_email=cfg["author_email"],
url=cfg["url"],
packages=setuptools.find_packages(),
    # TODO: Modifying this should make it possible to remove the MANIFEST.in
include_package_data=True,
install_requires=requirements,
python_requires=f'>={cfg["min_python"]},<{cfg["max_python"]}',
zip_safe=False,
)
| 2.25
| 2
|
utils/aes_test.py
|
aidan-lane/MastersProject
| 0
|
12782246
|
<filename>utils/aes_test.py
"""Unittest for aes.py
"""
import unittest
import random
import string
from os.path import exists
import string
from utils import aes
class TestAes(unittest.TestCase):
def test_key_gen(self):
""" Test AES private key generation
"""
# Test default (32 bytes)
key = aes.gen_rand_bytes(aes.KEY_SIZE)
        self.assertEqual(len(key), aes.KEY_SIZE)
# Test user-specified amount
new_len = 64
key = aes.gen_rand_bytes(new_len)
        self.assertEqual(len(key), new_len)
def test_string(self):
key = aes.gen_rand_bytes(aes.KEY_SIZE)
iv = aes.gen_rand_bytes(aes.IV_SIZE)
for _ in range(10):
s = "".join(random.choices(string.ascii_lowercase, k=10))
encrypted = aes.encrypt_string(s, key, iv)
decrypted = aes.decrypt_string(encrypted, key, iv)
# Ensure encrypted is a hex string
self.assertTrue(all(c in string.hexdigits for c in encrypted))
self.assertEqual(decrypted, s)
def test_encrypt_file(self):
filename = "sample.txt"
file = open(filename, "w")
file.write("test")
file.close()
out_path = aes.encrypt_file(aes.gen_rand_bytes(aes.KEY_SIZE), filename)
self.assertTrue(exists(out_path))
        self.assertRegex(out_path, r".*\.enc$")
def test_aes(self):
"""Test encryption and decryption of a file using AES
"""
filename = "sample.txt"
dec_filename = "decrypted.txt"
msg = "test_aes1234"
key = aes.gen_rand_bytes(aes.KEY_SIZE)
file = open(filename, "w")
file.write(msg)
file.close()
out_path = aes.encrypt_file(key, filename)
aes.decrypt_file(key, out_path, dec_filename)
dfile = open(dec_filename, "r")
dec_msg = dfile.readline()
dfile.close()
self.assertEqual(dec_msg, msg)
if __name__ == "__main__":
unittest.main()
| 2.9375
| 3
|
awx/sso/urls.py
|
gitEdouble/awx
| 17
|
12782247
|
<gh_stars>10-100
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
from django.conf.urls import url
from awx.sso.views import (
sso_complete,
sso_error,
sso_inactive,
saml_metadata,
)
app_name = 'sso'
urlpatterns = [
url(r'^complete/$', sso_complete, name='sso_complete'),
url(r'^error/$', sso_error, name='sso_error'),
url(r'^inactive/$', sso_inactive, name='sso_inactive'),
url(r'^metadata/saml/$', saml_metadata, name='saml_metadata'),
]
| 1.507813
| 2
|
ShopperMiles/products/migrations/0001_initial.py
|
juansahe/shoppy
| 0
|
12782248
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-06-28 20:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('providers', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Nombre')),
('img', models.ImageField(upload_to='uploads/category/', verbose_name='Imagen')),
],
options={
'ordering': ['name'],
'verbose_name': 'Categoria',
'verbose_name_plural': 'Categorias',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Nombre del producto ')),
('description', models.TextField(blank=True, null=True, verbose_name='Descripci\xf3n del producto ')),
('img', models.ImageField(upload_to='uploads/products/', verbose_name='Imagen del producto ')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Category', verbose_name='Categoria')),
('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='providers.Provider', verbose_name='Marca o Retail')),
],
options={
'ordering': ['name'],
'verbose_name': 'Producto',
'verbose_name_plural': 'Productos',
},
),
]
| 1.648438
| 2
|
104_manejoCadenas/funcionSorted.py
|
josuerojasq/netacad_python
| 0
|
12782249
|
# The sorted() function takes one argument (a list) and returns a new list
# with the elements of the argument in sorted order.
# Demonstration of the sorted() function
firstGreek = ['omega', 'alfa', 'pi', 'gama']
firstGreek2 = sorted(firstGreek)
print(firstGreek)
print(firstGreek2)
print()
lista1 = ['T', 'e', 'x', 't', 'o', ' ', 'd', 'e', ' ', 'e', 'j', 'e', 'm', 'p', 'l', 'o', ',', ' ', 'f', 'i', 'n']
lista2 = sorted(lista1)
print(lista1)
print(lista2)
| 4.15625
| 4
|
src/scrawl/moves/text.py
|
astromancer/graphical
| 0
|
12782250
|
import matplotlib.pyplot as plt
from matplotlib.text import Text
class DragHandler(object):
# NOTE: DOES NOT HANDLE TEXT WITH ARBITRARY TRANSFORMS!!!!
"""
A simple class to handle Drag n Drop.
This is a simple example, which works for Text objects only
"""
def __init__(self, figure=None):
""" Create a new drag handler and connect it to the figure's event system.
If the figure handler is not given, the current figure is used instead
"""
if figure is None:
figure = plt.gcf()
# simple attibute to store the dragged text object
self.dragged = None
# Connect events and callbacks
figure.canvas.mpl_connect("pick_event", self.on_pick_event)
figure.canvas.mpl_connect("button_release_event", self.on_release_event)
def on_pick_event(self, event):
" Store which text object was picked and were the pick event occurs."
if isinstance(event.artist, Text):
self.dragged = event.artist
self.pick_pos = (event.mouseevent.xdata, event.mouseevent.ydata)
return True
def on_release_event(self, event):
" Update text position and redraw"
if self.dragged is not None:
old_pos = self.dragged.get_position()
new_pos = (old_pos[0] + event.xdata - self.pick_pos[0],
old_pos[1] + event.ydata - self.pick_pos[1])
self.dragged.set_position(new_pos)
self.dragged = None
plt.draw()
return True
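# Example usage (illustrative): text artists must be created with picking
# enabled for the pick_event to fire.
#   fig, ax = plt.subplots()
#   ax.text(0.5, 0.5, "drag me", picker=True)
#   handler = DragHandler(fig)
#   plt.show()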
| 3.84375
| 4
|