hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7e543a1539edd2bc645863389913b22de09fceee | 722 | py | Python | ro_py/economy.py | jmkd3v/ro.py | 7c50267ccc1eed333e73c5cfb0740aec00a62989 | [
"MIT"
] | null | null | null | ro_py/economy.py | jmkd3v/ro.py | 7c50267ccc1eed333e73c5cfb0740aec00a62989 | [
"MIT"
] | null | null | null | ro_py/economy.py | jmkd3v/ro.py | 7c50267ccc1eed333e73c5cfb0740aec00a62989 | [
"MIT"
] | null | null | null | """
This file houses functions and classes that pertain to the Roblox economy endpoints.
"""
from ro_py.utilities.url import url
endpoint = url("economy")
class Currency:
"""
Represents currency data.
"""
def __init__(self, currency_data):
self.robux = currency_data["robux"]
class LimitedResaleData:
"""
Represents the resale data of a limited item.
"""
def __init__(self, resale_data):
self.asset_stock = resale_data["assetStock"]
self.sales = resale_data["sales"]
self.number_remaining = resale_data["numberRemaining"]
self.recent_average_price = resale_data["recentAveragePrice"]
self.original_price = resale_data["originalPrice"]
| 24.896552 | 84 | 0.686981 | 558 | 0.772853 | 0 | 0 | 0 | 0 | 0 | 0 | 283 | 0.391967 |
7e550a8084b1674ea1aeeea565a6759f3741fb88 | 1,557 | py | Python | ask/qa/forms.py | kzn-bulat-s/abracadabra | 25d582a3d6fc6587772776479ef2f1d284fbe0e9 | [
"Unlicense"
] | null | null | null | ask/qa/forms.py | kzn-bulat-s/abracadabra | 25d582a3d6fc6587772776479ef2f1d284fbe0e9 | [
"Unlicense"
] | null | null | null | ask/qa/forms.py | kzn-bulat-s/abracadabra | 25d582a3d6fc6587772776479ef2f1d284fbe0e9 | [
"Unlicense"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from models import Question, Answer
class AskForm(forms.ModelForm):
class Meta:
model = Question
fields = ['title', 'text']
def save(self, commit=True):
question = super(AskForm, self).save(commit=False)
question.author = self.user
if commit:
question.save()
return question
class AnswerForm(forms.Form):
text = forms.CharField()
question = forms.IntegerField()
def clean(self):
data = self.cleaned_data
try:
question = Question.objects.get(pk=data['question'])
except (Question.DoesNotExist, KeyError):
            raise forms.ValidationError("Question doesn't exist.")
else:
data['question'] = question
return data
def save(self):
data = self.cleaned_data
answer = Answer.objects.create(text=data['text'],
question=data['question'],
author=self.user)
return answer
class UserCreationFormWithEmail(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'password', 'email',)
widgets = {
'password': forms.PasswordInput(),
}
def save(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
email = self.cleaned_data.get('email')
user = User.objects.create_user(username, email, password)
return user | 27.803571 | 67 | 0.594091 | 1,445 | 0.928067 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.089274 |
7e555fdeeb4c2079106c242cdd66f9f9ada341c5 | 7,720 | py | Python | tests/pow_tests.py | pythononwheels/coronadash | 876258d00f5b8bcccd4746713a15b3de54534fcf | [
"MIT"
] | null | null | null | tests/pow_tests.py | pythononwheels/coronadash | 876258d00f5b8bcccd4746713a15b3de54534fcf | [
"MIT"
] | null | null | null | tests/pow_tests.py | pythononwheels/coronadash | 876258d00f5b8bcccd4746713a15b3de54534fcf | [
"MIT"
] | null | null | null | #
# Pow Default Tests
#
#
# runtest script.
# runs test with respect to some paramters
# currently only os
import sys
import pytest
# possible sys.platform results:
# http://stackoverflow.com/questions/446209/possible-values-from-sys-platform
MODELNAME = "pow_test_model"
class TestClass:
@pytest.mark.notonosx
@pytest.mark.run(order=1)
@pytest.mark.minimal
def test_server(self):
""" test if server starts
calls baseurl:port/test/12
must return 12.
            This tests the server, routing and method dispatching
"""
print(" .. Test if server works" )
from multiprocessing import Process
import coronadash.server
import requests
import coronadash.conf.config as cfg
import time
p = Process(target=coronadash.server.main)
p.start()
testurl=cfg.server_settings["protocol"] + cfg.server_settings["host"] + ":" + str(cfg.server_settings["port"]) + "/test/12"
r = requests.get(testurl)
p.terminate()
assert int(r.text)==12
@pytest.mark.run(order=2)
@pytest.mark.minimal
def test_sql_generate_model(self):
""" test if sql model is generated"""
print(" .. Test generate_model")
import coronadash.generate_model as gm
import uuid
import os.path
ret = gm.generate_model(MODELNAME, "sql", appname="coronadash")
# generate model returns true in case of success
assert ret is True
assert os.path.exists(os.path.normpath("../models/sql/" + MODELNAME + ".py"))
@pytest.mark.run(order=3)
@pytest.mark.minimal
def test_sql_model_type(self):
""" based on test_generate_model. Tests if a model can insert values
DB sqlite by default.
"""
print(" .. Test model is correct type")
from coronadash.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
assert isinstance(m, PowTestModel)
@pytest.mark.run(order=4)
def test_sql_dbsetup(self):
""" test the setup of the alembic environment """
print(" .. Test SQL: db_setup")
import coronadash.init_sqldb_environment
import os
os.chdir("..")
r = coronadash.init_sqldb_environment.init_migrations()
assert r == True
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=5)
def test_sql_migration(self):
""" test the setup of the alembic environment
generate a migration
"""
print(" .. Test SQL: generate_migration")
import coronadash.generate_migration
import os
os.chdir("..")
script = coronadash.generate_migration.generate_migration(message="pow_test")
assert os.path.exists(os.path.normpath(script.path))
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=6)
def test_sql_dbupdate(self):
""" test the setup of the alembic environment
actually migrate the DB schema up
"""
print(" .. Test SQL: update_db -d up")
import coronadash.update_db
import os, time
ret = None
os.chdir("..")
time.sleep(1)
try:
ret = coronadash.update_db.migrate("up")
except Exception as e:
print(e)
ret = True
time.sleep(5)
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=7)
def test_if_sql_model_validation_works(self):
"""
check if validation works
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
assert m.validate() == True
@pytest.mark.run(order=8)
def test_if_sql_model_validation_fails_successfully(self):
"""
check if validation fails if type is wrong
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
m.title="123456789123456789123456789123456789"
assert m.validate() == False
@pytest.mark.run(order=9)
def test_sql_insert_and_find(self):
""" based on test_generate_model.
Tests if a model can insert values in the DB
and can be found by title attribute.
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.sql.pow_test_model import PowTestModel
import os
m = PowTestModel()
m.title = "TestnamePowTestRunner"
m.upsert()
res=m.find(PowTestModel.title=="TestnamePowTestRunner")
assert res.count()==1
m.session.close()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
#
# tinyDB tests
#
@pytest.mark.run(order=10)
@pytest.mark.minimal
def test_tinydb_generate_model(self):
""" test if sql model is generated"""
print(" .. Test tinyDB generate_model")
import coronadash.generate_model as gm
import uuid
import os.path
ret = gm.generate_model(MODELNAME, "tinydb", appname="coronadash")
# generate model returns true in case of success
assert ret is True
assert os.path.exists(os.path.normpath("../models/tinydb/" + MODELNAME + ".py"))
@pytest.mark.run(order=11)
@pytest.mark.minimal
def test_if_tinydb_model_validation_works(self):
"""
check if validation works
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
assert m.validate() == True
@pytest.mark.run(order=12)
@pytest.mark.minimal
def test_if_tinydb_model_validation_fails_successfully(self):
"""
check if validation fails if type is wrong
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
m.title="123456789123456789123456789123456789"
assert m.validate() == False
@pytest.mark.run(order=13)
@pytest.mark.minimal
def test_tinydb_model_type(self):
""" based on test_generate_model. Tests if a model can insert values
DB sqlite by default.
"""
print(" .. Test model tinyDB is correct type")
from coronadash.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
assert isinstance(m, PowTestModel)
@pytest.mark.run(order=14)
def test_tinydb_insert_and_find(self):
""" based on test_generate_model. Tests if a model can insert values
and can be found back.
"""
print(" .. Test tinyDB: model.upsert() and model.find()")
from coronadash.models.tinydb.pow_test_model import PowTestModel
import os
m = PowTestModel()
m.title = "TestnamePowTestRunner"
m.upsert()
res=m.find(m.Query.title=="TestnamePowTestRunner")
assert res
m.db.close()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
if __name__ == "__main__":
print(55*"-")
print(" running pow Tests on: " + sys.platform)
print(" ... ")
if sys.platform.startswith("darwin"):
# osx
ret = pytest.main(["-k-notonosx", "pow_tests.py"])
else:
ret = pytest.main(["pow_tests.py"])
print(" Failures: " +str(ret))
print(55*"-")
| 34.159292 | 133 | 0.613731 | 7,087 | 0.918005 | 0 | 0 | 6,921 | 0.896503 | 0 | 0 | 2,530 | 0.32772 |
7e557776390bf717690c0ffe3d5af9bb9e5270df | 282 | py | Python | codility/python/fib.py | guvkon/aoc-2021 | 15c8d387f5bf218790d461d32dd948d6c667be6c | [
"MIT"
] | null | null | null | codility/python/fib.py | guvkon/aoc-2021 | 15c8d387f5bf218790d461d32dd948d6c667be6c | [
"MIT"
] | null | null | null | codility/python/fib.py | guvkon/aoc-2021 | 15c8d387f5bf218790d461d32dd948d6c667be6c | [
"MIT"
] | null | null | null | import math
def fib(n):
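    # Bottom-up dynamic programming: fill a table of Fibonacci numbers in O(n) time and O(n) space.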
    if n < 2:
        return n
    fib = [0] * (n + 1)
    fib[1] = 1
    for i in range(2, n + 1):
        fib[i] = fib[i - 1] + fib[i - 2]
    return fib[n]
def fib_formula(n):
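    # Closed-form Binet formula: (phi**n - psi**n) / sqrt(5), with phi and psi the two roots of x**2 = x + 1; exact only while floating-point precision holds.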
return int((math.pow((1 + math.sqrt(5)) / 2, n) - math.pow((1 - math.sqrt(5)) / 2, n)) / math.sqrt(5))
| 21.692308 | 106 | 0.485816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e55c6c8ebd4a1c5ef44590495d1ea5a8940d7d2 | 1,886 | py | Python | src/pengbot/adapters/facebook/adapter.py | mariocesar/pengbot | 070854f92ac1314ee56f7f6cb9d27430b8f0fda8 | [
"MIT"
] | 1 | 2020-09-21T13:52:04.000Z | 2020-09-21T13:52:04.000Z | src/pengbot/adapters/facebook/adapter.py | mariocesar/pengbot | 070854f92ac1314ee56f7f6cb9d27430b8f0fda8 | [
"MIT"
] | null | null | null | src/pengbot/adapters/facebook/adapter.py | mariocesar/pengbot | 070854f92ac1314ee56f7f6cb9d27430b8f0fda8 | [
"MIT"
] | null | null | null | import json
from pengbot.utils import isbound
from .api import Facebook
from ..web import WebAdapter
class Adapter(WebAdapter):
_fbapi = None
@property
def fbapi(self):
if not self._fbapi:
self._fbapi = Facebook(self)
return self._fbapi
@WebAdapter.route('GET', r'/')
def handle_verify_token(self, start_response, request):
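        # Webhook verification handshake: echo back hub.challenge only when hub.verify_token matches the configured token, otherwise answer 401.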
verify_token = request.GET.get('hub.verify_token', None)
challenge = request.GET.get('hub.challenge', None)
if verify_token == self.context.verify_token:
start_response('200 OK', [('Content-Type', 'text/plain')])
return challenge
else:
start_response('401 OK', [('Content-Type', 'text/plain')])
return "Invalid Token"
@WebAdapter.route('POST', r'/')
def handle_payload(self, start_response, request):
payload = json.loads(request.body.decode())
assert 'object' in payload, 'Missing object type in payload: %r' % payload
assert payload['object'] == 'page', 'Unknown object type: %r' % payload['object']
assert 'entry' in payload, 'Missing entry'
for entry in payload['entry']:
try:
assert 'id' in entry, 'Missing id in entry: %r' % entry
assert 'time' in entry, 'Missing timestamp in entry: %r' % entry
self.handle_message(entry)
except Exception as err:
start_response('500 OK', [('Content-Type', 'text/plain')])
return '%r' % err
else:
start_response('200 OK', [('Content-Type', 'text/plain')])
return ''
def say(self, recipient, text):
response = self.fbapi.send_message({
'recipient': {'id': recipient},
'message': {'text': text}}
)
self.logger.debug(response.content)
| 31.966102 | 89 | 0.577943 | 1,780 | 0.943796 | 0 | 0 | 1,498 | 0.794274 | 0 | 0 | 426 | 0.225875 |
7e568b8ddcdfbfe7732b973a52875cd69daa8414 | 4,517 | py | Python | tests/test_policy_loading.py | alancinacio/advanced-security-compliance | 5b8c6951db2c895033d816274abc389f1413cddd | [
"MIT"
] | 83 | 2021-06-01T10:11:15.000Z | 2022-03-15T00:38:42.000Z | tests/test_policy_loading.py | alancinacio/advanced-security-compliance | 5b8c6951db2c895033d816274abc389f1413cddd | [
"MIT"
] | 30 | 2021-06-07T10:18:22.000Z | 2022-03-15T10:51:19.000Z | tests/test_policy_loading.py | alancinacio/advanced-security-compliance | 5b8c6951db2c895033d816274abc389f1413cddd | [
"MIT"
] | 15 | 2021-06-09T17:22:33.000Z | 2022-02-21T00:41:04.000Z | import os
import sys
import yaml
import uuid
import unittest
import tempfile
sys.path.append(".")
from ghascompliance.policy import Policy
class TestPolicyLoading(unittest.TestCase):
def setUp(self):
self.policy_file = self.genTempFile()
return super().setUp()
def tearDown(self):
# if os.path.exists(self.policy_file):
# os.remove(self.policy_file)
return super().tearDown()
def genTempFile(self, ext=".yml"):
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ext)
def writePolicyToFile(self, policy):
with open(self.policy_file, "w") as handle:
yaml.safe_dump(policy, handle)
return self.policy_file
def testBasicLoading(self):
self.writePolicyToFile({"general": {"level": "error"}})
policy = Policy("error", path=self.policy_file)
self.assertEqual(policy.policy.get("general", {}).get("level"), "error")
def testUnwantedSection(self):
self.writePolicyToFile({"codescanning": {"test": "error"}})
with self.assertRaises(Exception) as context:
policy = Policy("error", path=self.policy_file)
self.assertTrue("Schema Validation Failed" in str(context.exception))
def testUnwantedBlock(self):
self.writePolicyToFile({"codescanning": {"conditions": {"tests": []}}})
with self.assertRaises(Exception) as context:
policy = Policy("error", path=self.policy_file)
self.assertTrue("Schema Validation Failed" in str(context.exception))
def testImport(self):
path = self.genTempFile(ext=".txt")
self.writePolicyToFile(
{"codescanning": {"conditions": {"imports": {"ids": path}}}}
)
data = ["test", "each", "line"]
with open(path, "w") as handle:
handle.write("\n".join(data))
policy = Policy("error", path=self.policy_file)
self.assertIsNotNone(policy.policy["codescanning"]["conditions"]["ids"])
self.assertEqual(policy.policy["codescanning"]["conditions"]["ids"], data)
def testImportOfImports(self):
self.writePolicyToFile(
{"codescanning": {"conditions": {"imports": {"imports": "random"}}}}
)
with self.assertRaises(Exception) as context:
policy = Policy("error", path=self.policy_file)
self.assertTrue("Schema Validation Failed" in str(context.exception))
def testImportPathTraversal(self):
self.writePolicyToFile(
{
"codescanning": {
"conditions": {
"imports": {"ids": "../../../../../../../etc/passwd"}
}
}
}
)
with self.assertRaises(Exception) as context:
policy = Policy("error", path=self.policy_file)
self.assertTrue("Path Traversal Detected" in str(context.exception))
class TestPolicyExamples(unittest.TestCase):
def testBasic(self):
path = "examples/policies/basic.yml"
policy = Policy("error", path=path)
self.assertEqual(policy.policy.get("codescanning", {}).get("level"), "error")
self.assertEqual(policy.policy.get("dependabot", {}).get("level"), "high")
self.assertEqual(policy.policy.get("secretscanning", {}).get("level"), "all")
def testGeneral(self):
path = "examples/policies/general.yml"
policy = Policy("error", path=path)
self.assertEqual(policy.policy.get("general", {}).get("level"), "error")
def testConditions(self):
path = "examples/policies/conditions.yml"
policy = Policy("error", path=path)
self.assertEqual(
policy.policy["licensing"]["conditions"]["ids"], ["GPL-2.0", "GPL-3.0"]
)
self.assertEqual(
policy.policy["licensing"]["conditions"]["names"],
[
"maven://org.apache.struts",
"org.apache.struts",
"maven://org.apache.struts#2.0.5",
],
)
self.assertEqual(policy.policy["licensing"]["warnings"]["ids"], ["Other", "NA"])
def testAdvance(self):
path = "examples/policies/advance.yml"
policy = Policy("error", path=path)
self.assertEqual(
policy.policy["licensing"]["conditions"]["ids"], ["GPL-2.0", "GPL-3.0"]
)
self.assertEqual(
policy.policy["licensing"]["warnings"]["names"],
["kibana"],
)
| 30.938356 | 88 | 0.587337 | 4,370 | 0.967456 | 0 | 0 | 0 | 0 | 0 | 0 | 1,123 | 0.248616 |
7e56afeffeb1143dfa4e1a4ffb0c6566c1c6fcad | 1,762 | py | Python | python/se3last-w3c/conftest.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | 1 | 2021-11-17T22:29:42.000Z | 2021-11-17T22:29:42.000Z | python/se3last-w3c/conftest.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | null | null | null | python/se3last-w3c/conftest.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | 1 | 2021-11-17T22:29:35.000Z | 2021-11-17T22:29:35.000Z | import os
import sys
import time
import pytest
from appium import webdriver as appiumdriver
from selenium import webdriver
class Helpers:
@staticmethod
def validate_google(driver):
driver.get("http://google.com")
time.sleep(1)
result = 'passed' if driver.title == 'Google' else 'failed'
driver.execute_script("sauce:job-result={}".format(result))
driver.quit()
@staticmethod
def validate_app(driver):
elements = driver.find_elements_by_accessibility_id('test-Username')
result = 'passed' if len(elements) == 1 else 'failed'
driver.execute_script("sauce:job-result={}".format(result))
@staticmethod
def start_driver(caps):
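        # Build the Sauce Labs remote URL from credentials in the environment; the job name is taken from the calling test function via sys._getframe.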
sauce_username = os.environ["SAUCE_USERNAME"]
sauce_access_key = os.environ["SAUCE_ACCESS_KEY"]
remote_url = "http://{}:{}@ondemand.saucelabs.com/wd/hub".format(sauce_username, sauce_access_key)
caps['sauce:options']['name'] = sys._getframe(1).f_code.co_name
caps['sauce:options']['build'] = 'Python Se3Last W3C - {}'.format(os.environ.get("BUILD_TIME"))
return webdriver.Remote(remote_url, desired_capabilities=caps)
@staticmethod
def start_appium_driver(caps):
sauce_username = os.environ["SAUCE_USERNAME"]
sauce_access_key = os.environ["SAUCE_ACCESS_KEY"]
remote_url = "http://{}:{}@ondemand.saucelabs.com/wd/hub".format(sauce_username, sauce_access_key)
caps['sauce:options']['name'] = sys._getframe(1).f_code.co_name
caps['sauce:options']['build'] = 'Python Se3Last W3C - {}'.format(os.environ.get("BUILD_TIME"))
return appiumdriver.Remote(remote_url, desired_capabilities=caps)
@pytest.fixture
def helpers():
return Helpers
| 33.245283 | 106 | 0.682179 | 1,583 | 0.898411 | 0 | 0 | 1,595 | 0.905221 | 0 | 0 | 432 | 0.245176 |
7e5889544c1792c28e15ce9c96d0ef027587112c | 1,242 | py | Python | PharmacoDI/write_pset_table.py | bhklab/PharmacoDI | a681b2549765190fd646971de9d99a4233cd1cac | [
"MIT"
] | null | null | null | PharmacoDI/write_pset_table.py | bhklab/PharmacoDI | a681b2549765190fd646971de9d99a4233cd1cac | [
"MIT"
] | 7 | 2021-08-05T15:37:11.000Z | 2021-10-15T15:51:44.000Z | PharmacoDI/write_pset_table.py | bhklab/PharmacoDI | a681b2549765190fd646971de9d99a4233cd1cac | [
"MIT"
] | null | null | null | import os
from datatable import dt, as_type
# -- Enable logging
from loguru import logger
import sys
logger_config = {
"handlers": [
{"sink": sys.stdout, "colorize": True, "format":
"<green>{time}</green> <level>{message}</level>"},
{"sink": f"logs/write_pset_tables.log",
"serialize": True, # Write logs as JSONs
"enqueue": True}, # Makes logging queue based and thread safe
]
}
logger.configure(**logger_config)
@logger.catch
def write_pset_table(pset_df, df_name, pset_name, df_dir):
"""
Write a PSet table to a .jay file.
@param pset_df: [`DataFrame`] A PSet DataFrame
@param pset_name: [`string`] The name of the PSet
@param df_dir: [`string`] The name of the directory to hold all the PSet tables
@return [`None`]
"""
pset_path = os.path.join(df_dir, pset_name)
# Make sure directory for this PSet exists
if not os.path.exists(pset_path):
os.mkdir(pset_path)
# Convert to datatable Frame for fast write to disk
pset_df = dt.Frame(pset_df)
print(f'Writing {df_name} table to {pset_path}...')
# Use datatable to convert df to csv
pset_df.to_jay(os.path.join(pset_path, f'{pset_name}_{df_name}.jay'))
| 31.05 | 83 | 0.651369 | 0 | 0 | 0 | 0 | 760 | 0.611916 | 0 | 0 | 682 | 0.549114 |
7e595646df56fa1ccc0f7430fce5f92524bd68a3 | 1,092 | py | Python | timemachines/skaters/orbt/orbitlgtskaterfactory.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 253 | 2021-01-08T17:33:30.000Z | 2022-03-21T17:32:36.000Z | timemachines/skaters/orbt/orbitlgtskaterfactory.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 65 | 2021-01-20T16:43:35.000Z | 2022-03-30T19:07:22.000Z | timemachines/skaters/orbt/orbitlgtskaterfactory.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 28 | 2021-02-04T14:58:30.000Z | 2022-01-17T04:35:17.000Z |
from timemachines.skaters.orbt.orbitinclusion import using_orbit
if using_orbit:
from timemachines.skaters.orbt.orbitwrappers import orbit_lgt_iskater
from timemachines.skatertools.utilities.conventions import Y_TYPE, A_TYPE, R_TYPE, E_TYPE, T_TYPE
from timemachines.skatertools.batch.batchskater import batch_skater_factory
def orbit_lgt_skater_factory(y: Y_TYPE, s, k: int, a: A_TYPE = None, t: T_TYPE = None, e: E_TYPE = None, r: R_TYPE = None,
emp_mass=0.0,
seasonality=None):
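    # Thin wrapper that runs orbit's LGT (local-global-trend) model through the generic batch skater; seasonality is the number of periods per seasonal cycle.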
return batch_skater_factory(y=y, s=s, k=k, a=a, t=t, e=e, r=r, emp_mass=emp_mass,
iskater=orbit_lgt_iskater,
iskater_kwargs={'seasonality': seasonality},
min_e=0, n_warm=20)
def orbit_lgt_12(y,s,k,a=None, t=None,e=None):
return orbit_lgt_skater_factory(y=y, s=s, k=k, a=a,t=t,e=e, seasonality=12)
def orbit_lgt_24(y,s,k,a=None, t=None,e=None):
return orbit_lgt_skater_factory(y, s, k, a=a,t=t,e=e, seasonality=24)
| 45.5 | 126 | 0.638278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.011905 |
7e597dddf299d58b91fda18f58fa608899357fa1 | 530 | py | Python | script/en_glue/preprocess/qnli.py | kunde122/ERNIE1 | 050327e968b2d7d9090ab882a5dd6b0fdeca80b4 | [
"Apache-2.0"
] | 1 | 2020-10-19T09:41:11.000Z | 2020-10-19T09:41:11.000Z | script/en_glue/preprocess/qnli.py | kunde122/ERNIE1 | 050327e968b2d7d9090ab882a5dd6b0fdeca80b4 | [
"Apache-2.0"
] | 1 | 2019-08-24T02:36:37.000Z | 2019-08-24T02:36:37.000Z | script/en_glue/preprocess/qnli.py | kunde122/ERNIE1 | 050327e968b2d7d9090ab882a5dd6b0fdeca80b4 | [
"Apache-2.0"
] | 1 | 2019-10-19T05:29:08.000Z | 2019-10-19T05:29:08.000Z | import sys
mapping = {'entailment': 1, 'not_entailment': 0}
i = 0
for line in sys.stdin:
arr = line.strip().split('\t')
s1 = arr[1]
s2 = arr[2]
if len(arr) == 4:
if i == 0:
i += 1
print('text_a\ttext_b\tlabel')
continue
s3 = arr[3]
print("{}\t{}\t{}".format(s1, s2, mapping[s3]))
else:
if i == 0:
i += 1
print('qid\ttext_a\ttext_b\tlabel')
continue
print("{}\t{}\t{}\t-1".format(arr[0], s1, s2))
| 23.043478 | 55 | 0.450943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.209434 |
7e5a4b7a911240a4176bf8ae58a08ef0931285dc | 62 | py | Python | gsee/__init__.py | scene-connect/gsee | f5515fb2a20af9278e7ca11312388717760a8225 | [
"BSD-3-Clause"
] | null | null | null | gsee/__init__.py | scene-connect/gsee | f5515fb2a20af9278e7ca11312388717760a8225 | [
"BSD-3-Clause"
] | 5 | 2022-03-11T10:52:24.000Z | 2022-03-11T14:09:09.000Z | gsee/__init__.py | scene-connect/gsee | f5515fb2a20af9278e7ca11312388717760a8225 | [
"BSD-3-Clause"
] | null | null | null | __all__ = ["brl_model", "pv"]
from gsee import brl_model, pv
| 15.5 | 30 | 0.693548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.241935 |
7e5c4ac6eeaa4f1f47b3333663069355e76954d0 | 1,366 | py | Python | src/data_standardization/tests/test_boston_volume.py | nsteins/crash-model | 521a63e48a561298e694432d74caa3c385f913b6 | [
"MIT"
] | 54 | 2018-06-21T18:48:34.000Z | 2020-04-15T23:07:20.000Z | src/data_standardization/tests/test_boston_volume.py | nsteins/crash-model | 521a63e48a561298e694432d74caa3c385f913b6 | [
"MIT"
] | 172 | 2018-06-14T17:33:41.000Z | 2020-06-15T16:45:15.000Z | src/data_standardization/tests/test_boston_volume.py | nsteins/crash-model | 521a63e48a561298e694432d74caa3c385f913b6 | [
"MIT"
] | 25 | 2017-02-13T21:45:07.000Z | 2018-06-13T19:41:38.000Z | from ..boston_volume import BostonVolumeParser
import os
def test_is_readable_ATR():
parser = BostonVolumeParser(os.path.abspath(__file__))
bad = '7147_NA_NA_53_CLAPP-ST_DORCHESTER_24-HOURS_SPEED_02-25-2013.XLS'
assert not parser.is_readable_ATR(bad)
bad = '8652_NA_NA_0_SOUTHWEST-CORRIDOR_ROXBURY_48-HOURS_XXX_09-27-2016.XLS'
assert not parser.is_readable_ATR(bad)
good = '8811_NA_NA_83_PEARL-ST_CHARLESTOWN_24-HOURS_XXX_01-11-2017.XLSX'
assert parser.is_readable_ATR(good)
def test_clean_ATR_fname():
parser = BostonVolumeParser(os.path.abspath(__file__))
file = '7362_NA_NA_147_TRAIN-ST_DORCHESTER_24-HOURS_XXX_03-19-2014.XLSX'
assert parser.clean_ATR_fname(file) == '147 TRAIN ST Boston, MA'
def test_read_ATR():
path = os.path.dirname(
os.path.abspath(__file__)) + '/data/'
file = os.path.join(path,
'8811_NA_NA_83_PEARL-ST_CHARLESTOWN_24-HOURS_XXX_01-11-2017.XLSX')
parser = BostonVolumeParser(path)
assert parser.read_ATR(file) == (
# total
243,
# speed
14,
# motos/bikes
14,
# light vehicles
215,
# heavy vehicles
14,
# date
'2017-01-11',
# hourly totals
[2, 0, 1, 0, 3, 3, 6, 26, 21, 15, 11, 12, 7, 20, 12, 15,
11, 16, 23, 11, 10, 11, 4, 3]
)
| 26.784314 | 79 | 0.649341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.332357 |
7e5d011b9be09b3a31687a7c84c1f20f5a3b8ceb | 3,626 | py | Python | custom_components/luxtronik/binary_sensor.py | WhistleMaster/luxtronik | b479257100def0bb6b668b5ae3e63ccd8ed60995 | [
"MIT"
] | 37 | 2018-11-01T20:30:55.000Z | 2022-03-25T20:57:29.000Z | custom_components/luxtronik/binary_sensor.py | WhistleMaster/luxtronik | b479257100def0bb6b668b5ae3e63ccd8ed60995 | [
"MIT"
] | 41 | 2019-04-08T07:32:35.000Z | 2022-03-28T18:32:55.000Z | custom_components/luxtronik/binary_sensor.py | WhistleMaster/luxtronik | b479257100def0bb6b668b5ae3e63ccd8ed60995 | [
"MIT"
] | 17 | 2019-04-03T09:02:13.000Z | 2022-03-04T19:51:10.000Z | """Support for Luxtronik heatpump binary states."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import CONF_FRIENDLY_NAME, CONF_ICON, CONF_ID, CONF_SENSORS
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
from . import DOMAIN, ENTITY_ID_FORMAT
from .const import (
CONF_CALCULATIONS,
CONF_GROUP,
CONF_INVERT_STATE,
CONF_PARAMETERS,
CONF_VISIBILITIES,
)
ICON_ON = "mdi:check-circle-outline"
ICON_OFF = "mdi:circle-outline"
_LOGGER = logging.getLogger(__name__)
DEFAULT_DEVICE_CLASS = None
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SENSORS): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_GROUP): vol.All(
cv.string,
vol.In([CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]),
),
vol.Required(CONF_ID): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_ICON): cv.string,
vol.Optional(CONF_INVERT_STATE, default=False): cv.boolean,
}
],
)
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Luxtronik binary sensor."""
luxtronik = hass.data.get(DOMAIN)
if not luxtronik:
return False
sensors = config.get(CONF_SENSORS)
entities = []
for sensor_cfg in sensors:
sensor = luxtronik.get_sensor(sensor_cfg[CONF_GROUP], sensor_cfg[CONF_ID])
if sensor:
entities.append(
LuxtronikBinarySensor(
luxtronik,
sensor,
sensor_cfg.get(CONF_FRIENDLY_NAME),
sensor_cfg.get(CONF_ICON),
sensor_cfg.get(CONF_INVERT_STATE),
)
)
else:
_LOGGER.warning(
"Invalid Luxtronik ID %s in group %s",
sensor_cfg[CONF_ID],
sensor_cfg[CONF_GROUP],
)
add_entities(entities, True)
class LuxtronikBinarySensor(BinarySensorEntity):
"""Representation of a Luxtronik binary sensor."""
def __init__(self, luxtronik, sensor, friendly_name, icon, invert_state):
"""Initialize a new Luxtronik binary sensor."""
self._luxtronik = luxtronik
self._sensor = sensor
self._name = friendly_name
self._icon = icon
self._invert = invert_state
@property
def entity_id(self):
"""Return the entity_id of the sensor."""
if not self._name:
return ENTITY_ID_FORMAT.format(slugify(self._sensor.name))
return ENTITY_ID_FORMAT.format(slugify(self._name))
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def name(self):
"""Return the name of the sensor."""
if not self._name:
return self._sensor.name
return self._name
@property
def is_on(self):
"""Return true if binary sensor is on."""
if self._invert:
return not self._sensor.value
return self._sensor.value
@property
def device_class(self):
"""Return the dvice class."""
return DEFAULT_DEVICE_CLASS
def update(self):
"""Get the latest status and use it to update our sensor state."""
self._luxtronik.update()
| 29.241935 | 88 | 0.606178 | 1,366 | 0.376724 | 0 | 0 | 804 | 0.221732 | 0 | 0 | 527 | 0.145339 |
7e5e2458db8b5363508f982ca60fdfa04db827b4 | 1,298 | py | Python | Algorithms_easy/1065. Index Pairs of a String.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | 4 | 2020-08-11T20:45:15.000Z | 2021-03-12T00:33:34.000Z | Algorithms_easy/1065. Index Pairs of a String.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | Algorithms_easy/1065. Index Pairs of a String.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | """
1065. Index Pairs of a String
Easy
Given a text string and words (a list of strings), return all index pairs [i, j] so that the substring text[i]...text[j] is in the list of words.
Example 1:
Input: text = "thestoryofleetcodeandme", words = ["story","fleet","leetcode"]
Output: [[3,7],[9,13],[10,17]]
Example 2:
Input: text = "ababa", words = ["aba","ab"]
Output: [[0,1],[0,2],[2,3],[2,4]]
Explanation:
Notice that matches can overlap, see "aba" is found in [0,2] and [2,4].
Note:
All strings contains only lowercase English letters.
It's guaranteed that all strings in words are different.
1 <= text.length <= 100
1 <= words.length <= 20
1 <= words[i].length <= 50
Return the pairs [i,j] in sorted order (i.e. sort them by their first coordinate in case of ties sort them by their second coordinate).
"""
class Solution:
def indexPairs(self, text: str, words: List[str]) -> List[List[int]]:
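        # Brute-force scan: for each word, try every index of text whose character matches the word's first letter and compare the slice of equal length.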
res = []
for word in words:
if word in text:
a = word[0]
for i in range(len(text)):
new = ''
if a == text[i]:
new = text[int(i):int(i+len(word))]
if new == word:
res.append([i,i+len(word)-1])
return sorted(res)
| 30.904762 | 145 | 0.575501 | 476 | 0.366718 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.630971 |
7e602fe1c74aa3fb84a2c81524ea7c737c67eb63 | 233 | py | Python | setup.py | jsdnlb/Tangelo-challenge | 4cd83f5dcd5b870f2dba6a7f08d35fe1f60709ce | [
"MIT"
] | null | null | null | setup.py | jsdnlb/Tangelo-challenge | 4cd83f5dcd5b870f2dba6a7f08d35fe1f60709ce | [
"MIT"
] | 3 | 2022-02-11T00:32:38.000Z | 2022-02-11T14:29:33.000Z | setup.py | jsdnlb/tangelo-challenge | 4cd83f5dcd5b870f2dba6a7f08d35fe1f60709ce | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Building and Distributing Packages with Setuptools
# Documentation here: https://setuptools.pypa.io/en/latest/setuptools.html
setup(name='app', version='1.0', packages=find_packages())
| 33.285714 | 74 | 0.793991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.583691 |
7e61331f1d48012031893d41bd0fba4b33926051 | 804 | py | Python | sponsors/migrations/0038_auto_20210827_1223.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 911 | 2015-01-03T22:16:06.000Z | 2022-03-31T23:56:22.000Z | sponsors/migrations/0038_auto_20210827_1223.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 1,342 | 2015-01-02T16:14:45.000Z | 2022-03-28T08:01:20.000Z | sponsors/migrations/0038_auto_20210827_1223.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 551 | 2015-01-04T02:17:31.000Z | 2022-03-23T11:59:25.000Z | # Generated by Django 2.0.13 on 2021-08-27 12:23
from django.db import migrations
def populate_sponsorship_package_fk(apps, schema_editor):
Sponsorship = apps.get_model('sponsors.Sponsorship')
SponsorshipPackage = apps.get_model('sponsors.SponsorshipPackage')
for sponsorship in Sponsorship.objects.all().iterator():
try:
package = SponsorshipPackage.objects.get(name=sponsorship.level_name)
sponsorship.package = package
sponsorship.save()
except SponsorshipPackage.DoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0037_sponsorship_package'),
]
operations = [
migrations.RunPython(populate_sponsorship_package_fk, migrations.RunPython.noop)
]
| 28.714286 | 88 | 0.70398 | 231 | 0.287313 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.16791 |
7e63755a2eb8725059fe0af8726fe1baae17c773 | 6,020 | py | Python | fugue_sql/workflow.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | fugue_sql/workflow.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | fugue_sql/workflow.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | from builtins import isinstance
from typing import Any, Dict, Tuple
from fugue import (
DataFrame,
FugueWorkflow,
WorkflowDataFrame,
WorkflowDataFrames,
Yielded,
)
from fugue.constants import FUGUE_CONF_SQL_IGNORE_CASE
from fugue.workflow import is_acceptable_raw_df
from fugue_sql._parse import FugueSQL
from fugue_sql._utils import LazyWorkflowDataFrame, fill_sql_template
from fugue_sql._visitors import FugueSQLHooks, _Extensions
from fugue_sql.exceptions import FugueSQLSyntaxError
from triad.utils.assertion import assert_or_throw
from triad.utils.convert import get_caller_global_local_vars
class FugueSQLWorkflow(FugueWorkflow):
"""Fugue workflow that supports Fugue SQL. Please read |FugueSQLTutorial|."""
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self._sql_vars: Dict[str, WorkflowDataFrame] = {}
@property
def sql_vars(self) -> Dict[str, WorkflowDataFrame]:
return self._sql_vars
def __call__(self, code: str, *args: Any, **kwargs: Any) -> None:
global_vars, local_vars = get_caller_global_local_vars()
variables = self._sql(
code, self._sql_vars, global_vars, local_vars, *args, **kwargs
)
for k, v in variables.items():
if isinstance(v, WorkflowDataFrame) and v.workflow is self:
self._sql_vars[k] = v
def _sql(
self, code: str, *args: Any, **kwargs: Any
) -> Dict[str, Tuple[WorkflowDataFrame, WorkflowDataFrames, LazyWorkflowDataFrame]]:
# TODO: move dict construction to triad
params: Dict[str, Any] = {}
for a in args:
assert_or_throw(
isinstance(a, Dict), lambda: f"args can only have dict: {a}"
)
params.update(a)
params.update(kwargs)
params, dfs = self._split_params(params)
code = fill_sql_template(code, params)
sql = FugueSQL(
code,
"fugueLanguage",
ignore_case=self.conf.get_or_throw(FUGUE_CONF_SQL_IGNORE_CASE, bool),
simple_assign=True,
)
v = _Extensions(
sql, FugueSQLHooks(), self, dfs, local_vars=params # type: ignore
)
v.visit(sql.tree)
return v.variables
def _split_params(
self, params: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, LazyWorkflowDataFrame]]:
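        # Separate scalar parameters (used to render the Jinja SQL template) from dataframe-like values, which are wrapped lazily so the SQL can refer to them by name.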
p: Dict[str, Any] = {}
dfs: Dict[str, LazyWorkflowDataFrame] = {}
for k, v in params.items():
if isinstance(v, (int, str, float, bool)):
p[k] = v
elif isinstance(v, (DataFrame, Yielded)) or is_acceptable_raw_df(v):
dfs[k] = LazyWorkflowDataFrame(k, v, self)
else:
p[k] = v
return p, dfs
def fsql(
sql: str, *args: Any, fsql_ignore_case: bool = False, **kwargs: Any
) -> FugueSQLWorkflow:
"""Fugue SQL functional interface
:param sql: the Fugue SQL string (can be a jinja template)
:param args: variables related to the SQL string
:param fsql_ignore_case: whether to ignore case when parsing the SQL string
defaults to False.
:param kwargs: variables related to the SQL string
:return: the translated Fugue workflow
.. code-block:: python
# Basic case
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run()
# With external data sources
df = pd.DataFrame([[0],[1]], columns=["a"])
fsql('''
SELECT * FROM df WHERE a=0
PRINT
''').run()
# With external variables
df = pd.DataFrame([[0],[1]], columns=["a"])
t = 1
fsql('''
SELECT * FROM df WHERE a={{t}}
PRINT
''').run()
        # The following is the explicit way to specify variables and dataframes
# (recommended)
df = pd.DataFrame([[0],[1]], columns=["a"])
t = 1
fsql('''
SELECT * FROM df WHERE a={{t}}
PRINT
''', df=df, t=t).run()
# Using extensions
def dummy(df:pd.DataFrame) -> pd.DataFrame:
return df
fsql('''
CREATE [[0]] SCHEMA a:int
TRANSFORM USING dummy SCHEMA *
PRINT
''').run()
# It's recommended to provide full path of the extension inside
        # Fugue SQL, so the SQL definition and execution can be more
# independent from the extension definition.
# Run with different execution engines
sql = '''
CREATE [[0]] SCHEMA a:int
TRANSFORM USING dummy SCHEMA *
PRINT
'''
fsql(sql).run(user_defined_spark_session())
fsql(sql).run(SparkExecutionEngine, {"spark.executor.instances":10})
fsql(sql).run(DaskExecutionEngine)
# Passing dataframes between fsql calls
result = fsql('''
CREATE [[0]] SCHEMA a:int
YIELD DATAFRAME AS x
CREATE [[1]] SCHEMA a:int
YIELD DATAFRAME AS y
''').run(DaskExecutionEngine)
fsql('''
SELECT * FROM x
UNION
SELECT * FROM y
UNION
SELECT * FROM z
PRINT
''', result, z=pd.DataFrame([[2]], columns=["z"])).run()
# Get framework native dataframes
result["x"].native # Dask dataframe
result["y"].native # Dask dataframe
result["x"].as_pandas() # Pandas dataframe
# Use lower case fugue sql
df = pd.DataFrame([[0],[1]], columns=["a"])
t = 1
fsql('''
select * from df where a={{t}}
print
''', df=df, t=t, fsql_ignore_case=True).run()
"""
global_vars, local_vars = get_caller_global_local_vars()
dag = FugueSQLWorkflow(None, {FUGUE_CONF_SQL_IGNORE_CASE: fsql_ignore_case})
try:
dag._sql(sql, global_vars, local_vars, *args, **kwargs)
except FugueSQLSyntaxError as ex:
raise FugueSQLSyntaxError(str(ex)).with_traceback(None) from None
return dag
| 31.851852 | 88 | 0.595349 | 2,203 | 0.365947 | 0 | 0 | 95 | 0.015781 | 0 | 0 | 2,917 | 0.484551 |
7e6449d6fe77effb19a71805ede2cc6094e439b0 | 347 | py | Python | project/WeiboTest/SpiderMain.py | zhengbomo/python_practice | 1bc5c4ff426f806639bbc01249e66747271ec398 | [
"MIT"
] | 2 | 2016-10-03T10:20:02.000Z | 2018-03-20T00:38:53.000Z | project/WeiboTest/SpiderMain.py | zhengbomo/python_practice | 1bc5c4ff426f806639bbc01249e66747271ec398 | [
"MIT"
] | 2 | 2019-10-08T07:13:44.000Z | 2019-10-08T07:13:46.000Z | project/WeiboTest/SpiderMain.py | zhengbomo/python_practice | 1bc5c4ff426f806639bbc01249e66747271ec398 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
from Spider import Spider
# Entry point
spider = Spider()
fans = spider.get_my_fans()
for fan in fans:
spider.user_crawl(fan.user_id)
spider.status_crawl(fan.user_id)
followers = spider.get_my_follower()
for follower in followers:
    spider.user_crawl(follower.user_id)
    spider.status_crawl(follower.user_id)
| 19.277778 | 36 | 0.723343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.133903 |
7e66f22b59e0aa979c2af71f6719b6b98bfbe64a | 842 | py | Python | setup.py | konstanzer/insilico | a3b1f8352e0f2eeeb41ee94dc106e4fb02c8abf3 | [
"MIT"
] | 1 | 2021-11-22T16:25:17.000Z | 2021-11-22T16:25:17.000Z | setup.py | konstanzer/insilico | a3b1f8352e0f2eeeb41ee94dc106e4fb02c8abf3 | [
"MIT"
] | null | null | null | setup.py | konstanzer/insilico | a3b1f8352e0f2eeeb41ee94dc106e4fb02c8abf3 | [
"MIT"
] | null | null | null | from setuptools import setup
long_description = open("README.md").read()
setup(
name='insilico',
version='0.1.2',
description='A Python package to process & model ChEMBL data.',
long_description_content_type="text/markdown",
long_description=long_description,
classifiers=[
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Chemistry',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python'
],
url='https://github.com/konstanzer/insilico',
author='Steven Newton',
author_email='steven.j.newton99@gmail.com',
license='MIT',
packages=['insilico'],
install_requires=[],
include_package_data=True,
package_data={
'insilico': ['fingerprints_xml/*', 'data/empty.txt'],
},
zip_safe=False
) | 30.071429 | 67 | 0.648456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.467933 |
7e67680b7ff285c7a73a411e68537d03ddc32e2e | 1,190 | py | Python | home/board/migrations/0004_add_closed_at.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | null | null | null | home/board/migrations/0004_add_closed_at.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | 223 | 2020-02-21T06:16:56.000Z | 2022-03-01T22:24:19.000Z | home/board/migrations/0004_add_closed_at.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-25 02:53
from django.db import migrations, models
def set_edited_at(apps, schema_editor):
"""修改话题的时间
将关闭话题的修改时间设置成创建时间
将其关闭时间设置成其修改时间
将置顶话题的修改时间设置成创建时间
"""
Topic = apps.get_model("board", "Topic")
for topic in Topic.objects.all():
if not topic.is_open:
topic.edited_at = topic.created_at
topic.closed_at = topic.edited_at
topic.save()
if topic.is_pin:
topic.edited_at = topic.created_at
topic.save()
def reverse_set_edited_at(apps, schema_editor):
"""不做任何事情"""
pass
class Migration(migrations.Migration):
dependencies = [
("board", "0003_rename_field"),
]
operations = [
migrations.AddField(
model_name="topic",
name="closed_at",
field=models.DateTimeField(blank=True, null=True, verbose_name="关闭时间"),
),
migrations.AlterField(
model_name="topic",
name="edited_at",
field=models.DateTimeField(verbose_name="修改时间"),
),
migrations.RunPython(set_edited_at, reverse_code=reverse_set_edited_at),
]
| 24.791667 | 83 | 0.610924 | 580 | 0.436747 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.276355 |
7e6813b29c6eccafd45b8c9a108f2365b9ee6257 | 17,737 | py | Python | week_5/Service.py | aaaaaaaalesha/oop-and-design-patterns | 1076002e0066b334ed7c82b47985b7fcdcf6264f | [
"MIT"
] | null | null | null | week_5/Service.py | aaaaaaaalesha/oop-and-design-patterns | 1076002e0066b334ed7c82b47985b7fcdcf6264f | [
"MIT"
] | null | null | null | week_5/Service.py | aaaaaaaalesha/oop-and-design-patterns | 1076002e0066b334ed7c82b47985b7fcdcf6264f | [
"MIT"
] | null | null | null | import pygame
import random
import yaml
import os
import Objects
OBJECT_TEXTURE = os.path.join("texture", "objects")
ENEMY_TEXTURE = os.path.join("texture", "enemies")
ALLY_TEXTURE = os.path.join("texture", "ally")
def create_sprite(img, sprite_size, mmp_tile):
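    # Load one texture and scale it twice: a full-size sprite for the main view and a tile-sized copy for the mini-map; both surfaces are returned.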
icon = pygame.image.load(img).convert_alpha()
icon_mmp = pygame.transform.scale(icon, (mmp_tile, mmp_tile))
icon = pygame.transform.scale(icon, (sprite_size, sprite_size))
sprite = pygame.Surface((sprite_size, sprite_size), pygame.HWSURFACE)
sprite_mmp = pygame.Surface((mmp_tile, mmp_tile), pygame.HWSURFACE)
sprite.blit(icon, (0, 0))
sprite_mmp.blit(icon_mmp, (0, 0))
return sprite, sprite_mmp
def reload_game(engine, hero):
global level_list
level_list_max = len(level_list) - 1
engine.level += 1
hero.position = [1, 1]
engine.objects = []
generator = level_list[min(engine.level, level_list_max)]
_map = generator['map'].get_map()
engine.load_map(_map)
engine.add_objects(generator['obj'].get_objects(_map))
engine.add_hero(hero)
def restore_hp(engine, hero):
if random.randint(1, 10) == 1:
engine.score -= 0.05
engine.hero = Objects.EvilEye(hero)
engine.notify("You were cursed: unlucky")
else:
engine.score += 0.1
hero.hp = hero.max_hp
engine.notify("HP restored")
def apply_blessing(engine, hero):
if hero.gold >= int(20 * 1.5 ** engine.level) - 2 * hero.stats["intelligence"]:
engine.score += 0.2
hero.gold -= int(20 * 1.5 ** engine.level) - \
2 * hero.stats["intelligence"]
if random.randint(0, 1) == 0:
engine.hero = Objects.Blessing(hero)
engine.notify("Blessing applied")
else:
engine.hero = Objects.Berserk(hero)
engine.notify("Berserk applied")
else:
engine.score -= 0.1
engine.notify("Nothing happened")
def remove_effect(engine, hero):
if hero.gold >= int(10 * 1.5 ** engine.level) - 2 * hero.stats["intelligence"] and "base" in dir(hero):
hero.gold -= int(10 * 1.5 ** engine.level) - \
2 * hero.stats["intelligence"]
engine.hero = hero.base
engine.hero.calc_max_HP()
engine.notify("Effect removed")
else:
engine.notify("Nothing happened")
def add_gold(engine, hero):
if random.randint(1, 10) == 1:
engine.score -= 0.05
engine.hero = Objects.Weakness(hero)
engine.notify("You were cursed: weak")
else:
engine.score += 0.1
gold = int(random.randint(10, 1000) * (1.1 ** (engine.hero.level - 1)))
hero.gold += gold
engine.notify(f"{gold} gold added")
def fight(engine, enemy, hero):
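    # Probabilistic combat loop: each round the hero loses 1 HP with chance enemy_value / (enemy_value + hero_value); surviving grants score, experience and a possible level-up, dying ends the game.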
enemy_value = enemy.stats['strength'] + enemy.stats['endurance'] + \
enemy.stats['intelligence'] + enemy.stats['luck']
hero_value = sum(hero.stats.values())
while random.randint(1, enemy_value + hero_value) > hero_value and hero.hp > 0:
hero.hp -= 1
if hero.hp > 0:
engine.score += 1
hero.exp += enemy.xp
engine.notify("Defeated enemy!")
hero.level_up()
else:
engine.game_process = False
engine.notify("Lost!")
engine.notify("GAME OVER!!!")
def enhance(engine, hero):
engine.score += 0.2
engine.hero = Objects.Enhance(hero)
hero.hp = max(hero.max_hp, hero.hp)
engine.notify("You was enhanced!")
class MapFactory(yaml.YAMLObject):
@classmethod
def from_yaml(cls, loader, node):
def get_end(loader, node):
return {'map': EndMap.Map(), 'obj': EndMap.Objects()}
def get_random(loader, node):
return {'map': RandomMap.Map(), 'obj': RandomMap.Objects()}
def get_special(loader, node):
data = loader.construct_mapping(node)
try:
rat = data["rat"]
except KeyError:
rat = 0
try:
knight = data["knight"]
except KeyError:
knight = 0
ret = {}
_map = SpecialMap.Map()
_obj = SpecialMap.Objects()
_obj.config = {'rat': rat, 'knight': knight}
ret["map"] = _map
ret["obj"] = _obj
return ret
def get_empty(loader, node):
return {'map': EmptyMap.Map(), 'obj': EmptyMap.Objects()}
data = loader.construct_mapping(node)
try:
rat = data["rat"]
except KeyError:
rat = 0
try:
knight = data["knight"]
except KeyError:
knight = 0
_obj = cls.create_objects()
_obj.config = {'rat': rat, 'knight': knight}
return {'map': cls.create_map(), 'obj': _obj}
@classmethod
def create_map(cls):
return cls.Map()
@classmethod
def create_objects(cls):
return cls.Objects()
class EndMap(MapFactory):
yaml_tag = "!end_map"
class Map:
def __init__(self):
self.Map = ['000000000000000000000000000000000000000',
'0 0',
'0 0',
'0 0 0 000 0 0 00000 0 0 0',
'0 0 0 0 0 0 0 0 0 0 0',
'0 000 0 0 00000 0000 0 0 0',
'0 0 0 0 0 0 0 0 0 0 0',
'0 0 0 000 0 0 00000 00000 0',
'0 0 0',
'0 0',
'000000000000000000000000000000000000000'
]
self.Map = list(map(list, self.Map))
for i in self.Map:
for j in range(len(i)):
i[j] = wall if i[j] == '0' else floor1
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
def get_objects(self, _map):
return self.objects
class RandomMap(MapFactory):
yaml_tag = "!random_map"
class Map:
w, h = 39, 25
def __init__(self):
w = self.w
h = self.h
self.Map = [[0 for _ in range(w)] for _ in range(h)]
for i in range(w):
for j in range(h):
if i == 0 or j == 0 or i == w - 1 or j == h - 1:
self.Map[j][i] = wall
else:
self.Map[j][i] = [wall, floor1, floor2, floor3, floor1,
floor2, floor3, floor1, floor2][random.randint(0, 8)]
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
def get_objects(self, _map):
w, h = 38, 24
for obj_name in object_list_prob['objects']:
prop = object_list_prob['objects'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(1, w), random.randint(1, h))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, w), random.randint(1, h))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, w), random.randint(1, h))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['ally']:
prop = object_list_prob['ally'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(1, w), random.randint(1, h))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, w), random.randint(1, h))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, w), random.randint(1, h))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['enemies']:
prop = object_list_prob['enemies'][obj_name]
for i in range(random.randint(0, 5)):
coord = (random.randint(1, w), random.randint(1, h))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, w), random.randint(1, h))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, w), random.randint(1, h))
self.objects.append(Objects.Enemy(
prop['sprite'], prop, prop['experience'], coord))
return self.objects
class SpecialMap(MapFactory):
yaml_tag = "!special_map"
class Map:
def __init__(self):
self.Map = ['000000000000000000000000000000000000000',
'0 0',
'0 0 0',
'0 0 0 0000 0 0 00 00 0 0',
'0 0 0 0 0 0 0 0 00 0 0 00',
'0 000 0000 0000 0 0 0 00',
'0 0 0 0 0 0 0 0 0 0 00',
'0 0 0 0 0000 0 0 0 0 0',
'0 0 0',
'0 0',
'000000000000000000000000000000000000000'
]
self.Map = list(map(list, self.Map))
for i in self.Map:
for j in range(len(i)):
i[j] = wall if i[j] == '0' else floor1
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
self.config = {}
def get_objects(self, _map):
w, h = 10, 38
for obj_name in object_list_prob['objects']:
prop = object_list_prob['objects'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(1, h), random.randint(1, w))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, h),
random.randint(1, w))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, h),
random.randint(1, w))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['ally']:
prop = object_list_prob['ally'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(1, h), random.randint(1, w))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, h),
random.randint(1, w))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, h),
random.randint(1, w))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for enemy, count in self.config.items():
prop = object_list_prob['enemies'][enemy]
for i in range(random.randint(0, count)):
coord = (random.randint(1, h), random.randint(1, w))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, h),
random.randint(1, w))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, h),
random.randint(1, w))
self.objects.append(Objects.Enemy(
prop['sprite'], prop, prop['experience'], coord))
return self.objects
class EmptyMap(MapFactory):
yaml_tag = "!empty_map"
@classmethod
def from_yaml(cls, loader, node):
return {'map': EmptyMap.Map(), 'obj': EmptyMap.Objects()}
class Map:
def __init__(self):
self.Map = [[]]
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
def get_objects(self, _map):
return self.objects
wall = [0]
floor1 = [0]
floor2 = [0]
floor3 = [0]
def service_init(sprite_size, tile, full=True):
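    # Load the shared wall/floor tile sprites and wire up object, ally and enemy definitions from objects.yml (sprites plus action callbacks); with full=True the level list is also rebuilt from levels.yml.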
global object_list_prob, level_list
global wall
global floor1
global floor2
global floor3
wall[0] = create_sprite(os.path.join("texture", "wall.png"), sprite_size, tile)
floor1[0] = create_sprite(os.path.join("texture", "Ground_1.png"), sprite_size, tile)
floor2[0] = create_sprite(os.path.join("texture", "Ground_2.png"), sprite_size, tile)
floor3[0] = create_sprite(os.path.join("texture", "Ground_3.png"), sprite_size, tile)
file = open("objects.yml", "r")
object_list_tmp = yaml.load(file.read(), Loader=yaml.Loader)
if full:
object_list_prob = object_list_tmp
object_list_actions = {'reload_game': reload_game,
'add_gold': add_gold,
'apply_blessing': apply_blessing,
'remove_effect': remove_effect,
'restore_hp': restore_hp,
'fight': fight,
'enhance': enhance}
for obj in object_list_prob['objects']:
prop = object_list_prob['objects'][obj]
prop_tmp = object_list_tmp['objects'][obj]
prop['sprite'][0] = create_sprite(
os.path.join(OBJECT_TEXTURE, prop_tmp['sprite'][0]), sprite_size, tile)
prop['action'] = object_list_actions[prop_tmp['action']]
for ally in object_list_prob['ally']:
prop = object_list_prob['ally'][ally]
prop_tmp = object_list_tmp['ally'][ally]
prop['sprite'][0] = create_sprite(
os.path.join(ALLY_TEXTURE, prop_tmp['sprite'][0]), sprite_size, tile)
prop['action'] = object_list_actions[prop_tmp['action']]
for enemy in object_list_prob['enemies']:
prop = object_list_prob['enemies'][enemy]
prop_tmp = object_list_tmp['enemies'][enemy]
prop['sprite'][0] = create_sprite(
os.path.join(ENEMY_TEXTURE, prop_tmp['sprite'][0]), sprite_size, tile)
prop['action'] = object_list_actions['fight']
file.close()
if full:
file = open("levels.yml", "r")
level_list = yaml.load(file.read(), Loader=yaml.Loader)['levels']
level_list.append({'map': EndMap.Map(), 'obj': EndMap.Objects()})
file.close()
| 38.062232 | 108 | 0.462649 | 11,734 | 0.661555 | 0 | 0 | 1,585 | 0.089361 | 0 | 0 | 2,124 | 0.11975 |
7e6ac35a244c03c07d3dc37ad1b2d76ac2c72006 | 3,451 | py | Python | test/test_search.py | tomkralidis/sat-search | 48f00dc4ad8c317a131ca70b05ac4ab5d3fcdb4c | [
"MIT"
] | null | null | null | test/test_search.py | tomkralidis/sat-search | 48f00dc4ad8c317a131ca70b05ac4ab5d3fcdb4c | [
"MIT"
] | null | null | null | test/test_search.py | tomkralidis/sat-search | 48f00dc4ad8c317a131ca70b05ac4ab5d3fcdb4c | [
"MIT"
] | null | null | null | import os
import glob
import json
import unittest
import satsearch.config as config
from satstac import Item
from satsearch.search import SatSearchError, Search
class Test(unittest.TestCase):
path = os.path.dirname(__file__)
results = []
@classmethod
def setUpClass(cls):
fnames = glob.glob(os.path.join(cls.path, '*-item*.json'))
for fname in fnames:
with open(fname) as f:
cls.results.append(json.load(f))
def get_searches(self):
""" Initialize and return search object """
return [Search(datetime=r['properties']['datetime']) for r in self.results]
def test_search_init(self):
""" Initialize a search object """
search = self.get_searches()[0]
dts = [r['properties']['datetime'] for r in self.results]
assert(len(search.kwargs) == 1)
assert('time' in search.kwargs)
for kw in search.kwargs:
self.assertTrue(search.kwargs[kw] in dts)
def test_search_for_items_by_date(self):
""" Search for specific item """
search = self.get_searches()[0]
sids = [r['id'] for r in self.results]
items = search.items()
assert(len(items) == 1)
for s in items:
self.assertTrue(s.id in sids)
def test_empty_search(self):
""" Perform search for 0 results """
search = Search(datetime='2001-01-01')
self.assertEqual(search.found(), 0)
def test_geo_search(self):
""" Perform simple query """
with open(os.path.join(self.path, 'aoi1.geojson')) as f:
aoi = json.dumps(json.load(f))
search = Search(datetime='2019-07-01', intersects=aoi)
assert(search.found() == 13)
items = search.items()
assert(len(items) == 13)
assert(isinstance(items[0], Item))
def test_search_sort(self):
""" Perform search with sort """
with open(os.path.join(self.path, 'aoi1.geojson')) as f:
aoi = json.dumps(json.load(f))
search = Search.search(datetime='2019-07-01/2019-07-07', intersects=aoi, sort=['<datetime'])
items = search.items()
assert(len(items) == 27)
def test_get_items_by_id(self):
""" Get Items by ID """
ids = ['LC81692212019263', 'LC81691102019263']
items = Search.items_by_id(ids, collection='landsat-8-l1')
assert(len(items) == 2)
def test_get_ids_search(self):
""" Get Items by ID through normal search """
ids = ['LC81692212019263', 'LC81691102019263']
search = Search.search(ids=ids, collection='landsat-8-l1')
items = search.items()
assert(search.found() == 2)
assert(len(items) == 2)
def test_get_ids_without_collection(self):
with self.assertRaises(SatSearchError):
search = Search.search(ids=['LC80340332018034LGN00'])
items = search.items()
def test_query_bad_url(self):
with self.assertRaises(SatSearchError):
Search.query(url=os.path.join(config.API_URL, 'collections/nosuchcollection'))
def test_search_property_operator(self):
expected = {'query': {'eo:cloud_cover': {'lte': '10'}, 'collection': {'eq': 'sentinel-2-l1c'}}}
instance = Search.search(collection='sentinel-2-l1c',
property=['eo:cloud_cover<=10'])
actual = instance.kwargs
assert actual == expected
| 34.858586 | 103 | 0.606201 | 3,285 | 0.951898 | 0 | 0 | 217 | 0.06288 | 0 | 0 | 680 | 0.197044 |
7e6ca5f5a32c06e55254d834d4ec7b22d2307f05 | 8,468 | py | Python | src/lck/django/common/forms.py | ar4s/kitdjango | 1da443296ac780ae002b66452df2251642374a13 | [
"MIT"
] | 10 | 2015-06-17T08:22:10.000Z | 2020-12-10T13:48:37.000Z | src/lck/django/common/forms.py | ar4s/kitdjango | 1da443296ac780ae002b66452df2251642374a13 | [
"MIT"
] | null | null | null | src/lck/django/common/forms.py | ar4s/kitdjango | 1da443296ac780ae002b66452df2251642374a13 | [
"MIT"
] | 8 | 2015-03-23T10:59:22.000Z | 2022-01-17T09:49:26.000Z | """
Albeit useful, this module is still somewhat a mess in a really early state of development. Beware, there be dragons.
"""
import datetime
import os
import re
from subprocess import check_call, CalledProcessError
from tempfile import NamedTemporaryFile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django import forms
from django.forms.extras.widgets import RE_DATE, SelectDateWidget
from django.forms.widgets import Select, RadioFieldRenderer, HiddenInput
from django.forms.util import flatatt
from django.utils.dates import MONTHS
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
class JQueryUIRadioInput(StrAndUnicode):
"""
An object used by RadioFieldRenderer that represents a single
<input type='radio'>.
"""
def __init__(self, name, value, attrs, choice, index):
self.name, self.value = name, value
self.attrs = attrs
self.choice_value = force_unicode(choice[0])
self.choice_label = force_unicode(choice[1])
self.index = index
def __unicode__(self):
if 'id' in self.attrs:
label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
else:
label_for = ''
choice_label = conditional_escape(force_unicode(self.choice_label))
return mark_safe(u'%s<label%s>%s</label>' % (self.tag(), label_for,
choice_label))
def is_checked(self):
return self.value == self.choice_value
def tag(self):
if 'id' in self.attrs:
self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
final_attrs = dict(self.attrs, type='radio', name=self.name,
value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return mark_safe(u'<input%s />' % flatatt(final_attrs))
class JQueryUIRenderer(StrAndUnicode):
"""
A customized renderer for radio fields.
"""
def __init__(self, name, value, attrs, choices):
self.name, self.value, self.attrs = name, value, attrs
self.choices = choices
def __iter__(self):
for i, choice in enumerate(self.choices):
if not choice[0]:
continue
yield JQueryUIRadioInput(self.name,
self.value,
self.attrs.copy(),
choice,
i)
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propogate
return JQueryUIRadioInput(self.name,
self.value,
self.attrs.copy(),
choice,
idx)
def __unicode__(self):
return self.render()
def render(self):
return mark_safe('<div class="radio">\n%s\n</div>' \
% '\n'.join([force_unicode(w) for w in self]))
class JQueryMobileVerticalRadioGroupRenderer(JQueryUIRenderer):
data_type = ""
def render(self):
return mark_safe('<div data-role="fieldcontain"><fieldset '
'data-role="controlgroup" %s>\n%s\n</fieldset></div>' %
(self.data_type, '\n'.join([force_unicode(w) for w in self])))
class JQueryMobileHorizontalRadioGroupRenderer(JQueryMobileVerticalRadioGroupRenderer):
data_type = 'data-type="horizontal"'
class JQueryUIRadioSelect(forms.RadioSelect):
renderer = JQueryUIRenderer
@classmethod
def id_for_label(cls, id_):
return id_
class JQueryMobileVerticalRadioGroup(JQueryUIRadioSelect):
renderer = JQueryMobileVerticalRadioGroupRenderer
class JQueryMobileHorizontalRadioGroup(JQueryUIRadioSelect):
renderer = JQueryMobileHorizontalRadioGroupRenderer
class PolishSelectDateWidget(SelectDateWidget):
def __init__(self, attrs=None, years=None, reverse_years=False):
self.reverse_years = reverse_years
super(PolishSelectDateWidget, self).__init__(attrs, years)
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, basestring):
match = RE_DATE.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
output = []
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
day_choices = [(i, i) for i in range(1, 32)]
local_attrs = self.build_attrs(id=self.day_field % id_)
select_html = Select(choices=day_choices).render(self.day_field % name, day_val, local_attrs)
output.append(select_html)
month_choices = MONTHS.items()
month_choices.sort()
local_attrs['id'] = self.month_field % id_
select_html = Select(choices=month_choices).render(self.month_field % name, month_val, local_attrs)
output.append(select_html)
year_choices = [(i, i) for i in self.years]
local_attrs['id'] = self.year_field % id_
if self.reverse_years:
year_choices.reverse()
select_html = Select(choices=year_choices).render(self.year_field % name, year_val, local_attrs)
output.append(select_html)
return mark_safe(u'\n'.join(output))
class WebpImageField(forms.ImageField):
"""Extends the default django ImageField with WEBP support through
`dwebp <http://code.google.com/intl/pl/speed/webp/docs/dwebp.html>`_.
Converts image data to PNG on the fly so that PIL (as of 1.1.7) is able
to use it.
"""
def __init__(self, *args, **kwargs):
print args
print kwargs
super(WebpImageField, self).__init__(*args, **kwargs)
def to_python(self, data):
try:
# try PIL-supported images first
return super(WebpImageField, self).to_python(data)
except forms.ValidationError, e:
pass
if hasattr(data, 'temporary_file_path'):
# file already on disk
file = data.temporary_file_path()
abs_path = os.path.splitext(file)[0] + '.png'
devnull = os.open(os.devnull, os.O_RDWR)
try:
check_call(['dwebp', file, '-o', abs_path],
stdout=devnull, stderr=devnull)
data.temporary_file_path = abs_path
data.name = os.path.basename(abs_path)
                data.size = os.path.getsize(abs_path)
os.unlink(file)
except CalledProcessError:
                raise forms.ValidationError(self.error_messages['invalid_image'])
finally:
os.close(devnull)
else:
with NamedTemporaryFile() as file:
abs_path = os.path.splitext(file.name)[0] + '.png'
if hasattr(data, 'read'):
# InMemoryUploadFile
data.seek(0)
file.write(data.read())
output = data.file
else:
file.write(data['content'])
output = data['content']
file.flush()
devnull = os.open(os.devnull, os.O_RDWR)
try:
check_call(['dwebp', file.name, '-o', abs_path],
stdout=devnull, stderr=devnull)
# Monkey-patch the UploadFile object.
data.name = os.path.basename(abs_path)
data.size = os.path.getsize(abs_path)
with open(abs_path, 'rb') as image:
# InMemoryUploadFile
if hasattr(data, 'read'):
data.file = StringIO(image.read())
else:
data['content'] = StringIO(image.read())
except CalledProcessError:
                    raise forms.ValidationError(self.error_messages['invalid_image'])
finally:
os.close(devnull)
# Return the monkey-patched UploadFile object.
return data
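
# --- Editorial sketch (not part of the original module) ---------------------
# A minimal example of how WebpImageField might be wired into a form so that
# WEBP uploads are transparently converted for PIL. The form name, field name
# and label are illustrative assumptions, not code from this project.
class ExampleAvatarForm(forms.Form):
    picture = WebpImageField(label='Picture (WEBP is converted to PNG via dwebp)')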
| 35.430962 | 117 | 0.590694 | 7,626 | 0.900567 | 362 | 0.042749 | 63 | 0.00744 | 0 | 0 | 1,164 | 0.137459 |
7e6ce7954f14f677ee1fb45483daa4344da6bdb8 | 2,502 | py | Python | utils/model_utils.py | Pandinosaurus/PerceptualImageError | 0513613d1f575136ea52418e35f2cebc5a49f783 | [
"BSD-2-Clause"
] | null | null | null | utils/model_utils.py | Pandinosaurus/PerceptualImageError | 0513613d1f575136ea52418e35f2cebc5a49f783 | [
"BSD-2-Clause"
] | null | null | null | utils/model_utils.py | Pandinosaurus/PerceptualImageError | 0513613d1f575136ea52418e35f2cebc5a49f783 | [
"BSD-2-Clause"
] | null | null | null | import tensorflow as tf
def extract_image_patches(image_batch, patch_size,patch_stride):
patches = tf.extract_image_patches(images =image_batch,ksizes=[1,patch_size,patch_size,1],rates=[1,1,1,1],strides=[1,patch_stride,patch_stride,1],padding='VALID')
patches_shape = patches.get_shape().as_list()
return tf.reshape(patches,[-1,patch_size,patch_size,3])#, patches_shape[1]*patches_shape[2] # NOTE: assuming 3 channels
def conv_init(name,input_channels, filter_height, filter_width, num_filters, groups=1):
weights = get_scope_variable(name, 'weights', shape=[filter_height, filter_width, input_channels/groups, num_filters], trainable=False)
biases = get_scope_variable(name, 'biases', shape = [num_filters],trainable=False)
def fc_init(name, num_in, num_out):
weights = get_scope_variable(name, 'weights', shape=[num_in, num_out], trainable=False)
biases = get_scope_variable(name, 'biases', shape=[num_out], trainable=False)
def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name, padding='SAME', relu=True):
input_channels = int(x.get_shape().as_list()[3])
convolve = lambda i, k: tf.nn.conv2d(i, k, strides = [1, stride_y, stride_x, 1], padding = padding)
weights = get_scope_variable(name, 'weights', shape=[filter_height, filter_width, input_channels, num_filters])
biases = get_scope_variable(name, 'biases', shape = [num_filters])
conv = convolve(x, weights)
bias_val = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))
if relu == True:
relu = tf.nn.relu(bias_val, name = name)
return relu
else:
return bias_val
def fc(x, num_in, num_out, name, relu = True):
weights = get_scope_variable(name, 'weights', shape=[num_in, num_out])
biases = get_scope_variable(name, 'biases', shape=[num_out])
act = tf.nn.xw_plus_b(x, weights, biases, name=name)
if relu == True:
relu = tf.nn.relu(act)
return relu
else:
return act
def max_pool(x, filter_height, filter_width, stride_y, stride_x, name, padding='SAME'):
return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1], strides = [1, stride_y, stride_x, 1], padding = padding, name = name)
def dropout(x, keep_prob):
return tf.nn.dropout(x, keep_prob)
def get_scope_variable(scope_name, var, shape=None, initialvals=None,trainable=False):
with tf.variable_scope(scope_name,reuse=tf.AUTO_REUSE) as scope:
v = tf.get_variable(var,shape,dtype=tf.float32, initializer=initialvals,trainable=trainable)
return v
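
# --- Editorial sketch (not part of the original module) ---------------------
# Chains the helpers above into a tiny TF1-style network. The 32x32x3 input
# size, layer widths and scope names are arbitrary assumptions made purely
# for illustration.
def _example_tiny_net(images):
    # images: float32 tensor of static shape [batch, 32, 32, 3] (assumed)
    x = conv(images, 3, 3, 16, 1, 1, name='example_conv1')   # -> [batch, 32, 32, 16]
    x = max_pool(x, 2, 2, 2, 2, name='example_pool1')        # -> [batch, 16, 16, 16]
    x = tf.reshape(x, [-1, 16 * 16 * 16])                    # flatten for the fc layer
    return fc(x, 16 * 16 * 16, 10, name='example_fc1', relu=False)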
| 42.40678 | 163 | 0.735412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.060352 |
7e6cf6505ad3c6585ee75f9c82d03c7ef24336fd | 7,302 | py | Python | kubelet/datadog_checks/kubelet/summary.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | kubelet/datadog_checks/kubelet/summary.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | kubelet/datadog_checks/kubelet/summary.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import division
from fnmatch import fnmatch
from datadog_checks.base.utils.tagging import tagger
from .common import replace_container_rt_prefix, tags_for_docker, tags_for_pod
class SummaryScraperMixin(object):
"""
This class scrapes metrics from Kubelet "/stats/summary" endpoint
"""
def process_stats_summary(self, pod_list_utils, stats, instance_tags, main_stats_source):
# Reports system container metrics (node-wide)
self._report_system_container_metrics(stats, instance_tags)
# Reports POD & Container metrics. If `main_stats_source` is set, retrieve everything it can
# Otherwise retrieves only what we cannot get elsewhere
self._report_metrics(pod_list_utils, stats, instance_tags, main_stats_source)
def _report_metrics(self, pod_list_utils, stats, instance_tags, main_stats_source):
for pod in stats.get('pods', []):
pod_namespace = pod.get('podRef', {}).get('namespace')
pod_name = pod.get('podRef', {}).get('name')
pod_uid = pod.get('podRef', {}).get('uid')
if pod_namespace is None or pod_name is None or pod_uid is None:
self.log.warning("Got incomplete results from '/stats/summary', missing data for POD: %s", pod)
continue
if pod_list_utils.is_namespace_excluded(pod_namespace):
continue
self._report_pod_stats(
pod_namespace, pod_name, pod_uid, pod, pod_list_utils, instance_tags, main_stats_source
)
self._report_container_stats(
pod_namespace, pod_name, pod.get('containers', []), pod_list_utils, instance_tags, main_stats_source
)
def _report_pod_stats(
self, pod_namespace, pod_name, pod_uid, pod, pod_list_utils, instance_tags, main_stats_source
):
# avoid calling the tagger for pods that aren't running, as these are
# never stored
pod_phase = pod_list_utils.pods.get(pod_uid, {}).get('status', {}).get('phase', None)
if pod_phase != 'Running':
return
pod_tags = tags_for_pod(pod_uid, tagger.ORCHESTRATOR)
if not pod_tags:
self.log.debug("Tags not found for pod: %s/%s - no metrics will be sent", pod_namespace, pod_name)
return
pod_tags += instance_tags
used_bytes = pod.get('ephemeral-storage', {}).get('usedBytes')
if used_bytes:
self.gauge(self.NAMESPACE + '.ephemeral_storage.usage', used_bytes, pod_tags)
# Metrics below should already be gathered by another mean (cadvisor endpoints)
if not main_stats_source:
return
# Processing summary based network level metrics
net_pod_metrics = {'rxBytes': 'kubernetes.network.rx_bytes', 'txBytes': 'kubernetes.network.tx_bytes'}
for k, v in net_pod_metrics.items():
# ensure we can filter out metrics per the configuration.
pod_level_match = any([fnmatch(v, p) for p in self.pod_level_metrics])
enabled_rate = any([fnmatch(v, p) for p in self.enabled_rates])
if pod_level_match and enabled_rate:
net_bytes = pod.get('network', {}).get(k)
if net_bytes:
self.rate(v, net_bytes, pod_tags)
def _report_container_stats(
self, pod_namespace, pod_name, containers, pod_list_utils, instance_tags, main_stats_source
):
# Metrics below should already be gathered by another mean (cadvisor endpoints)
if not main_stats_source:
return
for container in containers:
container_name = container.get('name')
if container_name is None:
self.log.warning(
"Kubelet reported stats without container name for pod: %s/%s", pod_namespace, pod_name
)
continue
# No mistake, we need to give a tuple as parameter
container_id = pod_list_utils.get_cid_by_name_tuple((pod_namespace, pod_name, container_name))
if container_id is None:
self.log.debug(
"Container id not found from /pods for container: %s/%s/%s - no metrics will be sent",
pod_namespace,
pod_name,
container_name,
)
continue
# TODO: In `containers` we also have terminated init-containers, probably to be excluded?
if pod_list_utils.is_excluded(container_id):
continue
# Finally, we can get tags for this container
container_tags = tags_for_docker(replace_container_rt_prefix(container_id), tagger.HIGH, True)
if not container_tags:
self.log.debug(
"Tags not found for container: %s/%s/%s:%s - no metrics will be sent",
pod_namespace,
pod_name,
container_name,
container_id,
)
container_tags += instance_tags
cpu_total = container.get('cpu', {}).get('usageCoreNanoSeconds')
if cpu_total:
self.rate(self.NAMESPACE + '.cpu.usage.total', cpu_total, container_tags)
working_set = container.get('memory', {}).get('workingSetBytes')
if working_set:
self.gauge(self.NAMESPACE + '.memory.working_set', working_set, container_tags)
# TODO: Review meaning of these metrics as capacity != available + used
# availableBytes = container.get('rootfs', {}).get('availableBytes')
capacity_bytes = container.get('rootfs', {}).get('capacityBytes')
used_bytes = container.get('rootfs', {}).get('usedBytes')
if used_bytes is not None:
self.gauge(self.NAMESPACE + '.filesystem.usage', used_bytes, container_tags)
if used_bytes is not None and capacity_bytes is not None:
self.gauge(self.NAMESPACE + '.filesystem.usage_pct', float(used_bytes) / capacity_bytes, container_tags)
def _report_system_container_metrics(self, stats, instance_tags):
sys_containers = stats.get('node', {}).get('systemContainers', [])
for ctr in sys_containers:
if ctr.get('name') == 'runtime':
mem_rss = ctr.get('memory', {}).get('rssBytes')
if mem_rss:
self.gauge(self.NAMESPACE + '.runtime.memory.rss', mem_rss, instance_tags)
cpu_usage = ctr.get('cpu', {}).get('usageNanoCores')
if cpu_usage:
self.gauge(self.NAMESPACE + '.runtime.cpu.usage', cpu_usage, instance_tags)
if ctr.get('name') == 'kubelet':
mem_rss = ctr.get('memory', {}).get('rssBytes')
if mem_rss:
self.gauge(self.NAMESPACE + '.kubelet.memory.rss', mem_rss, instance_tags)
cpu_usage = ctr.get('cpu', {}).get('usageNanoCores')
if cpu_usage:
self.gauge(self.NAMESPACE + '.kubelet.cpu.usage', cpu_usage, instance_tags)
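
# --- Editorial sketch (not part of the original check) ----------------------
# Minimal shape of the kubelet "/stats/summary" payload that
# process_stats_summary() consumes, reconstructed from the keys read above.
# All names and numeric values are illustrative assumptions.
_EXAMPLE_STATS_SUMMARY = {
    'node': {
        'systemContainers': [
            {'name': 'kubelet',
             'memory': {'rssBytes': 74 * 1024 * 1024},
             'cpu': {'usageNanoCores': 5 * 1000 * 1000}},
        ]
    },
    'pods': [
        {'podRef': {'namespace': 'default', 'name': 'web-0', 'uid': 'abc-123'},
         'ephemeral-storage': {'usedBytes': 1048576},
         'network': {'rxBytes': 2048, 'txBytes': 1024},
         'containers': [
             {'name': 'web',
              'cpu': {'usageCoreNanoSeconds': 1200000000},
              'memory': {'workingSetBytes': 52 * 1024 * 1024},
              'rootfs': {'capacityBytes': 10 * 1024 ** 3, 'usedBytes': 123456789}},
         ]},
    ],
}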
| 47.109677 | 120 | 0.613667 | 6,995 | 0.957957 | 0 | 0 | 0 | 0 | 0 | 0 | 2,018 | 0.276363 |
7e6efd55ee5bacdaf7a9ad2f734f50df6af5bd9a | 2,027 | py | Python | Search_result/srr9.py | tanayz/Kaggle | 0dabcf5ccc2432cecd12f91fba9dfda64dc1afdd | [
"Apache-2.0"
] | null | null | null | Search_result/srr9.py | tanayz/Kaggle | 0dabcf5ccc2432cecd12f91fba9dfda64dc1afdd | [
"Apache-2.0"
] | null | null | null | Search_result/srr9.py | tanayz/Kaggle | 0dabcf5ccc2432cecd12f91fba9dfda64dc1afdd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 06:34:04 2015
@author: tanay
"""
from lasagne.layers import InputLayer, DropoutLayer, DenseLayer
from lasagne.updates import nesterov_momentum
from lasagne.objectives import binary_crossentropy
from nolearn.lasagne import NeuralNet
import theano
from theano import tensor as T
from theano.tensor.nnet import sigmoid
from sklearn import metrics
from sklearn.utils import shuffle
import numpy as np
learning_rate = theano.shared(np.float32(0.1))
input_size = Xtrh.shape[1]  # number of input features; the full shape tuple is not a valid layer size
class AdjustVariable(object):
def __init__(self, variable, target, half_life=20):
self.variable = variable
self.target = target
self.half_life = half_life
def __call__(self, nn, train_history):
delta = self.variable.get_value() - self.target
delta /= 2**(1.0/self.half_life)
self.variable.set_value(np.float32(self.target + delta))
net = NeuralNet(
layers=[
('input', InputLayer),
('hidden1', DenseLayer),
('dropout1', DropoutLayer),
('hidden2', DenseLayer),
('dropout2', DropoutLayer),
('output', DenseLayer),
],
# layer parameters:
input_shape=(None, input_size),
hidden1_num_units=400,
dropout1_p=0.4,
hidden2_num_units=200,
dropout2_p=0.4,
output_nonlinearity=sigmoid,
output_num_units=4,
# optimization method:
update=nesterov_momentum,
update_learning_rate=learning_rate,
update_momentum=0.899,
# Decay the learning rate
on_epoch_finished=[
AdjustVariable(learning_rate, target=0, half_life=4),
],
# This is silly, but we don't want a stratified K-Fold here
# To compensate we need to pass in the y_tensor_type and the loss.
regression=True,
y_tensor_type = T.imatrix,
objective_loss_function = binary_crossentropy,
max_epochs=75,
eval_size=0.1,
verbose=1,
)
X, y = shuffle(Xtrh, y, random_state=123)
net.fit(X, y)
_, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
probas = net.predict_proba(X_valid)[:,0]
print("ROC score", metrics.roc_auc_score(y_valid, probas))
| 25.658228 | 66 | 0.730143 | 387 | 0.190923 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.166256 |
7e706c63ed781d4a98c0e2432ffb583ee2d4d55c | 1,795 | py | Python | typarser/_base_optarg.py | ermishechkin/typarser | 00d965f87718c837a2c246f0b5c61401e8a6e3c8 | [
"MIT"
] | null | null | null | typarser/_base_optarg.py | ermishechkin/typarser | 00d965f87718c837a2c246f0b5c61401e8a6e3c8 | [
"MIT"
] | null | null | null | typarser/_base_optarg.py | ermishechkin/typarser | 00d965f87718c837a2c246f0b5c61401e8a6e3c8 | [
"MIT"
] | null | null | null | from __future__ import annotations
import typing
from typing import Callable, Generic, Iterable, Literal, Optional, Tuple, Union
from ._base import RESULT, TYPE, BaseComponent
from ._internal_namespace import set_value
if typing.TYPE_CHECKING:
from ._base import Namespace
from .action import Action
NARGS = Union[int, Literal['*'], Literal['+'], Literal['?']]
class BaseOptArg(BaseComponent[TYPE, RESULT], Generic[TYPE, RESULT]):
def __init__(
self,
*,
type: Callable[[str], TYPE], # pylint: disable=redefined-builtin
nargs: Optional[NARGS],
choices: Optional[Iterable[TYPE]],
default: Optional[Union[RESULT, str]],
metavar: Optional[Union[str, Tuple[str, ...]]],
action: Optional[Action] = None,
help: Optional[str], # pylint: disable=redefined-builtin
) -> None:
super().__init__(help=help)
self._type = type
self._nargs = nargs
self._choices = tuple(choices) if choices else None
self._default = default
self._metavar = metavar
self._action = action
@property
def type(self) -> Callable[[str], TYPE]:
return self._type
@property
def nargs(self) -> Optional[NARGS]:
return self._nargs
@property
def choices(self) -> Optional[Tuple[TYPE, ...]]:
return self._choices
@property
def default(self) -> Optional[Union[RESULT, str]]:
return self._default
@property
def metavar(self) -> Optional[Union[str, Tuple[str, ...]]]:
return self._metavar
@property
def action(self) -> Optional[Action]:
return self._action
def __set__(self, owner: Namespace, value: TYPE):
set_value(owner, self, value)
| 29.42623 | 79 | 0.622284 | 1,416 | 0.788858 | 0 | 0 | 521 | 0.290251 | 0 | 0 | 79 | 0.044011 |
7e706e01366d67075b4a31f867ccfdf06dd51be5 | 766 | py | Python | docs/tutorial/local.py | prodigyfinance/ml2p | a6df55e9ce81e68619fd2f2891981a39a9186651 | [
"0BSD"
] | 3 | 2021-10-11T05:35:45.000Z | 2022-02-21T09:54:16.000Z | docs/tutorial/local.py | hodgestar/ml2p | 3f82e7fbf79345cead67ee18de88589a1ae82b97 | [
"0BSD"
] | 7 | 2020-09-16T13:55:16.000Z | 2021-06-11T08:38:03.000Z | docs/tutorial/local.py | hodgestar/ml2p | 3f82e7fbf79345cead67ee18de88589a1ae82b97 | [
"0BSD"
] | 3 | 2020-09-15T14:38:25.000Z | 2020-09-17T20:35:35.000Z | # -*- coding: utf-8 -*-
""" Train the Boston house prices model on your local machine.
"""
import pandas as pd
from ml2p.core import LocalEnv
import model
def train(env):
""" Train and save the model locally. """
trainer = model.BostonModel().trainer(env)
trainer.train()
def predict(env):
""" Load a model and make predictions locally. """
predictor = model.BostonModel().predictor(env)
predictor.setup()
data = pd.read_csv("house-prices.csv")
house = dict(data.iloc[0])
del house["target"]
print("Making a prediction for:")
print(house)
result = predictor.invoke(house)
print("Prediction:")
print(result)
if __name__ == "__main__":
env = LocalEnv(".", "ml2p.yml")
train(env)
predict(env)
| 21.885714 | 62 | 0.644909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.349869 |
7e713a438f11abe5a5a8f398bae01323ba662ddc | 457 | py | Python | examples/decorator_args.py | gwilkes/botpie | 53d0313beb3f5710cab71ab30dc8a33831e8e6a4 | [
"MIT"
] | null | null | null | examples/decorator_args.py | gwilkes/botpie | 53d0313beb3f5710cab71ab30dc8a33831e8e6a4 | [
"MIT"
] | null | null | null | examples/decorator_args.py | gwilkes/botpie | 53d0313beb3f5710cab71ab30dc8a33831e8e6a4 | [
"MIT"
] | null | null | null | import botpie
import random
message = botpie.utils.argvstr()
bot = botpie.Bot("InfoBot")
@bot.command("sens")
@bot.argument("game", default="ow", nargs="?", help="pick a game")
def sens(game):
gamesens = {
"ow": "2.1",
"cod": "3",
"d2": "4"
}
sens = gamesens.get(game)
if sens:
return f"{game} mouse sensitivity is {sens}"
result = bot.inspectstr(message)
if result:
print(result) | 20.772727 | 67 | 0.562363 | 0 | 0 | 0 | 0 | 291 | 0.636761 | 0 | 0 | 102 | 0.223195 |
7e71b09e8c3b6c84f5eb91628da2d2611269feb8 | 3,242 | py | Python | src/PyMud/model/account.py | NichCritic/pymud | 583ec16f5a75dc7b45146564b39851291dc07b6c | [
"MIT"
] | null | null | null | src/PyMud/model/account.py | NichCritic/pymud | 583ec16f5a75dc7b45146564b39851291dc07b6c | [
"MIT"
] | null | null | null | src/PyMud/model/account.py | NichCritic/pymud | 583ec16f5a75dc7b45146564b39851291dc07b6c | [
"MIT"
] | null | null | null | '''
Created on Nov 27, 2011
@author: Nich
'''
import uuid
from model.base import Base
from sqlalchemy import Column, String, ForeignKey
from sqlalchemy.orm import relationship
class Account(Base):
__tablename__ = "account"
id = Column(String, primary_key=True)
first_name = Column(String)
last_name = Column(String)
email = Column(String, unique = True)
avatars = relationship("AccountAvatar")
def __init__(self, first_name, last_name, email):
self.id = str(uuid.uuid4())
self.first_name = first_name
self.last_name = last_name
self.email = email
class AccountAvatar(Base):
__tablename__ = "account_avatar"
account_id = Column(String, ForeignKey("account.id"), primary_key = True)
avatar_id = Column(String, ForeignKey("entity.id"), primary_key = True)
avatar = relationship("Avatar")
class AccountUtils():
def __init__(self, avatar_factory):
self.avatar_factory = avatar_factory
def print_all(self, session):
        for instance in session.query(Account).order_by(Account.id):
            print(instance.first_name, instance.last_name)
def get(self, email, session):
try:
acct = session.query(Account).filter(Account.email==email).one()
return acct
except:
return None
def get_by_id(self, id, session):
acct = session.query(Account).filter(Account.id==id).one()
return acct
def make_account(self, first_name, last_name, email, session):
a = Account(first_name, last_name, email)
session.add(a)
return a
def create_new_avatar_for_account(self, account_id, data, session):
a = self.avatar_factory.create_default_avatar(data)
account = session.query(Account).filter(Account.id==account_id).one()
ac_av = AccountAvatar()
ac_av.account_id = account.id
ac_av.avatar = a
account.avatars.append(ac_av)
session.add(ac_av)
def get_previous_avatar_for_player(self, player_id, session):
from objects.components import PlayerControlled
avatar = session.query(PlayerControlled.__table__).filter(PlayerControlled.pid==player_id).first()
return avatar.entity_id
def get_avatars_for_account(self, account, session):
session.add(account)
avatars = account.avatars
return avatars
def set_avatars_pid(self, avatar_id, player_id, session):
from objects.components import PlayerControlled
from sqlalchemy import update
ex = update(PlayerControlled.__table__).where(PlayerControlled.entity_id==avatar_id).values(pid=player_id)
session.execute(ex)
def handle_login(self, user, player_factory, session):
db_user = self.get(user["email"], session)
if db_user is None:
db_user = self.make_account(user["given_name"], user["family_name"], user["email"], session)
return db_user
#Base.metadata.create_all(engine) | 28.438596 | 114 | 0.623381 | 3,014 | 0.929673 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.058297 |
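
# --- Editorial sketch (not part of the original module) ----------------------
def _example_handle_login(session, avatar_factory, player_factory):
    """Editorial sketch: rough usage of AccountUtils.handle_login().
    The session and the two factory objects are assumed to be provided by the
    surrounding game code; the user dict mirrors the keys read above."""
    utils = AccountUtils(avatar_factory)
    user_info = {'email': 'player@example.com',
                 'given_name': 'Ada',
                 'family_name': 'Lovelace'}
    return utils.handle_login(user_info, player_factory, session)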
7e72844189e895886424dc16300b05e203165f43 | 46 | py | Python | elevadorJahNoUltimoAndarException.py | IpRocha1/dsoo_exercicio_6 | 69ece39b2189b3a17a9185dca8a6d17acb6b5aa5 | [
"MIT"
] | null | null | null | elevadorJahNoUltimoAndarException.py | IpRocha1/dsoo_exercicio_6 | 69ece39b2189b3a17a9185dca8a6d17acb6b5aa5 | [
"MIT"
] | null | null | null | elevadorJahNoUltimoAndarException.py | IpRocha1/dsoo_exercicio_6 | 69ece39b2189b3a17a9185dca8a6d17acb6b5aa5 | [
"MIT"
] | null | null | null | class ElevadorJahNoUltimoAndarException( ...
| 15.333333 | 44 | 0.826087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e72b5817d31b0de3104c24886db921a7779c4b5 | 630 | py | Python | pysatModels/models/__init__.py | pysat/pysatModelUtils | e563c36531632cd68b4e15d2ac6099fa3cb45aef | [
"BSD-3-Clause"
] | null | null | null | pysatModels/models/__init__.py | pysat/pysatModelUtils | e563c36531632cd68b4e15d2ac6099fa3cb45aef | [
"BSD-3-Clause"
] | 17 | 2019-09-18T18:19:00.000Z | 2020-01-30T18:41:59.000Z | pysatModels/models/__init__.py | pysat/pysatModelUtils | e563c36531632cd68b4e15d2ac6099fa3cb45aef | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2022, pysat development team
# Full license can be found in License.md
# -----------------------------------------------------------------------------
"""Routines for loading model data into a pysat Instrument object."""
# Import key modules and skip F401 testing in flake8
from pysatModels.models import methods # noqa: F401
from pysatModels.models import pydineof_dineof # noqa: F401
from pysatModels.models import sami2py_sami2 # noqa: F401
from pysatModels.models import ucar_tiegcm # noqa: F401
__all__ = ['pydineof_dineof', 'sami2py_sami2', 'ucar_tiegcm']
| 42 | 79 | 0.663492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.669841 |
7e72e12b97f96d60c1b664e0376ea773f0bdcd6f | 2,918 | py | Python | backend/accounts/serializers.py | hvitis/geodjango-rest-vue-boilerplate | bd825fdfc81430ecfac3fe7d3c06a6e1145de1cd | [
"MIT"
] | 5 | 2020-11-23T11:31:40.000Z | 2022-03-11T01:24:46.000Z | backend/accounts/serializers.py | hvitis/geodjango-rest-vue-boilerplate | bd825fdfc81430ecfac3fe7d3c06a6e1145de1cd | [
"MIT"
] | null | null | null | backend/accounts/serializers.py | hvitis/geodjango-rest-vue-boilerplate | bd825fdfc81430ecfac3fe7d3c06a6e1145de1cd | [
"MIT"
] | null | null | null | from rest_framework_gis.serializers import GeoFeatureModelSerializer, GeometrySerializerMethodField
from django.contrib.gis.geos import Point
from accounts.models import UserProfile
import datetime
from rest_framework_gis.pagination import GeoJsonPagination
from django.contrib.auth import get_user_model
from django.utils import timezone
from rest_framework import serializers
from rest_framework_simplejwt.settings import api_settings
from rest_framework.reverse import reverse as api_reverse
from rest_framework_gis import serializers as geo_serializers
User = get_user_model()
class UserPublicSerializer(serializers.ModelSerializer):
uri = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = [
'id',
'username',
'uri'
]
def get_uri(self, obj):
request = self.context.get('request')
return api_reverse("api-user:detail", kwargs={"username": obj.username}, request=request)
class UserRegisterSerializer(serializers.ModelSerializer):
password2 = serializers.CharField(
style={'input_type': 'password'}, write_only=True)
token = serializers.SerializerMethodField(read_only=True)
expires = serializers.SerializerMethodField(read_only=True)
message = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = [
'username',
'email',
'password',
'password2',
'token',
'expires',
'message',
]
extra_kwargs = {'password': {'write_only': True}}
def get_message(self, obj):
return "Thank you for registering. Please verify your email before continuing."
def get_expires(self, obj):
        # assumption: the undefined `expire_delta` was meant to be the simplejwt access-token lifetime
        return timezone.now() + api_settings.ACCESS_TOKEN_LIFETIME - datetime.timedelta(seconds=200)
def validate_email(self, value):
qs = User.objects.filter(email__iexact=value)
if qs.exists():
raise serializers.ValidationError(
"User with this email already exists")
return value
def validate_username(self, value):
qs = User.objects.filter(username__iexact=value)
if qs.exists():
raise serializers.ValidationError(
"User with this username already exists")
return value
def validate(self, data):
pw = data.get('password')
pw2 = data.pop('password2')
if pw != pw2:
raise serializers.ValidationError("Passwords must match")
return data
def create(self, validated_data):
#print(validated_data)
user_obj = User(
username=validated_data.get('username'),
email=validated_data.get('email'))
user_obj.set_password(validated_data.get('password'))
user_obj.is_active = False
user_obj.save()
return user_obj
| 30.715789 | 99 | 0.664839 | 2,324 | 0.796436 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.138108 |
7e7440fb1d1c7e95290bfc94cc2ad2a4debb8384 | 734 | py | Python | main.py | henryfox/encrypted-chat | c69ba786892dd870cb3fddd7da6ea77eadeb4a7e | [
"MIT"
] | null | null | null | main.py | henryfox/encrypted-chat | c69ba786892dd870cb3fddd7da6ea77eadeb4a7e | [
"MIT"
] | null | null | null | main.py | henryfox/encrypted-chat | c69ba786892dd870cb3fddd7da6ea77eadeb4a7e | [
"MIT"
] | null | null | null | import webapp2
import os
import jinja2
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), "templates")
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class MainHandler(Handler):
def get(self):
self.response.write('Hello world!')
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
| 27.185185 | 98 | 0.690736 | 416 | 0.566757 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.038147 |
7e74455914c904362f3673de521f4d7c1b0cced0 | 32,898 | py | Python | lib/data_processing.py | ofirmg/football2vec | 302dc89bcdb19d00a7bd23ae8d28c103bc2f0482 | [
"Apache-2.0"
] | 11 | 2021-10-01T15:14:41.000Z | 2022-03-31T02:26:08.000Z | lib/data_processing.py | ofirmg/football2vec | 302dc89bcdb19d00a7bd23ae8d28c103bc2f0482 | [
"Apache-2.0"
] | null | null | null | lib/data_processing.py | ofirmg/football2vec | 302dc89bcdb19d00a7bd23ae8d28c103bc2f0482 | [
"Apache-2.0"
] | 1 | 2022-03-17T17:38:06.000Z | 2022-03-17T17:38:06.000Z | """
Created on September 5 2021
Data processing module of football2vec. Contains classes: FootballTokenizer, Corpus.
Also contains the build_data_objects function and its nested function for building the core data objects.
@author: Ofir Magdaci (@Magdaci)
"""
import numpy as np
import pandas as pd
import ast
from tqdm import tqdm
import os
import pickle
import plotly.express as px
from sklearn.preprocessing import OrdinalEncoder, QuantileTransformer
from lib.data_handler import load_all_events_data, load_players_metadata, load_matches_metadata, get_teams_metadata
from lib.language_patterns import check_if_shot_scored, check_if_one_one_one_chance, check_if_shot_outside_box, \
check_if_dribble_won
from lib.params import COLUMNS, PATHS, ARTIFACTS
from lib.utils import to_metric_centered_coordinates, get_location_bin
tqdm.pandas()
class FootballTokenizer:
def __init__(self, **kwargs):
self.tokens_encoder = OrdinalEncoder()
self.num_x_bins = kwargs.get('num_x_bins', 5)
self.num_y_bins = kwargs.get('num_y_bins', 5)
self.actions_to_ignore_outcome = kwargs.get('actions_to_ignore_outcome', ['duel'])
def tokenize_action(self, action: pd.Series) -> str:
'''
Convert action - a record of StatsBomb events data - to a string token
:param action: Series, a single action
:return: token - string
'''
action_name = action.get('type', action.get(COLUMNS.ACTION_TYPE, np.nan))
if action_name is np.nan:
return np.nan
else:
action_name = action_name.lower()
token = f'<{action_name}>'
# Add location
if action['location'] is not np.nan and action['location'] is not np.nan:
if isinstance(action['location'], str):
x, y = ast.literal_eval(action['location'])
elif isinstance(action['location'], list) or isinstance(action['location'], tuple):
x, y = action['location']
else:
raise ValueError("Unfamiliar value for action location:", action['location'])
location_bin = get_location_bin(x, y, num_x_bins=self.num_x_bins, num_y_bins=self.num_y_bins)
token = f"{location_bin}".replace(" ", "") + token
return token.replace(" ", "_")
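
# --- Editorial sketch (not part of the original module) ----------------------
# Illustrates the tokenizer on a single hand-crafted action record. The sample
# values are made up, and the exact bin prefix depends on
# lib.utils.get_location_bin, so the resulting token is only indicative
# (roughly "<bin><pass>", with any spaces replaced by underscores).
def _example_tokenize_action():
    ft = FootballTokenizer(num_x_bins=5, num_y_bins=5)
    sample_action = pd.Series({'type': 'Pass', 'location': [60.0, 40.0]})
    return ft.tokenize_action(sample_action)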
class Corpus:
def __init__(self, **kwargs):
'''
        corpus - list of paragraphs. The resolution/aggregation level is determined by self.aggr_columns.
        Aggregation over self.aggr_columns is executed by string grouping, separated by self.separator.
        Language tokens are stored in vocabulary, and their encodings in vocabulary_ix.
        Transformations between tokens and token encodings are provided by self.ix_2_token & self.token_2_ix.
        documents - names of documents, used for Doc2Vec
:param kwargs:
:type kwargs:
'''
self.aggr_columns = kwargs.get('aggr_columns', None)
if self.aggr_columns is None:
self.aggr_columns = ['match_id', 'period', 'possession']
self.ft = kwargs.get('tokenizer', FootballTokenizer(
actions_to_ignore_outcome=kwargs.get('actions_to_ignore_outcome', ['duel'])))
self.separator = kwargs.get('separator', '-')
# Init None attributes
self.corpus = None
self.vocabulary = None
self.vocabulary_ix = None
self.ix_2_token = None
self.token_2_ix = None
self.documents_names = None
self.verbose = kwargs.get('verbose', False)
def build_corpus(self, events_data: pd.DataFrame, allow_concat_documents_=True, **kwargs) -> pd.DataFrame:
'''
        Build the corpus from the given events data. Associates actions with matching tokens, aggregates them into
        sentences, and then into documents.
:param events_data: pd.DataFrame of StatsBomb events data
:param allow_concat_documents_: whether to allow concatenation of sentences to documents if < min length limit
:param kwargs:
:return: vocab_data with new 'token' column. All object attributes are updated.
'''
if self.verbose:
print(f"vocab_data size: {events_data.shape}\n")
vocab_data = events_data.copy()
if self.verbose: print('\nStart Tokenization')
vocab_data['token'] = vocab_data.progress_apply(lambda action:
self.ft.tokenize_action(action),
axis=1)
events_data['token'] = vocab_data['token'].copy()
vocab_data = vocab_data[~vocab_data['token'].isna()]
if self.verbose:
print('Done.')
print(f"Vocab_data size after processing and removing NAs tokens: {events_data.shape}\n")
vocabulary = [val for val in vocab_data['token'].unique() if val is not np.nan]
vocabulary.extend(['oov'])
if self.verbose:
print(f'Raw length of vocabulary: (including oov)', len(vocabulary))
# Create mappers of token to index and vice versa
ix_2_token = dict(enumerate(vocabulary))
ix_2_token = {str(key): val for key, val in ix_2_token.items()}
token_2_ix = {val: key for key, val in ix_2_token.items()}
# Set the appropriate token index for each action
vocab_data['token_ix'] = vocab_data['token'].apply(
lambda token: token_2_ix.get(token, token_2_ix['oov']))
# Keep only columns relevant for sentences grouping
vocab_data = vocab_data[['token_ix', 'token'] + self.aggr_columns]
for col in self.aggr_columns:
vocab_data[col] = vocab_data[col].astype(str)
vocab_data['aggr_key'] = vocab_data[self.aggr_columns].apply(
lambda vec: self.separator.join(vec), axis=1)
# Create sentences and documents
sentences = vocab_data[['aggr_key', 'token_ix']].groupby('aggr_key')
sentences = sentences['token_ix'].agg(list).reset_index()
documents = sentences['aggr_key'].tolist()
sentences = sentences['token_ix'].tolist()
sampling_window = kwargs.get('sampling_window', 5)
corpus = []
if self.verbose:
print('\nBuilding sentences...')
if not allow_concat_documents_:
# If we can't concatenate sentences --> add to corpus sentences that are longer than min threshold
self.documents_names = []
if self.verbose: print('\nBuilding Documents...')
for i, doc_ in tqdm(enumerate(sentences)):
if len(doc_) >= sampling_window:
corpus.append(doc_[:])
self.documents_names.append(documents[i])
if self.verbose: print('Final number of documents_names:', len(self.documents_names))
else:
# Paragraphs can be merged and concatenated
# If we can concatenate multiple short sentences (shorter than min threshold) to longer sentences > merge
if self.verbose:
print('\nConcatenating Documents to build sampling_window sized documents...')
cum_actions_length = 0
cum_actions = []
for sentence_ in tqdm(sentences):
if len(sentence_) >= sampling_window:
corpus.append(sentence_[:])
else:
cum_actions.extend(sentence_[:])
cum_actions_length += len(sentence_)
if cum_actions_length >= sampling_window:
corpus.append(cum_actions[:])
cum_actions_length = 0
cum_actions = []
if self.verbose:
print('Final number of sentences:', len(corpus))
# Update vocabulary
corpus_flat = set([subitem for item in corpus for subitem in item if type(item) is list])
vocaulary_ix = set([token_2_ix[token_] for token_ in vocabulary])
# Update vocabulary after merging sentences
vocabulary_ix = corpus_flat.intersection(vocaulary_ix)
vocabulary = [ix_2_token[token_ix] for token_ix in vocabulary_ix]
if self.verbose:
print('Final length of vocabulary:', len(vocabulary))
# Set class properties
self.corpus = corpus
self.vocabulary = vocabulary
self.vocabulary_ix = vocabulary_ix
self.ix_2_token = ix_2_token
self.token_2_ix = token_2_ix
return events_data
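
# --- Editorial sketch (not part of the original module) ----------------------
# Rough outline of how Corpus can be driven; variable names are illustrative
# and events_data must already contain the StatsBomb columns used by the
# tokenizer and by aggr_columns.
def _example_build_corpus(events_data):
    corpus_builder = Corpus(aggr_columns=['match_id', 'period', 'possession'], verbose=True)
    events_with_tokens = corpus_builder.build_corpus(events_data, allow_concat_documents_=True)
    # corpus_builder.corpus now holds lists of token indices (one list per sentence),
    # and corpus_builder.ix_2_token / corpus_builder.token_2_ix map between tokens and indices.
    return events_with_tokens, corpus_builder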
def get_enriched_events_data(force_create=False, verbose=False, save_artifacts=False, **kwargs) -> pd.DataFrame:
'''
    Build an enriched events_data DataFrame. It applies to_metric_centered_coordinates on the data and adds features:
    - COLUMNS.START_X, COLUMNS.START_Y
    - COLUMNS.GOAL, COLUMNS.OUTBOX_SHOT, COLUMNS.FREE_KICK, COLUMNS.HEADER, COLUMNS.DRIBBLE_WON, COLUMNS.PENALTY,
      COLUMNS.ONE_ON_ONE - boolean indicators of whether the event matches the corresponding filter
    - COLUMNS.XA - expected goal (xG) of the shot resulting from the current pass (if one exists)
:param force_create: whether to force create or try to load existing file [bool]
:param save_artifacts: bool, whether to save the artifacts in to params.PATHS.ARTIFACTS
:return: enriched_events_data
'''
path_prefix = kwargs.get('path_prefix', None)
if path_prefix is not None:
enriched_events_data_path = os.path.join(path_prefix, PATHS.ENRICH_EVENTS_DATA_PATH)
else:
enriched_events_data_path = PATHS.ENRICH_EVENTS_DATA_PATH
if os.path.exists(enriched_events_data_path) and not force_create:
if verbose: print('Loading existing enriched_events_data...')
return pd.read_csv(enriched_events_data_path)
else:
if verbose: print('Building enriched_events_data...')
events_data = load_all_events_data(verbose=verbose)
# Covert key names to lower case as best practice, despite the confusion it may cause
for col in [COLUMNS.TEAM_NAME, COLUMNS.PLAYER_NAME]:
events_data[col] = events_data[col].apply(lambda name_: name_.lower() if isinstance(name_, str) else name_)
if verbose: print(' - Handling coordinates and location...')
events_data = to_metric_centered_coordinates(events_data)
events_data[COLUMNS.START_X] = events_data[COLUMNS.LOCATION].apply(
lambda val: val[0] if isinstance(val, tuple) or isinstance(val, list) else val)
events_data[COLUMNS.START_Y] = events_data[COLUMNS.LOCATION].apply(
lambda val: val[1] if isinstance(val, tuple) or isinstance(val, list) else val)
if verbose: print(' - Adding xG, xA, OUTBOX_SHOT, etc...')
events_data[COLUMNS.GOAL] = events_data.apply(lambda event_: float(check_if_shot_scored(event_)), axis=1)
events_data[COLUMNS.OUTBOX_SHOT] = events_data.apply(
lambda event_: float(check_if_shot_outside_box(event_)), axis=1)
events_data[COLUMNS.HEADER] = events_data.apply(
lambda event_: 1 if pd.notna(event_['shot_body_part_name']) \
and event_['shot_body_part_name'].lower() == 'head' else 0, axis=1)
events_data[COLUMNS.DRIBBLE_WON] = events_data.apply(lambda event_: float(check_if_dribble_won(event_)), axis=1)
events_data[COLUMNS.PENALTY] = events_data.apply(
lambda event_: 1 if pd.notna(event_['shot_type_name'])
and event_['shot_type_name'].lower() == 'penalty' else 0, axis=1)
events_data[COLUMNS.ONE_ON_ONE] = events_data.apply(lambda event_:
float(check_if_one_one_one_chance(event_)), axis=1)
events_data[COLUMNS.FREE_KICK] = events_data.apply(
lambda event_: 1 if pd.notna(event_['shot_type_name']) and event_['shot_type_name'].lower() == 'free kick'
else 0, axis=1)
# xA = xG of receiver
events_data[COLUMNS.XA] = events_data['pass_assisted_shot_id'].apply( \
lambda shot_id: events_data.loc[events_data['id'] == shot_id, COLUMNS.XG].iloc[0] \
if isinstance(shot_id, str) else np.nan)
pass_recipient = 'pass_recipient_name'
events_data[pass_recipient] = events_data[pass_recipient].apply(
lambda val: val.lower() if isinstance(val, str) else val)
events_data[COLUMNS.DRIBBLE_WON] = events_data.apply(lambda action: check_if_dribble_won(action), axis=1)
events_data[COLUMNS.IS_SHOT] = events_data.apply(lambda action: 1 \
if action[COLUMNS.ACTION_TYPE] == 'Shot' else 0, axis=1)
if save_artifacts:
print(f' - Saving to {enriched_events_data_path}...')
if not os.path.exists(ARTIFACTS):
if verbose: print('Creating new ARTIFACTS folder')
os.makedirs(ARTIFACTS)
events_data.to_csv(enriched_events_data_path)
return events_data
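
# --- Editorial sketch (not part of the original module) ----------------------
# Minimal usage of the loader above; the keyword values are illustrative.
def _example_load_enriched_events():
    events = get_enriched_events_data(force_create=False, verbose=True, save_artifacts=False)
    # Besides the raw StatsBomb columns, the frame now carries COLUMNS.START_X / START_Y,
    # the shot-type indicators (GOAL, HEADER, PENALTY, ...) and COLUMNS.XA.
    return events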
def get_enriched_players_metadata(events_data, force_create=False, path_prefix='', verbose=False,
save_artifacts=False) -> dict:
'''
Combines players metadata given in the dataset and enriches it with events_data (vocab_data) information:
    Adds player_name, team_name, position_name (most frequent) and jersey_number (most frequent) per player
:param save_artifacts: whether to save the artifacts in to params.PATHS.ARTIFACTS
:param force_create: whether to force create or try to load existing file [bool]
:param events_data: data frame of events data
:param verbose: print control
'''
enriched_players_metadata_path = path_prefix + PATHS.ENRICH_PLAYERS_METADATA_PATH
if os.path.exists(enriched_players_metadata_path) and not force_create:
if verbose:
print(f'\nLoading players metadata')
with open(enriched_players_metadata_path, 'rb') as f:
return pickle.load(f)
else:
players_metadata = load_players_metadata(force_create=force_create)
players_metadata['player_name_lower'] = players_metadata[COLUMNS.PLAYER_NAME].apply(
lambda val: val.lower() if isinstance(val, str) else val)
vocab_data_cp = events_data[events_data[COLUMNS.PLAYER_NAME].notna()].copy()
players_2_positions = vocab_data_cp[
[COLUMNS.PLAYER_NAME, COLUMNS.POSITION, COLUMNS.MATCH_ID]].copy() \
.drop_duplicates(). \
groupby([COLUMNS.PLAYER_NAME, COLUMNS.POSITION]). \
agg({COLUMNS.MATCH_ID: np.size}).reset_index().sort_values(by=COLUMNS.MATCH_ID, ascending=False)
players_2_positions = players_2_positions.drop_duplicates(subset=[COLUMNS.PLAYER_NAME], keep='first')
players_2_positions.set_index(COLUMNS.PLAYER_NAME, inplace=True)
players_2_positions = players_2_positions.to_dict(orient='index')
players_2_jersey_num = players_metadata[[COLUMNS.PLAYER_NAME, 'jersey_number']].copy(). \
groupby([COLUMNS.PLAYER_NAME]). \
agg({'jersey_number': np.size}).reset_index().sort_values(by='jersey_number', ascending=False)
players_2_jersey_num.set_index(COLUMNS.PLAYER_NAME, inplace=True)
players_2_jersey_num = players_2_jersey_num.to_dict(orient='index')
matches_metadata = load_matches_metadata()
matches_metadata[COLUMNS.MATCH_ID] = matches_metadata[COLUMNS.MATCH_ID].astype(str)
matches_metadata = matches_metadata.set_index(COLUMNS.MATCH_ID)
matches_metadata = matches_metadata.to_dict(orient='index')
vocab_data_cp['match_date'] = vocab_data_cp[COLUMNS.MATCH_ID].apply(
lambda match_: matches_metadata.get(str(match_).split('.')[0], {'match_date': 'unknown'})['match_date'])
players_2_teams = vocab_data_cp.sort_values(by=['match_date'], ascending=False)[
[COLUMNS.PLAYER_NAME, COLUMNS.TEAM_NAME]].copy()
players_2_teams = players_2_teams.drop_duplicates(subset=[COLUMNS.PLAYER_NAME], keep='first')
players_2_teams = players_2_teams.set_index(COLUMNS.PLAYER_NAME)
players_2_teams = players_2_teams.to_dict(orient='index')
# Add most frequent position name into metadata > position_name
players_metadata[COLUMNS.POSITION] = players_metadata[COLUMNS.PLAYER_NAME].apply( \
lambda name_: players_2_positions.get(name_.lower(), {COLUMNS.POSITION: np.nan})[COLUMNS.POSITION])
players_metadata['jersey_number'] = players_metadata[COLUMNS.PLAYER_NAME].apply( \
lambda name_: players_2_jersey_num.get(name_, {'jersey_number': np.nan})['jersey_number'])
players_metadata['team_name'] = players_metadata[COLUMNS.PLAYER_NAME].apply( \
lambda name_: players_2_teams.get(name_.lower(), {COLUMNS.TEAM_NAME: ''})[COLUMNS.TEAM_NAME])
players_metadata = players_metadata.drop_duplicates(subset=[COLUMNS.PLAYER_NAME], keep='first')
players_metadata = players_metadata.set_index(COLUMNS.PLAYER_NAME)
players_metadata = players_metadata.to_dict(orient='index')
if verbose:
print('DONE.')
if save_artifacts:
if not os.path.exists(os.path.join(path_prefix, ARTIFACTS)):
if verbose: print('Creating new ARTIFACTS folder')
os.makedirs(os.path.join(path_prefix, ARTIFACTS))
print(f'Saving enrich_players_metadata to {enriched_players_metadata_path}')
with open(enriched_players_metadata_path, 'wb') as f:
pickle.dump(players_metadata, f, protocol=pickle.HIGHEST_PROTOCOL)
return players_metadata
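
# --- Editorial sketch (not part of the original module) ----------------------
# The metadata built above is a plain dict keyed by player name; the lookup
# below only illustrates the access pattern.
def _example_player_metadata_lookup(events_data):
    players_metadata = get_enriched_players_metadata(events_data)
    some_player = next(iter(players_metadata))
    # Each value carries the enriched fields added above, e.g. the most frequent
    # position (COLUMNS.POSITION), 'jersey_number' and 'team_name'.
    return players_metadata[some_player]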
def create_players_metrics_df(enriched_events_data: pd.DataFrame, matches_metadata: pd.DataFrame, force_create=False,
**kwargs) -> (pd.DataFrame, dict):
'''
Build a DataFrame of conversion and LIFT stats for players - xG, xA, lifts for each shot type, etc...
    It aggregates all players' action results within a single DataFrame.
:param enriched_events_data: pd.DataFrame, enriched version of StatsBomb events data (see get_enriched_events_data)
:param matches_metadata: DataFrame - adds season_name, competition_name for each match in the dataset
:param force_create: whether to force create or try to load existing file [bool]
:return: players_metrics_df (pd.DataFrame), baselines (dict) - the metrics output for variety of benchmarks
'''
verbose = kwargs.get('verbose', False)
metrics_df_path = kwargs.get('metrics_df_path', PATHS.PLAYERS_METRICS_PATH)
save_artifacts = kwargs.get('save_artifacts', False)
baselines_path = kwargs.get('baselines_path', PATHS.BASELINE_PLAYERS_METRICS_PATH)
baseline_dimensions = kwargs.get('baseline_dimensions', [COLUMNS.COMPETITION_NAME, COLUMNS.POSITION])
if (os.path.exists(metrics_df_path) and os.path.exists(baselines_path)) and not force_create:
if verbose:
print('\nLoading existing players_metrics_df')
players_metrics_df = pd.read_csv(metrics_df_path)
with open(baselines_path, 'rb') as f:
baselines = pickle.load(f)
else:
if verbose: print('Creating players_metrics_df:\n> Columns formatting')
# Add matches_metadata to enriched_events_data, allowing grouping metrics for baselines
enriched_events_data[COLUMNS.MATCH_ID] = enriched_events_data[COLUMNS.MATCH_ID].astype(str)
enriched_events_data[COLUMNS.MATCH_ID] = enriched_events_data[COLUMNS.MATCH_ID].apply(
lambda val: val.split('.')[0])
matches_metadata[COLUMNS.MATCH_ID] = matches_metadata[COLUMNS.MATCH_ID].astype(str)
matches_metadata[COLUMNS.MATCH_ID] = matches_metadata[COLUMNS.MATCH_ID].apply(lambda val: val.split('.')[0])
enriched_events_data[COLUMNS.ASSISTS] = enriched_events_data[COLUMNS.ASSISTS].apply(lambda val: int(val) \
if isinstance(val, bool) else val)
enriched_events_data[COLUMNS.DRIBBLE_WON] = enriched_events_data[COLUMNS.DRIBBLE_WON].astype(float)
if verbose: print('>> Done.\n> Merging events data with matches metadata')
enriched_events_data = enriched_events_data.merge(matches_metadata, on=COLUMNS.MATCH_ID)
if verbose: print('>> Done.\n> Keeping only male players for evaluation')
enriched_events_data = enriched_events_data[
enriched_events_data['home_team_home_team_gender'].apply(lambda gender: gender.lower() == 'male')]
# Aggr dimensions. By default - player_name
player_dimensions = kwargs.get('player_dimensions', [COLUMNS.PLAYER_NAME])
if verbose: print(f'>> Done.\n> Creating shot types probabilities and scoring features...')
# Prepare columns for lift
enriched_events_data[f'{COLUMNS.SHOOTING}:{COLUMNS.XG}'] = enriched_events_data[COLUMNS.GOAL] * \
enriched_events_data[COLUMNS.XG]
enriched_events_data[f'{COLUMNS.SHOOTING}:{COLUMNS.GOAL}'] = enriched_events_data[COLUMNS.GOAL].copy()
# Sub-categories of shooting
shots_types = [COLUMNS.HEADER, COLUMNS.OUTBOX_SHOT, COLUMNS.FREE_KICK, COLUMNS.PENALTY, COLUMNS.ONE_ON_ONE]
for shot_type in tqdm(shots_types):
# shot_type:xg - the xg of each shot_type event. float x 1 if the event, else 0 or np.nan
# shot_type:goal - goal by shot_type event. 1 x 1 if the event occur, else 0 or np.nan
enriched_events_data[f'{shot_type}:{COLUMNS.XG}'] = enriched_events_data[shot_type] * \
enriched_events_data[COLUMNS.XG]
enriched_events_data[f'{shot_type}:{COLUMNS.GOAL}'] = enriched_events_data[shot_type] * \
enriched_events_data[COLUMNS.GOAL]
if verbose:
print('>> Done.\n> Creating Plotly xG distribution plot...')
fig = px.histogram(enriched_events_data, x=COLUMNS.XG, labels={'x': 'xG', 'y': 'count'},
title=f'xG distribution plot:')
fig.show()
print('> Aggregating data by player')
# Calculate players_metrics_df
metrics_columns = [COLUMNS.XA, COLUMNS.DRIBBLE_WON, COLUMNS.IS_SHOT, COLUMNS.GOAL] + shots_types + \
[f'{shot_type}:{COLUMNS.XG}' for shot_type in shots_types] + \
[f'{shot_type}:{COLUMNS.GOAL}' for shot_type in shots_types] + \
[f'{COLUMNS.SHOOTING}:{COLUMNS.XG}', f'{COLUMNS.SHOOTING}:{COLUMNS.GOAL}']
# Agg DataFrame to produce the metrics
columns = player_dimensions + metrics_columns
players_metrics_df = enriched_events_data[columns].groupby(player_dimensions) \
.agg([np.mean, np.sum, np.std]).reset_index()
if len(player_dimensions) == 1:
players_metrics_df.set_index(players_metrics_df.columns[0], inplace=True)
columns = [col for col in player_dimensions if col != COLUMNS.PLAYER_NAME]
else:
columns = player_dimensions[:]
# Exclude player_name since it is the index
for col in metrics_columns:
columns.extend([f"{col}:mean", f"{col}:sum", f"{col}:std"])
players_metrics_df.columns = columns
if verbose:
print('>> Done.\n> Shots distribution:')
print(players_metrics_df[f'{COLUMNS.IS_SHOT}:sum'].describe(
percentiles=[0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 1.]))
print('> Aggregating data by player')
if verbose: print('>> Done.\n> Removing players with less than min_actions_count shots')
# Filter players with too few shots
min_actions_count = kwargs.get('min_actions_count', 30)
min_subactions_count = kwargs.get('min_subactions_count', 10)
players_metrics_df = players_metrics_df[players_metrics_df[f"{COLUMNS.IS_SHOT}:sum"] > min_actions_count]
players_w_data = set(list(players_metrics_df.index))
if verbose: print('>> Done.\n> Calculating LIFTS')
for shot_type in shots_types + [COLUMNS.SHOOTING]:
# Lift by shot type = total goals scored by shot type / total xG achieved by shot type
players_metrics_df[f'{shot_type}:LIFT'] = players_metrics_df[[f'{shot_type}:{COLUMNS.GOAL}:sum',
f'{shot_type}:{COLUMNS.XG}:sum']].apply( \
lambda row: row[f'{shot_type}:{COLUMNS.GOAL}:sum'] / row[f'{shot_type}:{COLUMNS.XG}:sum']
if row[f'{shot_type}:{COLUMNS.XG}:sum'] > 0 else 1, axis=1)
if shot_type in shots_types:
# For players with sum current shot_type < num_actions_count -> put np.nan
players_metrics_df[f'{shot_type}:LIFT'] = players_metrics_df.apply( \
lambda row: row[f'{shot_type}:LIFT'] if row[f'{shot_type}:sum'] > min_subactions_count
else np.nan, axis=1)
# For players with not examples, we have to fill their lift values. Baseline = 1,
# means the player performed similar to his achieved xG
players_metrics_df[f'{shot_type}:LIFT'].fillna(1, inplace=True)
# Percentile transformer on all metrics averages using QuantileTransformer
percentiles_columns = [f"{col_}:percentile" for col_ in
players_metrics_df.select_dtypes(include='number').columns]
if verbose: print('>> Done.\n> Applying QuantileTransformer')
quantile_transformer = QuantileTransformer(n_quantiles=1000, output_distribution='uniform')
percentile_df = quantile_transformer.fit_transform(players_metrics_df.select_dtypes(include='number'), y=None)
percentile_df = pd.DataFrame(percentile_df, columns=percentiles_columns, index=players_metrics_df.index.copy())
players_metrics_df = pd.merge(players_metrics_df, percentile_df, left_index=True, right_index=True)
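    # Illustrative reading of the percentile columns (assumed interpretation of the uniform output):
    # a value of 0.8 in a '<metric>:percentile' column means that player's metric exceeds roughly
    # 80% of the players the transformer was fitted on.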
if verbose:
print(f'>> Done.\n> Creating Plotly xG LIFT distribution plot...')
fig = px.histogram(players_metrics_df, x='shooting:LIFT', nbins=100,
labels={'x': 'xG Lift', 'y': 'Number of player'},
title=f'xG LIFT distribution plot')
fig.show()
print('>> Done.\n> Preparing baselines...')
# Set benchmarks by competition, position name
baselines = {}
if len(baseline_dimensions) > 0:
baseline_columns = []
for col in metrics_columns:
baseline_columns.extend([f"{col}:mean", f"{col}:sum", f"{col}:std"])
        # Baselines aggregate the relevant events according to the baseline_dimension.
        # Only players with a sufficient number of shots are considered: players_w_data
baselines_population = enriched_events_data[enriched_events_data[COLUMNS.PLAYER_NAME].isin(players_w_data)]
for baseline_dimension in baseline_dimensions:
baselines[baseline_dimension] = baselines_population[[baseline_dimension] + metrics_columns]\
.copy().groupby(baseline_dimension).agg([np.mean, np.sum, np.std])
# baselines[baseline_dimension].set_index(baselines[baseline_dimension].columns[0], inplace=True)
baselines[baseline_dimension].columns = baseline_columns
for shot_type in shots_types + [COLUMNS.SHOOTING]:
# Lift by shot type = total goals scored by shot type / total xG achieved by shot type
baselines[baseline_dimension][f'{shot_type}:LIFT'] = baselines[baseline_dimension][
f'{shot_type}:{COLUMNS.GOAL}:sum'] \
/ baselines[baseline_dimension][
f'{shot_type}:{COLUMNS.XG}:sum']
baseline_percentile_df = quantile_transformer.transform(
baselines[baseline_dimension].select_dtypes(include='number'))
baseline_percentile_df = pd.DataFrame(baseline_percentile_df, columns=percentiles_columns,
index=baselines[baseline_dimension].index.copy())
baselines[baseline_dimension] = pd.merge(baselines[baseline_dimension], baseline_percentile_df,
left_index=True,
right_index=True)
baselines[baseline_dimension].columns = players_metrics_df.columns
# Sort columns for easy read
sorted_cols = list(players_metrics_df.columns)
sorted_cols.sort()
players_metrics_df = players_metrics_df[sorted_cols]
if verbose:
print('> Done.')
if save_artifacts:
print('Saving artifacts...')
if not os.path.exists(ARTIFACTS):
if verbose: print('Creating new ARTIFACTS folder')
os.makedirs(ARTIFACTS)
players_metrics_df.to_csv(metrics_df_path)
if len(baseline_dimensions) > 0:
with open(baselines_path, 'wb') as f:
pickle.dump(baselines, f, protocol=pickle.HIGHEST_PROTOCOL)
print('Completed: create_players_metrics_df')
return players_metrics_df, baselines
def get_players_metrics_df(enriched_events_data, matches_metadata, verbose=False, save_artifacts=False, **kwargs) -> (
pd.DataFrame, dict):
'''
:param enriched_events_data: pd.DataFrame, enriched version of StatsBomb events data (see get_enriched_events_data)
:param matches_metadata: DataFrame - adds season_name, competition_name for each match in the dataset
    :param verbose: controls progress printing
:param save_artifacts: whether to export artifacts (players_metrics_df, baselines dict) or not.
:return: metrics DataFrame and baselines metrics dict
'''
if (not os.path.exists(PATHS.PLAYERS_METRICS_PATH)) or (not os.path.exists(PATHS.BASELINE_PLAYERS_METRICS_PATH)):
df, baselines = create_players_metrics_df(enriched_events_data, matches_metadata, verbose=verbose,
save_artifacts=save_artifacts, **kwargs)
return df, baselines
df = pd.read_csv(PATHS.PLAYERS_METRICS_PATH)
df.set_index(df.columns[0], inplace=True)
with open(PATHS.BASELINE_PLAYERS_METRICS_PATH, 'rb') as f:
baselines = pickle.load(f)
return df, baselines
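# Minimal usage sketch (hypothetical variable names; assumes the cached artifacts do not exist yet):
#
#   events_df = get_enriched_events_data(verbose=True)
#   matches_meta = load_matches_metadata()
#   metrics_df, baselines = get_players_metrics_df(events_df, matches_meta, verbose=True)
#
# On later calls the cached CSV/pickle under PATHS are read back instead of being recomputed.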
def build_data_objects(return_objects=False, verbose=False, **kwargs):
'''
    Builds all required data objects for the UI and further data analysis
:param kwargs: verbose, force_create, plotly_export, save_artifacts
:param return_objects: whether to return the created data object or not (for saving the artifacts, for example)
:return: None if not return_objects, else, return all data objects created:
- enriched_events_data, matches_metadata, players_metadata, players_metrics_df
'''
if verbose:
print('Starting build_data_objects.\nCreating enriched events_data...')
enriched_events_data = get_enriched_events_data(**kwargs)
if verbose:
print('\n- Done. Starting creating matches metadata...')
matches_metadata = load_matches_metadata()
if verbose:
print('\n- Done. Starting creating players metadata...')
players_metadata = get_enriched_players_metadata(enriched_events_data)
if verbose:
print('\n- Done. Starting creating players and baselines metrics df...')
players_metrics_df, baselines = get_players_metrics_df(enriched_events_data, matches_metadata, **kwargs)
if verbose:
print('\n- Done. Starting creating teams metadata...')
teams_metadata = get_teams_metadata(**kwargs)
if verbose:
print('\n- Done.')
if kwargs.get('save_artifacts', False):
if verbose:
print('\n- Saving artifacts')
enriched_events_data.to_csv(PATHS.ENRICH_EVENTS_DATA_PATH)
matches_metadata.to_csv(PATHS.MATCHES_METADATA_PATH)
with open(PATHS.ENRICH_PLAYERS_METADATA_PATH, 'wb') as f:
pickle.dump(players_metadata, f, protocol=pickle.HIGHEST_PROTOCOL)
teams_metadata.to_csv(PATHS.TEAMS_METADATA_PATH)
players_metrics_df.to_csv(PATHS.PLAYERS_METRICS_PATH)
with open(PATHS.BASELINE_PLAYERS_METRICS_PATH, 'wb') as f:
pickle.dump(baselines, f, protocol=pickle.HIGHEST_PROTOCOL)
if verbose:
print('\n- Completed: build_data_objects')
if return_objects:
return enriched_events_data, matches_metadata, players_metadata, teams_metadata, players_metrics_df, baselines
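# Minimal usage sketch (hypothetical flags): build everything once and persist the artifacts with
#   build_data_objects(return_objects=False, verbose=True, save_artifacts=True)
# or keep the objects in memory for further analysis by passing return_objects=True.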
| 54.466887 | 120 | 0.658338 | 7,652 | 0.232598 | 0 | 0 | 0 | 0 | 0 | 0 | 10,014 | 0.304395 |
7e74c6f460e27189aecb37d569cafb98fe35f2d1 | 5,782 | py | Python | ipyannotator/datasets/download.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 19 | 2020-10-12T19:52:10.000Z | 2022-02-07T18:23:26.000Z | ipyannotator/datasets/download.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 6 | 2021-08-02T09:36:32.000Z | 2022-01-05T15:29:30.000Z | ipyannotator/datasets/download.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 1 | 2020-12-01T22:42:01.000Z | 2020-12-01T22:42:01.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01a_datasets_download.ipynb (unless otherwise specified).
__all__ = ['get_cifar10', 'get_oxford_102_flowers', 'get_cub_200_2011']
# Internal Cell
import glob
import json
from pathlib import Path
import os
import subprocess
import tarfile
import urllib.request
import urllib.error
import zlib
# Internal Cell
def _download_url(url, root, filename=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
os.makedirs(root, exist_ok=True)
if not os.path.isfile(fpath):
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
else:
print(f'File {filename} already exists, skip download.')
# Internal Cell
def _extract_tar(tar_path, output_dir):
try:
print('Extracting...')
with tarfile.open(tar_path) as tar:
tar.extractall(output_dir)
except (tarfile.TarError, IOError, zlib.error) as e:
print('Failed to extract!', e)
# Cell
def get_cifar10(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'cifar10'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/cifar10.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'cifar10.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
imdir_train = dataset_dir / 'train'
imdir_test = dataset_dir / 'test'
# split train/test
train = [Path(p) for p in glob.glob(f'{imdir_train}/*/*')]
test = [Path(p) for p in glob.glob(f'{imdir_test}/*/*')]
# generate data for annotations.json
# {'image-file.jpg': ['label1.jpg']}
annotations_train = dict((str(p), [f'{p.parts[-2]}.jpg']) for p in train)
annotations_test = dict((str(p), [f'{p.parts[-2]}.jpg']) for p in test)
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path
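# Shape of the generated annotation files (illustrative entry; actual keys depend on output_dir):
#   {"<output_dir>/cifar10/train/airplane/10008_airplane.png": ["airplane.jpg"], ...}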
# Cell
def get_oxford_102_flowers(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'oxford-102-flowers'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/oxford-102-flowers.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'oxford-102-flowers.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
with open(dataset_dir / 'train.txt', 'r') as f:
annotations_train = dict(tuple(line.split()) for line in f)
annotations_train = {str(dataset_dir / k): [v+'.jpg'] for k, v in annotations_train.items()}
with open(dataset_dir / 'test.txt', 'r') as f:
annotations_test = dict(tuple(line.split()) for line in f)
annotations_test = {str(dataset_dir / k): [v+'.jpg'] for k, v in annotations_test.items()}
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path
# Cell
def get_cub_200_2011(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'CUB_200_2011'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/CUB_200_2011.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'CUB_200_2011.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
with open(dataset_dir / 'images.txt','r') as f:
image_id_map = dict(tuple(line.split()) for line in f)
with open(dataset_dir / 'classes.txt','r') as f:
class_id_map = dict(tuple(line.split()) for line in f)
with open(dataset_dir / 'train_test_split.txt','r') as f:
splitter = dict(tuple(line.split()) for line in f)
# image ids for test/train
train_k = [k for k, v in splitter.items() if v == '0']
test_k = [k for k, v in splitter.items() if v == '1']
with open(dataset_dir / 'image_class_labels.txt','r') as f:
anno_ = dict(tuple(line.split()) for line in f)
annotations_train = {str(dataset_dir / 'images' / image_id_map[k]): [class_id_map[v]+'.jpg'] for k, v in anno_.items() if k in train_k}
annotations_test = {str(dataset_dir / 'images' / image_id_map[k]): [class_id_map[v]+'.jpg'] for k, v in anno_.items() if k in test_k}
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path | 35.042424 | 139 | 0.654272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,780 | 0.307852 |
7e760bdc1f9d49e259f501565af56e9e0e123033 | 3,076 | py | Python | airflow/plugins/operators/code_analyzer/utils/analysis/file.py | teiresias-personal-data-discovery/teiresias-system | 50e9d08d6924480f120d2d4f9fbebdc6035a5c5b | [
"MIT"
] | 2 | 2021-09-06T17:32:48.000Z | 2022-02-24T19:58:41.000Z | airflow/plugins/operators/code_analyzer/utils/analysis/file.py | teiresias-personal-data-discovery/teiresias-system | 50e9d08d6924480f120d2d4f9fbebdc6035a5c5b | [
"MIT"
] | null | null | null | airflow/plugins/operators/code_analyzer/utils/analysis/file.py | teiresias-personal-data-discovery/teiresias-system | 50e9d08d6924480f120d2d4f9fbebdc6035a5c5b | [
"MIT"
] | null | null | null | from ruamel.yaml import YAML
from pathlib import Path
from mergedeep import merge
from operators.code_analyzer.utils.analysis.tool import map_tool_to_storage_finder
from operators.code_analyzer.utils.analysis.context import collect_context
from operators.code_analyzer.constants.common import TOOL_CHARACTERISTICS
def parse_YML_file(file_path: str):
with Path(file_path) as path:
try:
yaml = YAML(typ='safe')
return yaml.load(path)
except:
return -1
def traverse_section_map(map, entity, tool, current_candidate_node_name):
findings: dict = {}
new_current_candidate_node_name = current_candidate_node_name
if isinstance(entity, dict):
for map_key, map_value in map.items():
if map_key == "candidates_node_names":
# handle each potential candidate branch recursively
for candidate_node_name, candidate_entity in entity.items():
new_current_candidate_node_name = candidate_node_name
findings = {
**findings,
**traverse_section_map(
map_value, candidate_entity, tool, new_current_candidate_node_name)
}
elif new_current_candidate_node_name is None and map_key not in entity:
# early stop if no match on map
return {}
elif map_value == "candidate_node_name":
# preserve node name of potential storage
new_current_candidate_node_name = entity.get(map_key)
elif callable(map_value) and entity.get(map_key):
# call candidate handler from map with corresponding value from entity
try:
candidate_handler_return = map_value(entity.get(map_key))
findings = {
**findings, new_current_candidate_node_name:
merge({},
findings.get(new_current_candidate_node_name,
{}), candidate_handler_return)
}
except Exception as e:
print('Error', e)
elif not callable(map_value):
# traverse map and entity simultaneously
findings = {
**findings,
**traverse_section_map(map_value, entity.get(map_key, {}), tool, new_current_candidate_node_name)
}
if isinstance(entity, list):
for item in entity:
findings = {
**findings,
**traverse_section_map(map, item, tool, new_current_candidate_node_name)
}
# omit unmatched traces
return {
node: traces
for node, traces in findings.items() if "match" in traces.keys()
}
def get_storages(entity, tool: str) -> list:
section_map = TOOL_CHARACTERISTICS.get(tool, {}).get('section_map', [])
return traverse_section_map(section_map, entity, tool, None)
| 41.567568 | 117 | 0.592653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.108583 |
7e783fa3b3ae047ea9b8d8a178c888dc525a3a94 | 1,647 | py | Python | examples/openmv/simple_tracking.py | bherbruck/simple-tracker | ed237d29a2cc84d0a38aaa2b1e8da7599ff8586f | [
"MIT"
] | 3 | 2020-06-24T13:06:48.000Z | 2021-09-15T11:47:10.000Z | examples/openmv/simple_tracking.py | bherbruck/simple_tracker | ed237d29a2cc84d0a38aaa2b1e8da7599ff8586f | [
"MIT"
] | null | null | null | examples/openmv/simple_tracking.py | bherbruck/simple_tracker | ed237d29a2cc84d0a38aaa2b1e8da7599ff8586f | [
"MIT"
] | null | null | null | import sensor
import image
import time
import json
from simple_tracker import Tracker
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
clock = time.clock()
green_threshold = [(44, 88, -50, 18, 9, 66)]
tracker = Tracker(max_distance=50)
record = True
tracker_file = 'tracker.txt'
data_file = 'data.txt'
def main():
if record:
data = {'tracker': {'max_distance': tracker.max_distance,
'timeout': tracker.timeout}}
write_file(tracker_file, data)
write_file(data_file, '')
while(True):
clock.tick()
img = sensor.snapshot()
find_blobs(img)
print(clock.fps())
def find_blobs(img):
blobs = img.find_blobs(green_threshold, pixels_threshold=500)
centroids = [(blob.cx(), blob.cy()) for blob in blobs]
lost_tracking = tracker.update(centroids)
for blob in blobs:
img.draw_rectangle(blob.rect())
for id, point in tracker.points.items():
img.draw_string(point[0], point[1], str(id), scale=2)
if record:
# ujson isn't the best... so record like this
data = {
'frame': tracker.frame,
'test_data': centroids,
'assertations': tracker.points,
'counts': lost_tracking
}
append_file(data_file, data)
def write_file(path, data):
with open(path, 'w') as file:
file.write(str(data))
def append_file(path, data):
with open(path, 'a') as file:
file.write(str(data) + ',\n')
main()
| 20.333333 | 65 | 0.625987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.092896 |
7e78a3a398d259dcdea4bb0f21c1e2aa3da5c606 | 2,106 | py | Python | ExcelToNetcdf.py | aecryan/netcdf-excel-conversion | b4c3d244ee726ec67fc110bbbda6400a42dd3d3a | [
"MIT"
] | null | null | null | ExcelToNetcdf.py | aecryan/netcdf-excel-conversion | b4c3d244ee726ec67fc110bbbda6400a42dd3d3a | [
"MIT"
] | null | null | null | ExcelToNetcdf.py | aecryan/netcdf-excel-conversion | b4c3d244ee726ec67fc110bbbda6400a42dd3d3a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
#Project name: Excel --> netCDF
#Description: It receives an excel file with a predefined format and returns the equivalent file in netCDF format.
#Programmers: Amir \& Ashley
#Date: 07-09-2020
import pandas as pd
import xarray
import scipy
print("Enter the exact name of the workbook. Please do include the file extension (e.g, .xlsx, .xls, etc):")
workbook_name = input()
print("\n")
print("Enter the exact name of the sheet within this workbook that should be converted:")
excel_sheet_name = input()
data_file = ("./data/input/"+workbook_name).strip()
# choose sheet
sheet_name= excel_sheet_name.strip()
# read excel file
df = pd.read_excel(data_file,
sheet_name=sheet_name,
index_col=[0],
na_values=['b.d.'])
# take description from top left cell
description = df.index.names[0]
# use "SAMPLE NAME" as index header
df.index = df.index.set_names(df.index[3])
# create mask to be able to distinguish difference in the column structure
mask = df.iloc[3,].isnull().values
# gather column names from two different rows
column_names = list(df.iloc[3,~mask].values) + list(df.iloc[0,mask].values)
column_names = [s.replace('/', ' ') for s in column_names]
df.columns = column_names
df.loc["flag"] = ["1.{}".format(i+1) if j == False else "2.{}".format(i+11) for i,j in enumerate(mask)]
# convert dataframe to xarray
xr = df.iloc[5:,].to_xarray()
# global attributes
xr.attrs = {'Conventions': 'CF-1.6', 'Title': sheet_name, 'Description': description}
# add variable attributes
units = df.iloc[2,mask]
method_codes = df.iloc[1,mask]
for i, col in enumerate(df.columns[mask]):
getattr(xr, col).attrs = {'units': units[i], 'comment': 'METHOD CODE: {}'.format(method_codes[i])}
comments = df.iloc[4,~mask]
for i, col in enumerate(df.columns[~mask]):
getattr(xr, col).attrs = {'comment': comments[i]}
# write xarray to netcdf file
xr.to_netcdf('./data/output/{}.nc'.format('-'.join((workbook_name.replace(".","")+" "+excel_sheet_name).split())))
| 18.972973 | 115 | 0.673789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 898 | 0.426401 |
7e79d9eb8ec6eeaae5636e4fa3a0bdb95504f949 | 2,583 | py | Python | scripts/make_mc_scenario.py | BayAreaMetro/Lasso | 189c6fbb2bb9d346ced7600531865db3e18ee23d | [
"Apache-2.0"
] | 2 | 2019-08-15T10:23:45.000Z | 2019-11-12T21:35:11.000Z | scripts/make_mc_scenario.py | BayAreaMetro/Lasso | 189c6fbb2bb9d346ced7600531865db3e18ee23d | [
"Apache-2.0"
] | 91 | 2020-01-06T14:53:22.000Z | 2022-02-24T17:41:32.000Z | scripts/make_mc_scenario.py | wsp-sag/client_met_council_wrangler_utilities | 62819d100af433de71b10e7c79d5b8003017eee1 | [
"Apache-2.0"
] | 2 | 2020-07-10T23:34:25.000Z | 2021-04-14T06:57:45.000Z | import os
import pandas as pd
from network_wrangler import RoadwayNetwork
from network_wrangler import TransitNetwork
from network_wrangler import ProjectCard
from network_wrangler import Scenario
from network_wrangler import WranglerLogger
from lasso import ModelRoadwayNetwork
import warnings
warnings.filterwarnings("ignore")
import yaml
import sys
import ast
USAGE = """
python make_mc_scenario.py mc_config.py
"""
if __name__ == "__main__":
args = sys.argv
if len(args) == 1:
raise ValueError("ERROR - config file must be passed as an argument!!!")
config_file = args[1]
if not os.path.exists(config_file):
raise FileNotFoundError(
"Specified config file does not exists - {}".format(config_file)
)
WranglerLogger.info("\nReading config file: {}".format(config_file))
with open(config_file) as f:
my_config = yaml.safe_load(f)
# Create Base Network
WranglerLogger.info("\nCreating base scenario")
base_scenario = Scenario.create_base_scenario(
my_config["base_scenario"]["shape_file_name"],
my_config["base_scenario"]["link_file_name"],
my_config["base_scenario"]["node_file_name"],
base_dir=my_config["base_scenario"]["input_dir"],
)
# Create Scenaro Network
if len(my_config["scenario"]["project_cards_filenames"]) > 0:
WranglerLogger.info("\nCreating project card objects for scenario")
project_cards_list = [
ProjectCard.read(filename, validate=False)
for filename in my_config["scenario"]["project_cards_filenames"]
]
WranglerLogger.info("\nCreating scenario")
my_scenario = Scenario.create_scenario(
base_scenario=base_scenario,
card_directory=my_config["scenario"]["card_directory"],
tags=my_config["scenario"]["tags"],
project_cards_list=project_cards_list,
glob_search=my_config["scenario"]["glob_search"],
)
WranglerLogger.info(
"\nApplying projects: {}".format("\n".join(my_scenario.get_project_names()))
)
print("Applying these projects to the base scenario ...")
print("\n".join(my_scenario.get_project_names()))
my_scenario.apply_all_projects()
print("Creating model network...")
WranglerLogger.info("\nCreating model network")
model_road_net = ModelRoadwayNetwork.from_RoadwayNetwork(
my_scenario.road_net, parameters=my_config.get("my_parameters", {})
)
WranglerLogger.info("\nCalculating additional variables and writing as shapefile")
model_road_net.write_roadway_as_shp()
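    # Sketch of the mc_config.yml layout implied by the keys read above (all values are placeholders):
    #
    #   base_scenario:
    #     input_dir: data/base
    #     shape_file_name: shape.geojson
    #     link_file_name: link.json
    #     node_file_name: node.geojson
    #   scenario:
    #     project_cards_filenames: []
    #     card_directory: project_cards
    #     tags: []
    #     glob_search: ''
    #   my_parameters: {}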
| 32.696203 | 86 | 0.711189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 0.309717 |
7e7a1ee381af8c15ff0068d49bcb9d24bce39bcc | 1,018 | py | Python | 973-k-closest-points-to-origin/973-k-closest-points-to-origin.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 973-k-closest-points-to-origin/973-k-closest-points-to-origin.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 973-k-closest-points-to-origin/973-k-closest-points-to-origin.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.dist = math.sqrt(x ** 2 + y ** 2)
class Solution:
"""
Quick Select algo:
Time best -> O(N)
Time worst -> O(N^2)
"""
def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
distances = [Point(x, y) for x, y in points]
distances = self.quick_select(distances, k)
return [[distance.x, distance.y] for distance in distances]
def quick_select(self, points, k):
if not points:
return []
pivot_dist = random.choice(points)
smaller = [point for point in points if point.dist <= pivot_dist.dist]
bigger = [point for point in points if point.dist > pivot_dist.dist]
M, N = len(smaller), len(bigger)
if k == M:
return smaller
if k > M:
return smaller + self.quick_select(bigger, k - M)
return self.quick_select(smaller, k)
| 31.8125 | 78 | 0.541257 | 1,007 | 0.989194 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.087426 |
7e7ab95088129de2b578b58bebe2a6b3e660cca5 | 6,571 | py | Python | Models/Camera.py | arlanschouwstra/Robotica-Groep-11 | 17cac9527c3cc3f94f5a8ab11741cb5e546c85c1 | [
"BSD-2-Clause"
] | null | null | null | Models/Camera.py | arlanschouwstra/Robotica-Groep-11 | 17cac9527c3cc3f94f5a8ab11741cb5e546c85c1 | [
"BSD-2-Clause"
] | null | null | null | Models/Camera.py | arlanschouwstra/Robotica-Groep-11 | 17cac9527c3cc3f94f5a8ab11741cb5e546c85c1 | [
"BSD-2-Clause"
] | null | null | null | # import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
import time
class Camera:
def __init__(self):
pass
def nothing(*arg):
pass
def detect(self):
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=32,
help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green" ball in the HSV color space (HUE SATURATION VALUE)
# Initial HSV GUI slider values to load on program start.
icol = (0, 0, 0, 255, 255, 255) # start Value low/high
cv2.namedWindow('colorTest')
# Lower range colour sliders.
cv2.createTrackbar('low_hue', 'colorTest', icol[0], 255, self.nothing)
cv2.createTrackbar('low_sat', 'colorTest', icol[1], 255, self.nothing)
cv2.createTrackbar('low_val', 'colorTest', icol[2], 255, self.nothing)
# Higher range colour sliders.
cv2.createTrackbar('high_hue', 'colorTest', icol[3], 255, self.nothing)
cv2.createTrackbar('high_sat', 'colorTest', icol[4], 255, self.nothing)
cv2.createTrackbar('high_val', 'colorTest', icol[5], 255, self.nothing)
# redLowerHSV = (170, 200, 50)
# redUpperHSV = (255, 255, 255)
# initialize the list of tracked points, the frame counter, and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""
# grab the reference to the webcam
camera = cv2.VideoCapture(0)
# keep looping
while True:
# grab the current frame
(_, frame) = camera.read()
# resize the frame, blur it, and convert it to the HSV color space
frame = imutils.resize(frame, width=600)
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Get HSV values from the GUI sliders.
low_hue = cv2.getTrackbarPos('low_hue', 'colorTest')
low_sat = cv2.getTrackbarPos('low_sat', 'colorTest')
low_val = cv2.getTrackbarPos('low_val', 'colorTest')
high_hue = cv2.getTrackbarPos('high_hue', 'colorTest')
high_sat = cv2.getTrackbarPos('high_sat', 'colorTest')
high_val = cv2.getTrackbarPos('high_val', 'colorTest')
# construct a mask for the color "green", then perform a series of dilations and erosions to remove any small blobs left in the mask
color_low = np.array([low_hue, low_sat, low_val])
color_high = np.array([high_hue, high_sat, high_val])
mask = cv2.inRange(hsv, color_low, color_high)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use it to compute the minimum enclosing circle and centroid
contour_sizes = [(cv2.contourArea(contour), contour) for contour in cnts]
c = max(contour_sizes, key=lambda x: x[0])[1]
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame, then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
pts.appendleft(center)
# loop over the set of tracked points
for i in np.arange(1, len(pts)):
# if either of the tracked points are None, ignore them
if pts[i - 1] is None or pts[i] is None:
continue
# check to see if enough points have been accumulated in the buffer
if counter >= 10 and i == 1 and pts[-10] is not None:
# compute the difference between the x and y coordinates and re-initialize the direction text variables
dX = pts[-10][0] - pts[i][0]
dY = pts[-10][1] - pts[i][1]
(dirX, dirY) = ("", "")
# ensure there is significant movement in the x-direction
if np.abs(dX) > 20:
dirX = "East" if np.sign(dX) == 1 else "West"
# ensure there is significant movement in the y-direction
if np.abs(dY) > 20:
dirY = "North" if np.sign(dY) == 1 else "South"
# handle when both directions are non-empty
if dirX != "" and dirY != "":
direction = "{}-{}".format(dirY, dirX)
# otherwise, only one direction is non-empty
else:
direction = dirX if dirX != "" else dirY
# show the movement deltas and the direction of movement on the frame
cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (0, 0, 255), 3)
cv2.putText(frame, "dx: {}, dy: {}".format(dX, dY),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
0.35, (0, 0, 255), 1)
# show the frame to our screen and increment the frame counter
cv2.imshow("Frame", frame)
cv2.imshow("Mask", mask)
key = cv2.waitKey(1) & 0xFF
counter += 1
# if the 'q' key is pressed, stop the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
# calling method:
camera = Camera()
camera.detect()
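# Illustrative trace of the direction logic in detect() above (hypothetical coordinates): if the
# tracked centroid moved from x=300 to x=250 and from y=200 to y=230 over the last 10 buffered
# points, then dX = +50 and dY = -30, so dirX = "East", dirY = "South" and the overlay reads
# "South-East".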
| 43.516556 | 144 | 0.547405 | 6,379 | 0.970781 | 0 | 0 | 0 | 0 | 0 | 0 | 2,287 | 0.348044 |
7e7afd9b5bc486f02d1d5c166ba6efa287e9c8ff | 2,217 | py | Python | custom_components/weatheralerts/sensor.py | ikifar2012/sensor.weatheralerts | 6e0b11c6c0c6223fdfb50e320662b2755b4c15f9 | [
"MIT"
] | null | null | null | custom_components/weatheralerts/sensor.py | ikifar2012/sensor.weatheralerts | 6e0b11c6c0c6223fdfb50e320662b2755b4c15f9 | [
"MIT"
] | null | null | null | custom_components/weatheralerts/sensor.py | ikifar2012/sensor.weatheralerts | 6e0b11c6c0c6223fdfb50e320662b2755b4c15f9 | [
"MIT"
] | null | null | null | """
A component which allows you to get weather alert information for a specified SAME code area.
For more details about this component, please refer to the documentation at
https://github.com/custom-components/sensor.weatheralerts
"""
import voluptuous as vol
from datetime import timedelta
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import (PLATFORM_SCHEMA)
__version__ = '0.0.3'
REQUIREMENTS = ['weatheralerts']
CONF_SAMEID = 'sameid'
ATTR_DESTINATION = 'destination'
ATTR_PUBLISHED = 'published'
ATTR_URGENCY = 'urgency'
ATTR_SEVERITY = 'severity'
ATTR_CATEGORY = 'category'
ATTR_TITLE = 'title'
ATTR_SUMMARY = 'summary'
ATTR_LINK = 'link'
SCAN_INTERVAL = timedelta(seconds=30)
ICON = 'mdi:weather-hurricane'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SAMEID): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
sameid = str(config.get(CONF_SAMEID))
add_devices([WeatherAlertsSensor(sameid)])
class WeatherAlertsSensor(Entity):
def __init__(self, sameid):
self._sameid = sameid
self.update()
def update(self):
from weatheralerts import WeatherAlerts
nws = WeatherAlerts(samecodes=self._sameid)
self._published = nws.alerts[0].published
self._state = nws.alerts[0].event
self._urgency = nws.alerts[0].urgency
self._severity = nws.alerts[0].severity
self._category = nws.alerts[0].category
self._title = nws.alerts[0].title
self._summary = nws.alerts[0].summary
self._link = nws.alerts[0].link
@property
def name(self):
return 'WeatherAlerts'
@property
def state(self):
return self._state
@property
def icon(self):
return ICON
@property
def device_state_attributes(self):
return {
ATTR_PUBLISHED: self._published,
ATTR_URGENCY: self._urgency,
ATTR_SEVERITY: self._severity,
ATTR_CATEGORY: self._category,
ATTR_TITLE: self._title,
ATTR_SUMMARY: self._summary,
ATTR_LINK: self._link,
}
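# Example configuration.yaml entry for this platform (assumed usage; the SAME code is a placeholder):
#
#   sensor:
#     - platform: weatheralerts
#       sameid: '029095'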
| 27.37037 | 89 | 0.691926 | 1,157 | 0.521876 | 0 | 0 | 526 | 0.237258 | 0 | 0 | 375 | 0.169147 |
7e7b1b72d7e2f3002b8b48e684583223e28c26f3 | 5,873 | py | Python | src/azure-cli/azure/cli/command_modules/security/tests/latest/test_security_automations_scenario.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/security/tests/latest/test_security_automations_scenario.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/security/tests/latest/test_security_automations_scenario.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
from azure.cli.testsdk import ScenarioTest
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
class SecurityCenterSecurityAutomationsTests(ScenarioTest):
def test_security_automations(self):
# Create Automation scope
security_automation_scope = self.cmd("az security automation-scope create --description 'this is a sample description' --scope-path '/subscriptions/03b601f1-7eca-4496-8f8d-355219eee254/'").get_output_in_json()
assert security_automation_scope["scopePath"] == "/subscriptions/03b601f1-7eca-4496-8f8d-355219eee254/"
# Create Automation rule
security_automation_rule = self.cmd("az security automation-rule create --expected-value 'High' --operator 'Equals' --property-j-path 'properties.metadata.severity' --property-type 'string'").get_output_in_json()
assert security_automation_rule["expectedValue"] == "High"
# Create Automation rule set
security_automation_rule_set = self.cmd("az security automation-rule-set create").get_output_in_json()
assert security_automation_rule_set["rules"] == None
# Create Automation source
security_automation_source = self.cmd("az security automation-source create --event-source 'Assessments'").get_output_in_json()
assert security_automation_source["eventSource"] == "Assessments"
# Create Automation logic app action
security_automation_action_logic_app = self.cmd("az security automation-action-logic-app create --logic-app-resource-id '/subscriptions/03b601f1-7eca-4496-8f8d-355219eee254/resourceGroups/sample-rg/providers/Microsoft.Logic/workflows/LA' --uri 'https://ms.portal.azure.com/'").get_output_in_json()
assert security_automation_action_logic_app["actionType"] == "LogicApp"
# Create Automation event hub action
security_automation_action_event_hub = self.cmd("az security automation-action-event-hub create --event-hub-resource-id '/subscriptions/03b601f1-7eca-4496-8f8d-355219eee254/resourceGroups/sample-rg/providers/Microsoft.EventHub/namespaces/evenhubnamespace1/eventhubs/evenhubname1' --connection-string 'Endpoint=sb://dummy/;SharedAccessKeyName=dummy;SharedAccessKey=dummy;EntityPath=dummy' --sas-policy-name 'Send'").get_output_in_json()
assert security_automation_action_event_hub["actionType"] == "EventHub"
# Create Automation workspace action
security_automation_action_workspace = self.cmd("az security automation-action-workspace create --workspace-resource-id '/subscriptions/03b601f1-7eca-4496-8f8d-355219eee254/resourcegroups/sample-rg/providers/microsoft.operationalinsights/workspaces/sampleworkspace'").get_output_in_json()
assert security_automation_action_workspace["actionType"] == "Workspace"
# List Automations by subscription
security_automations = self.cmd('az security automation list').get_output_in_json()
subscription_previous_automations_count = len(security_automations)
assert subscription_previous_automations_count >= 0
# List Automations by resource group
security_automations = self.cmd('az security automation list -g Sample-RG').get_output_in_json()
first_security_automation_name = security_automations[0]["name"]
assert len(security_automations) >= 0
# Show Automation
security_automation = self.cmd('az security automation show -g Sample-RG -n {}'.format(first_security_automation_name)).get_output_in_json()
assert security_automation["name"] == first_security_automation_name
# # Create/Update Automations
# self.cmd("az security automation create_or_update -g Sample-RG -n ExportToWorkspaceTemp --scopes [{\"description\":\"487bb485-b5b0-471e-9c0d-10717612f869\",\"scopePath\":\"/subscriptions/487bb485-b5b0-471e-9c0d-10717612f869\"}] --sources [{\"eventSource\":\"SubAssessments\",\"ruleSets\":null}] --actions [{\"actionType\":\"EventHub\",\"eventHubResourceId\":\"subscriptions/212f9889-769e-45ae-ab43-6da33674bd26/resourceGroups/ContosoSiemPipeRg/providers/Microsoft.EventHub/namespaces/contososiempipe-ns/eventhubs/surashed-test\",\"connectionString\":\"Endpoint=sb://contososiempipe-ns.servicebus.windows.net/;SharedAccessKeyName=Send;SharedAccessKey=dummy=;EntityPath=dummy\",\"SasPolicyName\":\"dummy\"}] -l eastus")
# # Validates Automations
# self.cmd("az security automation validate -g Sample-RG -n ExportToWorkspaceTemp --scopes [{\"description\":\"487bb485-b5b0-471e-9c0d-10717612f869\",\"scopePath\":\"/subscriptions/487bb485-b5b0-471e-9c0d-10717612f869\"}] --sources [{\"eventSource\":\"SubAssessments\",\"ruleSets\":null}] --actions [{\"actionType\":\"EventHub\",\"eventHubResourceId\":\"subscriptions/212f9889-769e-45ae-ab43-6da33674bd26/resourceGroups/ContosoSiemPipeRg/providers/Microsoft.EventHub/namespaces/contososiempipe-ns/eventhubs/surashed-test\",\"connectionString\":\"Endpoint=sb://contososiempipe-ns.servicebus.windows.net/;SharedAccessKeyName=Send;SharedAccessKey=dummy=;EntityPath=dummy\",\"SasPolicyName\":\"dummy\"}] -l eastus")
# # Delete Automation
# security_automation = self.cmd('az security automation delete -g Sample-RG -n ExportToWorkspaceTemp').get_output_in_json()
# assert security_automation["name"] == "ExportToWorkspace"
# security_automations = self.cmd('az security automation list').get_output_in_json()
# assert len(security_automations) == subscription_previous_automations_count
| 85.115942 | 727 | 0.737443 | 5,404 | 0.920143 | 0 | 0 | 0 | 0 | 0 | 0 | 4,027 | 0.68568 |
7e7b5ff37c1e9db3b446dba397ba3a4bdbfa6aee | 997 | py | Python | trypython/stdlib/argparse_/argparse01.py | devlights/try-python | 67d1d26476794da81c8c76662486108ce03f8fb4 | [
"MIT"
] | 4 | 2019-10-21T11:42:11.000Z | 2020-03-12T16:35:51.000Z | trypython/stdlib/argparse_/argparse01.py | devlights/try-python | 67d1d26476794da81c8c76662486108ce03f8fb4 | [
"MIT"
] | 80 | 2017-02-08T07:55:37.000Z | 2021-10-06T06:30:30.000Z | trypython/stdlib/argparse_/argparse01.py | devlights/try-python | 67d1d26476794da81c8c76662486108ce03f8fb4 | [
"MIT"
] | 1 | 2020-03-12T04:37:17.000Z | 2020-03-12T04:37:17.000Z | """
A sample of the argparse module.
Covers basic usage.
Reference: http://bit.ly/2UXDCIG
"""
import argparse
import sys
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
#
        # The basic steps for using the argparse module are:
        #
        # (1) Create an argparse.ArgumentParser object
        # (2) Register argument definitions on the parser with add_argument
        # (3) Call parser.parse_args
        # (4) Read the parsed argument values from args
#
parser = argparse.ArgumentParser(description='argparse sample01')
parser.add_argument('indir', type=str, help='input directory')
parser.add_argument('outdir', type=str, help='output directory')
args = parser.parse_args()
pr('type(parser)', type(parser))
pr('type(args)', type(args))
pr('args.indir', args.indir)
pr('args.outdir', args.outdir)
def go():
sys.argv.append('~/indir')
sys.argv.append('~/outdir')
obj = Sample()
obj.exec()
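# Expected values (illustrative): go() appends the two positional arguments before parsing, so
# parser.parse_args() returns an argparse.Namespace with args.indir == '~/indir' and
# args.outdir == '~/outdir'; type(parser) is argparse.ArgumentParser.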
| 23.186047 | 73 | 0.628887 | 806 | 0.69066 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.466153 |
7e7b93a9c5eda7c1107bcc3481dd998203dfcd7d | 949 | py | Python | scripts/check_masks.py | dyollb/segmantic | 8fe47340ff0f67812918f7070e3d6080e5d228ac | [
"MIT"
] | null | null | null | scripts/check_masks.py | dyollb/segmantic | 8fe47340ff0f67812918f7070e3d6080e5d228ac | [
"MIT"
] | 3 | 2021-09-24T20:32:23.000Z | 2022-03-14T10:54:13.000Z | scripts/check_masks.py | dyollb/segmantic | 8fe47340ff0f67812918f7070e3d6080e5d228ac | [
"MIT"
] | 2 | 2021-09-24T11:54:52.000Z | 2021-10-01T13:01:55.000Z | import logging
from pathlib import Path
import nibabel as nib
import numpy as np
import typer
def check_masks(directory: Path, file_glob: str = "*.nii.gz"):
logger = logging.getLogger(__file__)
for file_path in directory.glob(file_glob):
img = nib.load(f"{file_path}")
data = img.get_fdata()
max_value = np.max(data)
if max_value == 0:
logger.error("%s mask is empty", file_path)
return
min_value = np.min(data[data != 0])
if min_value < 1 or max_value != 1:
mask = np.zeros_like(data, dtype=np.uint8)
mask[data > 0.5] = 1
nib.save(nib.Nifti1Image(mask, img.affine), f"{file_path}")
logger.warning(
"%s foreground values in range [%s,%s]",
file_path,
f"{min_value}",
f"{max_value}",
)
if __name__ == "__main__":
typer.run(check_masks)
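# Typical invocation (assumed; typer builds the CLI from the function signature, so `directory`
# is positional and `file_glob` becomes an option):
#   python check_masks.py /path/to/masks --file-glob "*.nii.gz"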
| 27.911765 | 71 | 0.559536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.140148 |
7e7c1099ca5e6f0cb535351f8035469a042e59e5 | 10,296 | py | Python | draw_classes.py | Fs-agadir/StageDetect | af2841b41f80151e1f3092adbc266eb1d2b8db37 | [
"MIT"
] | null | null | null | draw_classes.py | Fs-agadir/StageDetect | af2841b41f80151e1f3092adbc266eb1d2b8db37 | [
"MIT"
] | null | null | null | draw_classes.py | Fs-agadir/StageDetect | af2841b41f80151e1f3092adbc266eb1d2b8db37 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Fs-Agadir
# All rights reserved.
#from wx import App, ScreenDC #to get monitor resolution
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import os, cv2
class Drawing:
def __init__(self):
pass
'''----drawing results tools----'''
# '''get montior resolution in dpi'''
# def monitordpi(self):
# app = App(0)
# s = ScreenDC()
# monitordpi = s.GetPPI()[0]
# return monitordpi
'''define different colors for specific number of values'''
def color_spectrum(self, unique_vals, offset=35, color_type='spectral'):
# unique_vals: type is list
# offset to differentiate colors
# color definitions
# output is cmap color values for each data value
cmap = plt.get_cmap(color_type) #'binary'PiYG
colors = []
i = 0
c = 0
while i < len(unique_vals):
colors.append(cmap(c))
i=i+1
c=c+offset
return colors
'''draw points on image'''
def draw_points_onto_image(self, image, image_points, point_id, markSize=2, fontSize=8, switched=False):
# draw image points into image and label the point id
# image_points: array with 2 columns
# point_id: list of point ids in same order as corresponding image_points file; if empty no points labeled
# dpi from screen resolution
#dpi = self.monitordpi()
dpi = 600
set_markersize = markSize
fontProperties_text = {'size' : fontSize,
'family' : 'serif'}
matplotlib.rc('font', **fontProperties_text)
fig = plt.figure(frameon=False) #dpi of screen resolution
fig.set_size_inches(image.shape[1]/float(dpi), image.shape[0]/float(dpi)) #dpi screen resolution!
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if switched:
ax.plot([p[1] for p in image_points],
[p[0] for p in image_points],
marker='o', ms=set_markersize, color='green', markeredgecolor='green', markeredgewidth=1)
else:
ax.plot([p[0] for p in image_points],
[p[1] for p in image_points],
marker='o', ms=set_markersize, color='red', markeredgecolor='black', markeredgewidth=1)
#ax.plot(image_points[:,0], image_points[:,1], "r.", markersize=set_markersize, markeredgecolor='black')
if len(point_id) > 1:
if not switched:
for label, xl, yl in zip(point_id, image_points[:,0], image_points[:,1]):
ax.annotate(str((label)), xy = (xl, yl), xytext=(xl+5, yl+1), color='blue', **fontProperties_text)
else:
for label, xl, yl in zip(point_id, image_points[:,1], image_points[:,0]):
ax.annotate(str((label)), xy = (xl, yl), xytext=(xl+5, yl+1), color='blue', **fontProperties_text) #str(int(label)
ax.imshow(image, cmap='gray', aspect='normal')
return plt
'''draw points on image'''
def plot_pts(self, img, points, switchColRow=False, plt_title='', output_save=False, edgecolor='blue'):
plt.clf()
plt.figure(frameon=False)
plt.gray()
if switchColRow:
plt.plot([p[1] for p in points],
[p[0] for p in points],
marker='o', ms=5, color='none', markeredgecolor=edgecolor, markeredgewidth=1)
else:
plt.plot([p[0] for p in points],
[p[1] for p in points],
marker='o', ms=5, color='none', markeredgecolor=edgecolor, markeredgewidth=1)
plt.title(plt_title)
plt.axis('off')
plt.imshow(img)
if not output_save:
plt.waitforbuttonpress()
plt.close()
else:
return plt
'''draw Harris points on image'''
def plot_harris_points(self, image, filtered_coords, save=False, directory_img=None):
""" Plots corners found in image. """
plt.figure()
plt.gray()
plt.imshow(image)
plt.plot([p[1] for p in filtered_coords],
[p[0] for p in filtered_coords],
marker='o', ms=2, color='none', markeredgecolor='blue', markeredgewidth=0.2)
plt.axis('off')
if save:
plt.savefig(os.path.join(directory_img, 'harris.jpg'), dpi=600, pad_inches=0)
else:
plt.show()
'''draw SIFT matches on images'''
# source code from Jan Erik Solem
def plot_matches_SIFT(self, imagename1, imagename2, locs1, locs2, matchscores, show_below=True):
'''Show a figure with lines joining the accepted matches
input: im1, im2, (images as arrays), locs1, locs2 (feature locations),
            matchscores (as output from 'match()'),
            show_below (if images should be shown below matches). '''
im1 = cv2.imread(imagename1)
im2 = cv2.imread(imagename2)
im3 = self.appendimages(im1, im2)
if show_below:
#im3 = np.vstack((im3, im3))
plt.imshow(im3)
cols1 = im1.shape[1]
for i,m in enumerate(matchscores):
if m > 0:
plt.plot([locs1[i][1], locs2[m][1] + cols1], [locs1[i][0], locs2[m][0]], 'c')
plt.axis('off')
'''draw matches on images'''
# source code from Jan Erik Solem
def plot_matches(self, im1, im2, pts1, pts2, nbr_match_draw_set=0, save=False, directory_img=None):
'''draw matches
im1, im2 location and name of images
pts1, pts2 (numpy array): location of matched points in image
nbr_match_draw: amount of matches to be displayed'''
if nbr_match_draw_set == 0:
nbr_match_draw = pts1.shape[0]
else:
nbr_match_draw = nbr_match_draw_set
img2_show = plt.imread(im2)
if len(img2_show.shape) > 2:
ymax2, xmax2, _ = img2_show.shape #ymax2, xmax2, _ =
else:
ymax2, xmax2 = img2_show.shape
img1_show = plt.imread(im1)
if len(img1_show.shape) > 2:
ymax1, xmax1, _ = img1_show.shape
else:
ymax1, xmax1 = img1_show.shape
if ymax1 > ymax2:
ymax = ymax1
else:
ymax = ymax2
fig = plt.figure(figsize=((xmax1+xmax2)/1000, (ymax)/1000))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
plt.subplots_adjust(wspace=0, hspace=0)
        ax1.imshow(img2_show, aspect='auto') #clip white border
        ax2.imshow(img1_show, aspect='auto', cmap='Greys_r') #clip white border
pts1_draw = np.asarray(pts1, dtype=np.float)
pts2_draw = np.asarray(pts2, dtype=np.float)
if len(pts1_draw.shape) == 3:
x1,y1 = pts1_draw[:,:,0:1].flatten(), pts1[:,:,1:2].flatten()
x2,y2 = pts2_draw[:,:,0:1].flatten(), pts2[:,:,1:2].flatten()
else:
x1,y1 = pts1_draw[:,0:1].flatten(), pts1[:,1:2].flatten()
x2,y2 = pts2_draw[:,0:1].flatten(), pts2[:,1:2].flatten()
colors = self.color_spectrum(pts1_draw.tolist(), offset=1)
print 'plotting matches'
i = 0
lines = []
while i < nbr_match_draw:#pts1_draw.shape[0]:
transFigure = fig.transFigure.inverted()
coord1 = transFigure.transform(ax1.transData.transform([x1[i],y1[i]]))
coord2 = transFigure.transform(ax2.transData.transform([x2[i],y2[i]]))
line = plt.matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure, color=colors[i]) #
plt.setp(line, color=colors[i], linewidth=0.2)
lines.append(line,)
ax1.plot(x1[i], y1[i], marker='o', ms=1, color='none', markeredgecolor=colors[i], markeredgewidth=0.2) # color=colors[i], markeredgecolor='none'
ax2.plot(x2[i], y2[i], marker='o', ms=1, color='none', markeredgecolor=colors[i], markeredgewidth=0.2)
ax1.imshow(img2_show, aspect='auto') #re-center image
ax2.imshow(img1_show, aspect='auto', cmap='Greys_r') #re-center image
i = i+1
fig.lines = lines
ax1.axis('off')
ax2.axis('off')
if save:
plt.savefig(os.path.join(directory_img, 'matches.jpg'), dpi=600)
else:
plt.show()
print 'plotting STAR matches done'
return fig
#draw image points on image
def plot_features(self, im, locs, circle=False):
'''Show image with features. input: im (image as array), locs (row, col, scale, orientation of each feature).'''
def draw_circle(c, r):
t = np.arange(0,1.01,.01)*2*np.pi
x = r*np.cos(t) + c[0]
y = r*np.sin(t) + c[1]
plt.plot(x,y,'b',linewidth=2)
plt.imshow(im)
if circle:
for p in locs:
draw_circle(p[:2],p[2])
else:
plt.plot(locs[:,0],locs[:,1],'ob')
plt.axis('off')
#help function to plot assigned SIFT features
def appendimages(self, im1, im2):
'''Return a new image that appends the two images side-by-side.'''
# select the image with the fewest rows and fill in enough empty rows
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.vstack((im1, np.zeros((rows2-rows1, im1.shape[1], im1.shape[2]))))
elif rows1 > rows2:
im2 = np.vstack((im2, np.zeros((rows1-rows2, im2.shape[1], im2.shape[2]))))
# if none of these cases they are equal, no fillng needed.
return np.concatenate((im1, im2), axis=1) | 38.133333 | 163 | 0.54089 | 10,094 | 0.980381 | 0 | 0 | 0 | 0 | 0 | 0 | 2,505 | 0.243298 |
7e7c81aac02ad87573439958e43598f569160606 | 7,325 | py | Python | batch_gnome/batch_gnome/TAP/TAP_ext/test_TAP_ext.py | dylanrighi/GnomeTools | 08ad6810194141ac89a27c95fa9b9499376000ae | [
"Unlicense"
] | 2 | 2017-02-15T20:45:42.000Z | 2020-10-09T16:00:00.000Z | batch_gnome/batch_gnome/TAP/TAP_ext/test_TAP_ext.py | dylanrighi/GnomeTools | 08ad6810194141ac89a27c95fa9b9499376000ae | [
"Unlicense"
] | 10 | 2015-06-25T23:42:11.000Z | 2021-06-22T16:19:19.000Z | batch_gnome/batch_gnome/TAP/TAP_ext/test_TAP_ext.py | dylanrighi/GnomeTools | 08ad6810194141ac89a27c95fa9b9499376000ae | [
"Unlicense"
] | 15 | 2016-01-11T20:49:10.000Z | 2020-10-15T18:02:20.000Z | #!/usr/bin/env python2.3
"""
A simple test routine that tests at least some of the TAP_ext package
"""
import unittest
from Numeric import *
class CalcPolygonsTestCase(unittest.TestCase):
def testCalcPolygons(self):
pass
class check_receptorsTestCase(unittest.TestCase):
def test_check_receptors(self):
"""
The Python and C versions of the receptor site hit test should return the same result
"""
from RandomArray import uniform, seed
from TAP_ext import check_receptors
from time import time
area = 200
num_LEs = 100
num_times = 10
num_sites = 4
sites = [array([(20,65),(40,35),(70,25),(75,45),(55,50),(45,75),(20,65)],Float)]*num_sites
# build site bounding boxes
BBs = []
for site in sites:
max_x = site[0,0]
min_x = site[0,0]
max_y = site[0,1]
min_y = site[0,1]
max_x = max(max_x, max(site[:,0]))
min_x = min(min_x, min(site[:,0]))
max_y = max(max_y, max(site[:,1]))
min_y = min(min_y, min(site[:,1]))
BBs.append(array((max_x,min_x,max_y,min_y),Float))
LEs = uniform(0,area,(num_times,num_LEs,2))
Hit_Table1 = zeros((num_LEs, num_sites),Int)
start = time()
hit_test(LEs,sites,BBs,Hit_Table1,0)
print "Python version took %.3f seconds"%(time()-start)
Hit_Table2 = zeros((num_LEs, num_sites),Int)
start = time()
check_receptors.hit_test(LEs,sites,BBs,Hit_Table2,0)
print "c version took %.3f seconds"%(time()-start)
assert alltrue(equal(Hit_Table1,Hit_Table2)), "Python and C version gave different results"
from TAP_ext import NumericExtras as NE
class NumericExtrasTestCase(unittest.TestCase):
def testFastclip(self):
print "testing fastclip"
A = arange(0,10,1,Float)
B = clip(A, 3, 5)
NE.fastclip(A, 3, 5)
assert alltrue(A == B), "fastclip and clip gave different answers"
def testByteswap(self):
A = arange(10)
B = A.copy()
NE.byteswap(B)
B = B.byteswapped()
assert alltrue(A == B), "NE.byteswap and Numeric.array.byteswapped gave different results"
def testChangetypeA(self):
"""
changetype should fail for non-contiguous arrays
"""
A = arange(18)
A.shape = 3,6
B = A[:,3]
self.assertRaises(ValueError,NE.changetype,B,Float)
def testChangetypeB(self):
"""
changetype should fail for arrays the wrong size for the type
"""
A = arange(25)
self.assertRaises(ValueError,NE.changetype,A,Float)
def testChangetypeC(self):
"""
changetype(m,typecode) should have the same result as:
m = fromstring(m.tostring(),typecode)
"""
A = arange(26)
B = A.copy()
NE.changetype(A,Float)
assert alltrue (A == fromstring(B.tostring(),Float))
## This is the Python version of the check_receptors code, used by the test code above
def hit_test(LEs,sites,BBs,Hit_Table,Start_step):
"""
hit_test computes the receptor site hits given a set of LE positions,
LEs, and the receptor sites, and the bounding boxes of the receptor sites.
LEs is a M X N X 2 NumPy array (of Floats ?)
N is the number of LEs (Num_LEs)
M is the number of timesteps (must be at least 2)
sites is a list of N X 2 NumPy arrays (of Floats)
N is the number of points in a receptor polygon
BBs is a list of 4 X 1 NumPy arrays (of Floats) of the bounding box of the sites (max_x,min_x,max_y,min_y)
Hit_Table is a NumPy array of Int16 (short) of size (Num_LEs, Num_sites),
it hold the values of the first timestep that the site was hit by a given LE.
***Hit_Table is ALTERED by this function!!!***
the function returns None
"""
N_LEs = LEs.shape[1]
N_times = LEs.shape[0]
N_sites = len(sites)
for T_ind in range(1,N_times): # loop over timesteps
for LE_ind in range(N_LEs): # loop over LEs
LE_line = (tuple(LEs[T_ind-1,LE_ind,:]),tuple(LEs[T_ind,LE_ind,:])) # LE-movement segment
# did the LE move?
if (LE_line[0] != LE_line[1]):
# check bounding boxes
bb_LE = (max(LE_line[0][0],LE_line[1][0]),min(LE_line[0][0],LE_line[1][0]),
max(LE_line[0][1],LE_line[1][1]),min(LE_line[0][1],LE_line[1][1]))
for site_ind in range(N_sites): # loop over sites
if BB_check(BBs[site_ind],bb_LE):
# do the line cross check
for segment in map(None,sites[site_ind][:-1],sites[site_ind][1:]):
if LCross(LE_line,segment):
if not Hit_Table[LE_ind,site_ind]:
Hit_Table[LE_ind,site_ind] = Start_step + T_ind
break
return None
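# Illustrative call (hypothetical sizes): with LEs shaped (num_times, num_LEs, 2), one polygon per
# receptor site and matching bounding boxes,
#   Hit_Table = zeros((num_LEs, num_sites), Int)
#   hit_test(LEs, sites, BBs, Hit_Table, 0)
# afterwards Hit_Table[i, j] holds the first timestep at which LE i crossed into site j
# (and stays 0 if it never did).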
def BB_check(bb_1, bb_2):
"""
bb_1 and bb_2 are two bounding boxes.
Each is a 4 element tuple of :
(max_x,min_x,max_y,min_y)
BB_check(bb_1, bb_2)
returns 1 if the two boxes intersect
returns 0 if the two boxes don't intersect
"""
if ( (bb_1[0] > bb_2[1]) and (bb_1[1] < bb_2[0]) and
(bb_1[2] > bb_2[3]) and (bb_1[3] < bb_2[2]) ):
return 1
else:
return 0
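# Worked example (boxes given as (max_x, min_x, max_y, min_y)):
#   BB_check((5., 1., 5., 1.), (6., 4., 6., 4.)) -> 1  # the boxes overlap on [4,5] x [4,5]
#   BB_check((2., 1., 2., 1.), (6., 4., 6., 4.)) -> 0  # x-ranges [1,2] and [4,6] are disjoint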
def LCross(S1,S2):
"""
S1 and S2 are two element tuples of two element tuples of
x,y coordinates of the two lines:
Routine to check if two line segments intersect
returns 0 if they don't intersect, 1 if they intersect
"""
((px1,py1),(px2,py2)) = S1
((px3,py3),(px4,py4)) = S2
# First some utility functions:
def SideOfLineCheck(x1,y1,x2,y2,Px,Py):
""" Given a line segment x1,y1 to x2,y2
it checks to see if point Px,Py is to the right
or to the left of the line segment looking from
point x1,y1 to point x2,y2.
If D is positive, then the point Px,Py is to the LEFT of the
line segment. If D is negative, P is to the right of segment.
If D is zero then, P is on the segment
If D =0 then that means that the point P is on the line
defined by the two points...they may not be on the segment
The check is done by taking the
cross product of the vectors x1,y1 to x2,y2
and x1,y1 to Px,Py
"""
def CrossProduct(x1,y1,x2,y2):
# Given vectors x1,y1 and x2,y2
# this routine returns the cross product
# which is also the determinant
return x1*y2 - y1*x2
dx = x2 - x1
dy = y2 - y1
dxp = Px - x1
dyp = Py - y1
return CrossProduct(dx,dy,dxp,dyp)
# Check to see if point 3 is to the left of segment 1
D1 = SideOfLineCheck(px1,py1,px2,py2,px3,py3)
# Now check if point 4 is to the left of segment 1
D2 = SideOfLineCheck(px1,py1,px2,py2,px4,py4)
# if points 3 and 4 are on the same side of line 1
# then things don't cross
if(D1*D2 > 0):
return 0
# now we need to check the other way...
#Check to see if point 1 is to the left of segment 2
D1 = SideOfLineCheck(px3,py3,px4,py4,px1,py1)
# Now check if point 2 is to the left of segment 2
D2 = SideOfLineCheck(px3,py3,px4,py4,px2,py2)
# if points 1 and 2 are on the same side of line 2 then things don't cross
if(D1*D2 > 0):
return 0
#if we get here, the hummers cross
return 1
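# Worked example (illustrative segments): the two diagonals of a square cross,
#   LCross(((0., 0.), (2., 2.)), ((0., 2.), (2., 0.)))  -> 1
# while two parallel horizontal segments do not,
#   LCross(((0., 0.), (1., 0.)), ((0., 1.), (1., 1.)))  -> 0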
if __name__ == "__main__":
## suite()
unittest.main()
| 26.348921 | 107 | 0.622526 | 2,867 | 0.391399 | 0 | 0 | 0 | 0 | 0 | 0 | 3,306 | 0.451331 |
7e7d5c74d34a346aa2f86f99938df07c5e5fd4cb | 89 | py | Python | ourteam/apps.py | Kgermando/parlementaires | ef9a464373ce2facbb6543be6d3b0d7d5c11200c | [
"Apache-2.0"
] | 1 | 2021-08-09T02:22:42.000Z | 2021-08-09T02:22:42.000Z | ourteam/apps.py | Kgermando/parlementaires | ef9a464373ce2facbb6543be6d3b0d7d5c11200c | [
"Apache-2.0"
] | null | null | null | ourteam/apps.py | Kgermando/parlementaires | ef9a464373ce2facbb6543be6d3b0d7d5c11200c | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class OurteamConfig(AppConfig):
name = 'ourteam'
| 14.833333 | 33 | 0.752809 | 52 | 0.58427 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.101124 |
7e7db118c7f4f3b7e95cfc69811f1d3a63168f03 | 381 | py | Python | backpack/extensions/secondorder/diag_ggn/permute.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 395 | 2019-10-04T09:37:52.000Z | 2022-03-29T18:00:56.000Z | backpack/extensions/secondorder/diag_ggn/permute.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 78 | 2019-10-11T18:56:43.000Z | 2022-03-23T01:49:54.000Z | backpack/extensions/secondorder/diag_ggn/permute.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 50 | 2019-10-03T16:31:10.000Z | 2022-03-15T19:36:14.000Z | """Module defining DiagGGNPermute."""
from backpack.core.derivatives.permute import PermuteDerivatives
from backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule
class DiagGGNPermute(DiagGGNBaseModule):
"""DiagGGN extension of Permute."""
def __init__(self):
"""Initialize."""
super().__init__(derivatives=PermuteDerivatives())
| 31.75 | 84 | 0.76378 | 190 | 0.498688 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.233596 |
7e7dd5c8d80c1193d34c16e490af85436be7f87e | 1,703 | py | Python | tests.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | tests.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | tests.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | import sys
import numpy as np
from sklearn.externals import joblib
sys.path.append("common")
from train_classifier import *
staring_verb_extractor = StartingVerbExtractor()
verb_count_extractor = VerbCountExtractor()
starting_modal_extractor = StartingModalExtractor()
noun_count_extractor = NounCountExtractor()
def test_load_data(file_name):
return load_data(file_name)
def test_stating_verb_extract(text):
print(staring_verb_extractor.starting_verb(text))
def test_transform(X):
print(staring_verb_extractor.transform(X))
def test_tokenize(text):
print(tokenize_text(text))
def test_total_verb_counts(text):
print(verb_count_extractor.count_verbs(text))
def test_stating_modals(text):
print(starting_modal_extractor.starting_modals(text))
def test_total_noun_counts(text):
print(noun_count_extractor.count_nouns(text))
def test_evaluate_model(X, Y, col_names, model_path='./models/classifier.pkl'):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
model = joblib.load(model_path)
evaluate_model(model, X_test, Y_test, col_names)
if __name__ == "__main__":
debug_data = ['.', './data/DisasterResponse.db', './models/classifier.pkl']
X, Y, col_names = load_data(debug_data[1])
# for text in X[:100].values:
# test_tokenize(text)
# for text in texts:
# test_stating_verb_extract(text)
#
# test_transform(X)
# for text in X[:100].values:
# test_total_verb_counts(text)
#
# for text in X[:100].values:
# test_total_noun_counts(text)
#
# for text in X[:100].values:
# test_stating_modals(text)
test_evaluate_model(X, Y, col_names)
| 24.681159 | 79 | 0.72754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.245449 |
7e7e872c4d53c683d43e32b7bedc06b65ba0d11b | 3,734 | py | Python | sendim/tests/graph.py | ZuluPro/snafu | dbd79da4617db315ba7e76a0af6805cc3b1cbae2 | [
"BSD-3-Clause"
] | 1 | 2017-12-13T20:28:32.000Z | 2017-12-13T20:28:32.000Z | sendim/tests/graph.py | ZuluPro/snafu | dbd79da4617db315ba7e76a0af6805cc3b1cbae2 | [
"BSD-3-Clause"
] | null | null | null | sendim/tests/graph.py | ZuluPro/snafu | dbd79da4617db315ba7e76a0af6805cc3b1cbae2 | [
"BSD-3-Clause"
] | null | null | null | """
"""
from django.utils import unittest
from django.core import management
from referentiel.models import Supervisor
from sendim.tests.defs import create_event, create_alert, internet_is_on
class Graph_TestCase(unittest.TestCase):
"""
    Test communication with metrology.
"""
def setUp(self):
management.call_command('loaddata', 'test_supervisor.json', database='default', verbosity=0)
def tearDown(self):
pass
@unittest.skipIf(not internet_is_on(), 'No internet connection available.')
def test_RRDTool(self):
"""
        Test to get a list of graphs from nagios.demo.netways.de
for host 'c1-activedirectory' and service 'win-mem+virtual'.
"""
        # Find the supervised hosts listed at http://nagios.demo.netways.de/nagios/cgi-bin/status.cgi
management.call_command('loaddata', 'test_rrdtool_host.json', database='default', verbosity=0)
GRAPH_LIST_URL = 'https://nagios.demo.netways.de/pnp4nagios/graph?host=c1-activedirectory-1&srv=win-mem+virtual&view=0'
GRAPH_URL = 'https://nagios.demo.netways.de/pnp4nagios/image?host=c1-activedirectory-1&srv=win-mem+virtual&view=0'
S = Supervisor.objects.get(name__icontains='Netways')
opener = S.getOpener()
A = create_alert(host='c1-activedirectory-1', service='win-mem virtual')
# Find graphs
## Test to get a graph list URL
graph_list_url = S.get_graph_url(alert=A, prefix='graph')
self.assertEqual(GRAPH_LIST_URL, graph_list_url)
## Test to open this URL
response = opener.open(graph_list_url)
info = response.info()
self.assertEqual(200,response.getcode())
self.assertIn('text/html', info['content-type'])
# Get graph
## Test to get a graph URL
graph_url = S.get_graph_url(alert=A)
self.assertEqual(GRAPH_URL, graph_url)
## Test to open this URL
response = opener.open(graph_url)
info = response.info()
self.assertEqual(200,response.getcode())
self.assertIn('image/png', info['content-type'])
@unittest.skipIf(not internet_is_on(), 'No internet connection available.')
def test_N2RDD(self):
"""
        Test to get a list of graphs from sysnetmon.diglinks.com
for host 'core.diglinks.com' and service '02_load'.
"""
        # Find the supervised hosts listed at http://nagios.demo.netways.de/nagios/cgi-bin/status.cgi
management.call_command('loaddata', 'test_n2rrd_host.json', database='default', verbosity=0)
GRAPH_LIST_URL = 'http://sysnetmon.diglinks.com/cgi-bin/rrd2graph.cgi?hostname=core.diglinks.com&service=02_load'
GRAPH_URL = 'http://sysnetmon.diglinks.com/cgi-bin/n2rrd_images_cache/core.diglinks.com/core.diglinks.com_load_Daily.png'
S = Supervisor.objects.get(name__icontains='SysNetmon')
opener = S.getOpener()
A = create_alert(host='core.diglinks.com', service='02_load')
# Find graphs
graph_list_url = S.get_graph_url(alert=A, prefix='graph')
self.assertEqual(GRAPH_LIST_URL, graph_list_url)
handle = opener.open(graph_list_url)
# TO FINISH
# Get graph
#graph_url = S.get_graph_url(alert=A)
#self.assertEqual(GRAPH_URL, graph_url)
#handle = opener.open(graph_url)
| 46.675 | 129 | 0.602839 | 3,538 | 0.947509 | 0 | 0 | 3,278 | 0.877879 | 0 | 0 | 1,686 | 0.451527 |
7e8134c8fd80b019bde539a2bf2a08fb9219b671 | 799 | py | Python | PipelineCreator/Testing/Python/ModuleTemplateTests.py | Connor-Bowley/SlicerPipelines | 2fe92230fc6333951b7d99c272f3f2c34739624d | [
"Apache-2.0"
] | null | null | null | PipelineCreator/Testing/Python/ModuleTemplateTests.py | Connor-Bowley/SlicerPipelines | 2fe92230fc6333951b7d99c272f3f2c34739624d | [
"Apache-2.0"
] | 9 | 2021-11-08T20:42:49.000Z | 2022-03-11T19:05:00.000Z | PipelineCreator/Testing/Python/ModuleTemplateTests.py | Connor-Bowley/SlicerPipelines | 2fe92230fc6333951b7d99c272f3f2c34739624d | [
"Apache-2.0"
] | 2 | 2022-01-21T09:13:40.000Z | 2022-02-09T21:16:31.000Z | import unittest
from PipelineCreatorLib._Private.ModuleTemplate import ModuleTemplate
class TestModuleTemplate(unittest.TestCase):
def test_simple(self):
t = ModuleTemplate("{{name}} is here")
self.assertEqual("frank is here", t.substitute({"name":"frank"}))
self.assertEqual("frank is here", t.safe_substitute({"name":"frank"}))
def test_missing(self):
t = ModuleTemplate("{{greeting}}, {{name}}")
with self.assertRaises(KeyError):
t.substitute({"greeting":"hello"})
self.assertEqual("hello, {{name}}", t.safe_substitute({"greeting":"hello"}))
def test_escaped(self):
t = ModuleTemplate("{{{ {{{{{{")
self.assertEqual("{{ {{{{", t.substitute({}))
self.assertEqual("{{ {{{{", t.safe_substitute({}))
if __name__ == '__main__':
unittest.main()
| 33.291667 | 80 | 0.662078 | 662 | 0.828536 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.236546 |
7e82cf4b8fa8c9cf39f0aaa01ba1006d96672fc2 | 3,760 | py | Python | engine/logic/Transform.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | null | null | null | engine/logic/Transform.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | null | null | null | engine/logic/Transform.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | 1 | 2019-12-03T15:42:38.000Z | 2019-12-03T15:42:38.000Z | # Copyright © 2019 CAILLAUD Jean-Baptiste.
from copy import copy
from engine.logic.Math import Mat3x3, Vector2, IDENTITY_MATRIX, ZERO_VECTOR, UNIT_VECTOR
class Transform:
"""
Simple transform.
Used to give information about the objects in the scene.
Attributes:
parent Parent transform of this object.
position Position of the transform in parent-relative space.
rotation Rotation of the transform in parent-relative space.
scale Scale of the transform in parent-relative space.
gameobject GameObject instance attached to this transform.
children List of all the children of this transform.
"""
def __init__(self, parent, position=ZERO_VECTOR, offset=ZERO_VECTOR, rotation=0, scale=UNIT_VECTOR, gameobject=None):
"""
Class constructor.
Creates a new Transform instance.
:param parent: The parent of this transform.
        :param position: The position of this transform.
        :param offset: The offset of the transform.
:param rotation: The rotation of this transform.
:param scale: The scale of this transform.
:param gameobject: The game object that this transform represents.
"""
self.parent = parent
self.offset = copy(offset)
self.position = copy(position)
self.rotation = rotation
self.scale = scale
self.gameobject = gameobject
self.children = []
# If there is a parent.
if parent is not None:
# Append ourselves to the children list.
parent.children.append(self)
def apply(self, position, world=False):
"""
Applies the transform to the specified position.
:param position: The position to apply the transform to.
:param world: If True, applies world transformation instead of the local one.
:returns: The transformed position.
"""
return (self.get_world_matrix() if world else self.get_matrix()) * position
def get_matrix(self, ignore_position=False, ignore_rotation=False, ignore_scale=True):
"""
Returns the matrix generated by this transform.
"""
return Mat3x3.create_matrix(
ZERO_VECTOR if ignore_position else self.position,
0 if ignore_rotation else self.rotation,
UNIT_VECTOR if ignore_scale else self.scale)
def get_world_matrix(self):
"""
Returns the 3x3 matrix of this transform in world space.
"""
return (self.parent.get_world_matrix() if self.parent is not None else IDENTITY_MATRIX) * self.get_matrix()
def get_world_position(self):
"""
Returns the world position of this transform.
"""
# Compute the position of the center.
center = (self.parent.get_world_matrix() if self.parent is not None else IDENTITY_MATRIX) * self.position
offset = self.get_matrix(ignore_position=True) * self.offset
return center + offset
def set_world_position(self, position):
"""
Defines the world position of the object.
:param position: The position the object is expected to be in.
"""
self.position = position - (self.parent.get_world_position() if self.parent is not None else ZERO_VECTOR)
def get_world_rotation(self):
"""
Returns the world rotation of this transform.
"""
return self.rotation + (self.parent.get_world_rotation() if self.parent is not None else 0)
def get_world_scale(self):
"""
Returns the world scale of this transform.
"""
return self.scale * (self.parent.get_world_scale() if self.parent is not None else UNIT_VECTOR)
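# Comment-only usage sketch (assumes the Vector2/Mat3x3 arithmetic imported from
# engine.logic.Math behaves as used above): a child parented to a transform at
# (10, 0), with a local position of (0, 5) and no rotation, reports a world
# position of (10, 5), since get_world_position() applies the parent chain
# before adding the locally rotated offset.
#   root = Transform(None, position=Vector2(10, 0))
#   child = Transform(root, position=Vector2(0, 5))
#   child.get_world_position()  # -> Vector2(10, 5)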
| 39.578947 | 121 | 0.655585 | 3,602 | 0.957724 | 0 | 0 | 0 | 0 | 0 | 0 | 1,840 | 0.489232 |
7e840f48bfa9da0d3fea1df9c562b535318a1317 | 5,820 | py | Python | tools/key2java.py | rb-anssi/tataouine | 6aef17cc7f25f1d6ada38439991bdb6247dcc465 | [
"BSD-3-Clause"
] | null | null | null | tools/key2java.py | rb-anssi/tataouine | 6aef17cc7f25f1d6ada38439991bdb6247dcc465 | [
"BSD-3-Clause"
] | null | null | null | tools/key2java.py | rb-anssi/tataouine | 6aef17cc7f25f1d6ada38439991bdb6247dcc465 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, array
# Import our local utils
from crypto_utils import *
def PrintUsage():
executable = os.path.basename(__file__)
print(u'\nkey2java\n\n\tUsage:\t{} token_priv_key.bin token_pub_key.bin platform_pub_key.bin shared_petpin.bin shared_petname.bin shared_userpin.bin master_secret_key.bin enc_local_pet_key.bin max_pin_tries max_secure_channel_tries outfile applet_type\n'.format(executable))
sys.exit(-1)
def Key2Java(argv):
if not os.path.isfile(argv[1]):
print(u'\nFile "{}" does not exist'.format(argv[1]))
PrintUsage()
if not os.path.isfile(argv[2]):
print(u'\nFile "{}" does not exist'.format(argv[2]))
PrintUsage()
if not os.path.isfile(argv[3]):
print(u'\nFile "{}" does not exist'.format(argv[3]))
PrintUsage()
if not os.path.isfile(argv[4]):
print(u'\nFile "{}" does not exist'.format(argv[4]))
PrintUsage()
if not os.path.isfile(argv[5]):
print(u'\nFile "{}" does not exist'.format(argv[5]))
PrintUsage()
if not os.path.isfile(argv[6]):
print(u'\nFile "{}" does not exist'.format(argv[6]))
PrintUsage()
if not os.path.isfile(argv[7]):
print(u'\nFile "{}" does not exist'.format(argv[7]))
PrintUsage()
if not os.path.isfile(argv[8]):
print(u'\nFile "{}" does not exist'.format(argv[8]))
PrintUsage()
# Keys for the secure channel
token_priv_key = argv[1]
token_pub_key = argv[2]
platform_pub_key = argv[3]
shared_petpin = argv[4]
shared_petname = argv[5]
shared_userpin = argv[6]
master_secret_key = argv[7]
enc_local_pet_key = argv[8]
max_pin_tries = int(argv[9], 0)
max_secure_channel_tries = int(argv[10], 0)
outfile = argv[11]
applet_type = argv[12]
sig_priv_key = None
sig_pub_key = None
if applet_type == "sig":
sig_priv_key = argv[13]
sig_pub_key = argv[14]
token_priv_key_data = read_in_file(token_priv_key)
token_pub_key_data = read_in_file(token_pub_key)
platform_pub_key_data = read_in_file(platform_pub_key)
shared_petpin_data = read_in_file(shared_petpin)
shared_petname_data = read_in_file(shared_petname)
shared_userpin_data = read_in_file(shared_userpin)
master_secret_key_data = read_in_file(master_secret_key)
enc_local_pet_key_data = read_in_file(enc_local_pet_key)
if applet_type == "sig":
sig_priv_key_data = read_in_file(sig_priv_key)
sig_pub_key_data = read_in_file(sig_pub_key)
libeccparams = token_priv_key_data[1:3]
token_priv_key_data = token_priv_key_data[3:]
token_pub_key_data = token_pub_key_data[3:int((2*(len(token_pub_key_data)) / 3)+1)]
platform_pub_key_data = platform_pub_key_data[3:int((2*(len(platform_pub_key_data)) / 3)+1)]
if applet_type == "sig":
sig_priv_key_data = sig_priv_key_data[3:]
sig_pub_key_data = sig_pub_key_data[3:int((2*(len(sig_pub_key_data)) / 3)+1)]
text = "package wookey_"+applet_type+";\n\nclass Keys {\n\tstatic byte[] OurPrivKeyBuf = { "
for byte in token_priv_key_data:
text += "(byte)0x%02x, " % ord(byte)
# For public keys, add the '04' uncompressed point
text += " };\n\n\tstatic byte[] OurPubKeyBuf = { (byte)0x04, "
for byte in token_pub_key_data:
text += "(byte)0x%02x, " % ord(byte)
text += " };\n\n\tstatic byte[] WooKeyPubKeyBuf = { (byte)0x04, "
for byte in platform_pub_key_data:
text += "(byte)0x%02x, " % ord(byte)
# Add the curve and signing algorithm information
text += " };\n\n\tstatic byte[] LibECCparams = { "
for byte in libeccparams:
text += "(byte)0x%02x, " % ord(byte)
# Add the PET PIN
text += " };\n\n\tstatic byte[] PetPin = { "
for byte in shared_petpin_data:
text += "(byte)0x%02x, " % ord(byte)
# Add the PET NAME
orig_shared_petname_data_len = len(shared_petname_data)
shared_petname_data = shared_petname_data + (64-orig_shared_petname_data_len)*'\x00'
text += " };\n\n\tstatic short PetNameLength = "+str(orig_shared_petname_data_len)+";\n\n\tstatic byte[] PetName = { "
for byte in shared_petname_data:
text += "(byte)0x%02x, " % ord(byte)
# Add the User PIN
text += " };\n\n\tstatic byte[] UserPin = { "
for byte in shared_userpin_data:
text += "(byte)0x%02x, " % ord(byte)
# Add the master secret key
text += " };\n\n\tstatic byte[] MasterSecretKey = { "
for byte in master_secret_key_data:
text += "(byte)0x%02x, " % ord(byte)
# Add the encrypted local pet key
text += " };\n\n\tstatic byte[] EncLocalPetSecretKey = { "
for byte in enc_local_pet_key_data:
text += "(byte)0x%02x, " % ord(byte)
if applet_type == "sig":
# Add the signature public key
text += " };\n\n\tstatic byte[] FirmwareSigPubKeyBuf = { (byte)0x04, "
for byte in sig_pub_key_data:
text += "(byte)0x%02x, " % ord(byte)
text += " };\n\n\tstatic byte[] FirmwareSigPrivKeyBuf = { "
for byte in sig_priv_key_data:
text += "(byte)0x%02x, " % ord(byte)
#
text += "};\n"
# Add the maximum PIN tries
text += "\n\n\tstatic final byte max_pin_tries = (byte)"+str(max_pin_tries)+";"
# Add the maximum secure channel mounting tries tries
text += "\n\n\tstatic final short max_secure_channel_tries = "+str(max_secure_channel_tries)+";"
text += "\n}"
save_in_file(text, outfile)
return 0
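# Example invocation (file names and the "auth" applet type are illustrative only;
# any applet type other than "sig" skips the firmware signature key arguments):
#   python key2java.py token_priv.bin token_pub.bin platform_pub.bin \
#       pet_pin.bin pet_name.bin user_pin.bin master_key.bin enc_local_pet_key.bin \
#       3 10 Keys.java auth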
if __name__ == '__main__':
# Register Ctrl+C handler
signal.signal(signal.SIGINT, handler)
    if len(sys.argv) < 13:
PrintUsage()
sys.exit(1)
Key2Java(sys.argv)
| 40.416667 | 279 | 0.635739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,860 | 0.319588 |
7e848495b8c58269c0b218c96feb0278fcd138df | 1,949 | py | Python | app/maze_selector.py | rpowel/maze | d98d7a39e6cc0abbd32323be1722fc1a126d8910 | [
"MIT"
] | null | null | null | app/maze_selector.py | rpowel/maze | d98d7a39e6cc0abbd32323be1722fc1a126d8910 | [
"MIT"
] | null | null | null | app/maze_selector.py | rpowel/maze | d98d7a39e6cc0abbd32323be1722fc1a126d8910 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 10:44:44 2020.
@author: powel
"""
import random
from random import randint
from typing import Type
import numpy as np
from mazes import RandomMaze, PrimMaze, KruskalMaze, RecursiveDivisionMaze, MazeBase
from percolation import check_percolation
class Maze:
def __init__(self):
self.maze = None
def make_maze(self, n_x: int, n_y: int, maze_type: str = 'Prim') -> np.ndarray:
if maze_type == 'Random':
maze_class = RandomMaze
elif maze_type == 'Prim':
maze_class = PrimMaze
elif maze_type == 'Kruskal':
maze_class = KruskalMaze
elif maze_type == 'Recursive':
maze_class = RecursiveDivisionMaze
else:
raise ValueError("Invalid maze_type")
maze = self._make_check_maze(n_x, n_y, maze_class)
return maze
    def _make_check_maze(self, n_x: int, n_y: int, maze_class: Type[MazeBase]) -> np.ndarray:
maze = maze_class().make_maze(n_x, n_y)
maze = self._set_entrance(maze)
maze = self._set_exit(maze)
while not check_percolation(maze):
maze = maze_class().make_maze(n_x, n_y)
maze = self._set_entrance(maze)
maze = self._set_exit(maze)
return maze
@staticmethod
def _set_entrance(maze: np.ndarray) -> np.ndarray:
while True:
x, y = randint(1, maze.shape[0] - 1), 0
if maze[x, y + 1] == 0:
break
maze[x, y] = 2
return maze
@staticmethod
def _set_exit(maze: np.ndarray) -> np.ndarray:
while True:
x, y = randint(1, maze.shape[0] - 1), maze.shape[1] - 1
if maze[x, y - 1] == 0:
break
maze[x, y] = 3
return maze
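# Cell-value convention inferred from the checks above (the generators themselves
# live in the external `mazes` module, so this is an assumption): 0 marks an open
# cell, 2 marks the entrance placed in the first column, and 3 marks the exit in
# the last column.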
if __name__ == "__main__":
random.seed(1)
N = 10
maze = Maze().make_maze(N, N, maze_type='Recursive')
print(maze.__repr__())
| 27.450704 | 87 | 0.581837 | 1,506 | 0.772704 | 0 | 0 | 494 | 0.253463 | 0 | 0 | 185 | 0.09492 |
7e852564331f423fb21ea2215be78aaea0deff4c | 507 | py | Python | my_lambdata/iqr_oop.py | karlmanalo/lambdata-karlmanalo | e6058fd1b4a5eb64d0afaebdfa14b902fedf931e | [
"MIT"
] | null | null | null | my_lambdata/iqr_oop.py | karlmanalo/lambdata-karlmanalo | e6058fd1b4a5eb64d0afaebdfa14b902fedf931e | [
"MIT"
] | null | null | null | my_lambdata/iqr_oop.py | karlmanalo/lambdata-karlmanalo | e6058fd1b4a5eb64d0afaebdfa14b902fedf931e | [
"MIT"
] | 1 | 2020-05-06T21:26:50.000Z | 2020-05-06T21:26:50.000Z | # my_lambdata\iqr_oop.py
# IQR function written in terms of OOP
# Must feed in list of numbers
import numpy
import pandas
class Numbers():
def __init__(self):
pass
def iqr(self, X):
q1 = numpy.percentile(X, 25, interpolation='midpoint')
q3 = numpy.percentile(X, 75, interpolation='midpoint')
print(f'The IQR for this data set is {q3 - q1}')
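# Illustrative call (not part of the original module): Numbers().iqr([1, 2, 3, 4, 5])
# prints "The IQR for this data set is 2.0", since the midpoint-interpolated
# quartiles of that list are q1 = 2.0 and q3 = 4.0.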
if __name__ == "__main__":
data = pandas.DataFrame([1,2,3,4,5])
nums = Numbers()
nums.iqr(data) | 22.043478 | 70 | 0.623274 | 271 | 0.534517 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.321499 |
7e85b0b404fa718713288ae4337dab0afce1d4af | 2,760 | py | Python | app/vendors/migrations/0030_auto_20180305_2301.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | app/vendors/migrations/0030_auto_20180305_2301.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | app/vendors/migrations/0030_auto_20180305_2301.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 2.0.2 on 2018-03-05 23:01
from django.db import migrations, models
import django.db.models.deletion
def migrate_membership(apps, schema_editor):
PoolPIID = apps.get_model('vendors', 'PoolPIID')
PoolMembership = apps.get_model('vendors', 'PoolMembership')
PoolMembershipZone = apps.get_model('vendors', 'PoolMembershipZone')
for membership in PoolPIID.objects.all():
new_membership, created = PoolMembership.objects.get_or_create(
vendor=membership.vendor,
pool=membership.pool,
piid=membership.piid
)
if membership.zone:
PoolMembershipZone.objects.get_or_create(
membership=new_membership,
zone=membership.zone
)
class Migration(migrations.Migration):
dependencies = [
('categories', '0003_auto_20180301_2027'),
('vendors', '0029_auto_20180301_2027'),
]
operations = [
migrations.CreateModel(
name='PoolMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('piid', models.CharField(max_length=128)),
('pool', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='categories.Pool')),
],
),
migrations.CreateModel(
name='PoolMembershipZone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('membership', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='zone', to='vendors.PoolMembership')),
('zone', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='categories.Zone')),
],
),
migrations.AlterField(
model_name='vendor',
name='pools',
field=models.ManyToManyField(through='vendors.PoolMembership', to='categories.Pool'),
),
migrations.AddField(
model_name='poolmembership',
name='vendor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vendors.Vendor'),
),
migrations.RunPython(migrate_membership),
migrations.RemoveField(
model_name='poolpiid',
name='pool',
),
migrations.RemoveField(
model_name='poolpiid',
name='vendor',
),
migrations.RemoveField(
model_name='poolpiid',
name='zone',
),
migrations.DeleteModel(
name='PoolPIID',
)
]
| 34.936709 | 156 | 0.593841 | 1,980 | 0.717391 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.178623 |
7e87f76640610e9de62b60abb07ac310224902a8 | 309 | py | Python | hcap_utils/contrib/allauth/login_form.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | null | null | null | hcap_utils/contrib/allauth/login_form.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | 31 | 2020-04-11T13:38:17.000Z | 2021-09-22T18:51:11.000Z | hcap_utils/contrib/allauth/login_form.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | 1 | 2020-04-08T17:04:39.000Z | 2020-04-08T17:04:39.000Z | from allauth.account.forms import LoginForm as AllauthLoginForm
class LoginForm(AllauthLoginForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
del self.fields["login"].widget.attrs["placeholder"]
del self.fields["password"].widget.attrs["placeholder"]
| 34.333333 | 63 | 0.702265 | 242 | 0.783172 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.139159 |
7e892bbc2f9093e6ea3899715b53105db05324f3 | 402 | py | Python | community/migrations/0013_auto_20191108_0918.py | akarakoc/Communityverse | 73ecf51eae3f96cca865e0d7cc526b92c8ad6b5e | [
"MIT"
] | null | null | null | community/migrations/0013_auto_20191108_0918.py | akarakoc/Communityverse | 73ecf51eae3f96cca865e0d7cc526b92c8ad6b5e | [
"MIT"
] | 22 | 2019-11-09T23:23:11.000Z | 2019-12-23T09:38:29.000Z | community/migrations/0013_auto_20191108_0918.py | akarakoc/CommunityVerse | 73ecf51eae3f96cca865e0d7cc526b92c8ad6b5e | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-08 09:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('community', '0012_auto_20191107_1649'),
]
operations = [
migrations.AlterField(
model_name='communities',
name='communityPrv',
field=models.BooleanField(default=False),
),
]
| 21.157895 | 53 | 0.614428 | 309 | 0.768657 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.273632 |
7e894bd8c2abebfa353b32928132bd335cf2f437 | 3,870 | py | Python | tests/test_api.py | gilbsgilbs/cruft | cc6571e0816bc5149e24ed7d23d90bdb9de8d65e | [
"MIT"
] | null | null | null | tests/test_api.py | gilbsgilbs/cruft | cc6571e0816bc5149e24ed7d23d90bdb9de8d65e | [
"MIT"
] | null | null | null | tests/test_api.py | gilbsgilbs/cruft | cc6571e0816bc5149e24ed7d23d90bdb9de8d65e | [
"MIT"
] | null | null | null | import json
import os
from pathlib import Path
from subprocess import run
import pytest
from examples import verify_and_test_examples
from git import Repo
import cruft
from cruft import exceptions
from cruft._commands.utils import get_cruft_file
def test_invalid_cookiecutter_repo(tmpdir):
with pytest.raises(exceptions.InvalidCookiecutterRepository):
cruft.create("DNE", Path(tmpdir))
def test_no_cookiecutter_dir(tmpdir):
with pytest.raises(exceptions.UnableToFindCookiecutterTemplate):
cruft.create("https://github.com/samj1912/cookiecutter-test", Path(tmpdir))
def test_create_examples(tmpdir):
tmpdir.chdir()
verify_and_test_examples(cruft.create)
def test_check_examples(tmpdir, project_dir):
tmpdir.chdir()
with pytest.raises(exceptions.NoCruftFound):
verify_and_test_examples(cruft.check)
os.chdir(project_dir)
verify_and_test_examples(cruft.check)
def test_update_and_check_real_repo(tmpdir):
tmpdir.chdir()
repo = Repo.clone_from("https://github.com/timothycrosley/cruft", str(tmpdir))
repo.head.reset(commit="86a6e6beda8095690414ff7652c15b7ae36e6128", working_tree=True)
with open(os.path.join(tmpdir, ".cruft.json")) as cruft_file:
cruft_state = json.load(cruft_file)
cruft_state["skip"] = ["cruft/__init__.py", "tests"]
with open(os.path.join(tmpdir, ".cruft.json"), "w") as cruft_file:
json.dump(cruft_state, cruft_file)
repo_dir = Path(tmpdir)
assert not cruft.check(repo_dir)
# Update should fail since we have an unclean git repo
assert not cruft.update(repo_dir)
# Commit the changes so that the repo is clean
run(
[
"git",
"-c",
"user.name='test'",
"-c",
"user.email='user@test.com'",
"commit",
"-am",
"test",
],
cwd=repo_dir,
)
assert cruft.update(repo_dir, skip_apply_ask=True)
def test_relative_repo_check(tmpdir):
tmpdir.chdir()
temp_dir = Path(tmpdir)
Repo.clone_from("https://github.com/samj1912/cookiecutter-test", str(temp_dir / "cc"))
project_dir = cruft.create("./cc", output_dir=str(temp_dir / "output"), directory="dir")
assert cruft.check(project_dir)
def test_update_examples(project_dir, tmpdir):
tmpdir.chdir()
with pytest.raises(exceptions.NoCruftFound):
verify_and_test_examples(cruft.update)
os.chdir(project_dir)
verify_and_test_examples(cruft.update)
def test_link_examples(project_dir, tmpdir):
os.chdir(project_dir)
with pytest.raises(exceptions.CruftAlreadyPresent):
verify_and_test_examples(cruft.link)
tmpdir.chdir()
Repo.clone_from("https://github.com/timothycrosley/cruft", str(tmpdir))
os.remove(os.path.join(tmpdir, ".cruft.json"))
verify_and_test_examples(cruft.link)
def test_directory_and_checkout(tmpdir):
output_path = cruft.create(
"https://github.com/samj1912/cookiecutter-test",
output_dir=Path(tmpdir),
directory="dir",
checkout="initial",
)
cruft_file = get_cruft_file(output_path)
assert cruft_file.exists()
assert cruft.check(output_path, checkout="initial")
assert not cruft.check(output_path, checkout="updated")
assert cruft.update(output_path, checkout="updated")
assert cruft.check(output_path, checkout="updated")
cruft_file.unlink()
assert not cruft_file.exists()
assert cruft.link(
"https://github.com/samj1912/cookiecutter-test",
project_dir=output_path,
directory="dir",
checkout="updated",
)
assert cruft.check(output_path, checkout="updated")
# Add checks for strictness where master is an older
# version than updated
assert not cruft.check(output_path, strict=True)
assert cruft.check(output_path, strict=False)
| 31.463415 | 92 | 0.701034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 739 | 0.190956 |
7e89cb57b76e7e255d355d36bbaa26056332ada5 | 4,303 | py | Python | analysis_engine/prepare_history_dataset.py | virdesai/stock-analysis-engine | 0ca501277c632150717ca499121a34f8f8c71ccb | [
"Apache-2.0"
] | 819 | 2018-09-16T20:33:11.000Z | 2022-03-30T21:18:23.000Z | analysis_engine/prepare_history_dataset.py | gvpathi/stock-analysis-engine | 0ca501277c632150717ca499121a34f8f8c71ccb | [
"Apache-2.0"
] | 14 | 2018-09-16T20:52:25.000Z | 2020-09-06T12:36:36.000Z | analysis_engine/prepare_history_dataset.py | gvpathi/stock-analysis-engine | 0ca501277c632150717ca499121a34f8f8c71ccb | [
"Apache-2.0"
] | 226 | 2018-09-16T20:04:32.000Z | 2022-03-31T01:41:14.000Z | """
Helper for loading a ``Trading History`` dataset
"""
import json
import zlib
import pandas as pd
import analysis_engine.consts as ae_consts
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(name=__name__)
def prepare_history_dataset(
data,
compress=False,
encoding='utf-8',
convert_to_dict=False,
include_keys=None,
ignore_keys=None,
convert_to_dates=None,
verbose=False):
"""prepare_history_dataset
Load a ``Trading History`` dataset into a dictionary
with a ``pd.DataFrame`` for the trading history record
list
:param data: string holding contents of a ``Trading History``
from a file, s3 key or redis-key
:param compress: optional - boolean flag for decompressing
the contents of the ``data`` if necessary
(default is ``False`` and algorithms
use ``zlib`` for compression)
    :param convert_to_dict: optional - bool, use ``False``
        for s3 and ``True`` for files
:param encoding: optional - string for data encoding
:param include_keys: optional - list of string keys
        to include from the dataset
.. note:: tickers are automatically included in the ``pd.DataFrame``
:param ignore_keys: optional - list of string keys
to remove before building the ``pd.DataFrame``
:param convert_to_dates: optional - list of string keys
to convert to datetime before building the ``pd.DataFrame``
:param verbose: optional - bool show the logs
(default is ``False``)
"""
if verbose:
log.debug('start')
use_data = None
parsed_data = None
data_as_dict = None
if compress:
if verbose:
log.debug('decompressing')
parsed_data = zlib.decompress(
data).decode(
encoding)
else:
parsed_data = data
if not parsed_data:
log.error('failed parsing')
return None
if verbose:
log.debug('loading as dict')
use_data = {}
if convert_to_dict:
try:
data_as_dict = json.loads(parsed_data)
except Exception as e:
if (
'the JSON object must be str, bytes or '
'bytearray, not') in str(e):
log.critical(
f'failed decoding json for string - double '
f'compression for history dataset found ex={e}')
data_as_dict = parsed_data
else:
data_as_dict = parsed_data
if len(data_as_dict) == 0:
log.error(
'empty trading history dictionary')
return use_data
convert_these_date_keys = [
'date',
'minute',
'exp_date'
]
use_include_keys = [
'tickers',
'version',
'last_trade_data',
'algo_config_dict',
'algo_name',
'created'
]
if include_keys:
use_include_keys = include_keys
use_ignore_keys = []
if ignore_keys:
use_ignore_keys = ignore_keys
for k in data_as_dict:
if k in use_include_keys:
use_data[k] = data_as_dict[k]
all_records = []
num_records = 0
for ticker in data_as_dict['tickers']:
if ticker not in use_data:
use_data[ticker] = []
for node in data_as_dict[ticker]:
for ignore in use_ignore_keys:
node.pop(ignore, None)
all_records.append(node)
# end for all datasets on this date to load
num_records = len(all_records)
if num_records:
if verbose:
log.info(f'found records={num_records}')
history_df = pd.DataFrame(all_records)
for dc in convert_these_date_keys:
if dc in history_df:
history_df[dc] = pd.to_datetime(
history_df[dc],
format=ae_consts.COMMON_TICK_DATE_FORMAT)
# end of converting all date columns
use_data[ticker] = history_df
else:
log.error(
f'did not find any records={num_records} in history dataset')
# end for all tickers in the dataset
return use_data
# end of prepare_history_dataset
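# Minimal comment-only usage sketch (the payload layout and the date string are
# assumptions based on the parsing logic above; dates must match
# ae_consts.COMMON_TICK_DATE_FORMAT):
#   payload = {'tickers': ['SPY'],
#              'SPY': [{'date': '2019-01-02 09:30:00', 'close': 249.0}]}
#   blob = zlib.compress(json.dumps(payload).encode('utf-8'))
#   data = prepare_history_dataset(blob, compress=True, convert_to_dict=True)
#   data['SPY']  # -> pd.DataFrame with the 'date' column parsed to datetime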
| 29.472603 | 77 | 0.595631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,758 | 0.408552 |
7e8a45c72fe260a3444e4ee7e5fcd4dbca175324 | 935 | py | Python | scripts/count_git_changes.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,753 | 2015-01-02T11:34:13.000Z | 2022-03-25T07:04:27.000Z | scripts/count_git_changes.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,404 | 2015-01-02T19:31:41.000Z | 2022-03-09T10:58:22.000Z | scripts/count_git_changes.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 1,156 | 2015-01-03T01:57:21.000Z | 2022-03-26T01:06:28.000Z | import os
import sys
insertions=0
deletions=0
files=0
FROMVER=""
if len(sys.argv)>1:
FROMVER=sys.argv[1]
TOVER=""
if len(sys.argv)>2:
TOVER=sys.argv[2]
TMPNAME=os.tmpnam()
VER=""
if len(FROMVER)>0:
VER=FROMVER+'..'
if len(TOVER)>0:
if len(VER)==0:
VER='..'
VER=VER+TOVER
os.system('git log --oneline --shortstat %s >%s' % (VER,TMPNAME))
for line in file(TMPNAME).readlines():
if line.find('file') == -1:
continue
if line.find('changed') == -1:
continue
if line.find('insertion') == -1 and line.find('deletion') == -1:
continue
entries=line.split(',')
for e in entries:
if e.find('file') != -1:
files+=int(e.strip().split(' ')[0])
elif e.find('insertion') != -1:
insertions+=int(e.strip().split(' ')[0])
elif e.find('deletion') != -1:
deletions+=int(e.strip().split(' ')[0])
print "Files changed: %d" % files
print "Insertions: %d" % insertions
print "Deletions: %d" % deletions
os.unlink(TMPNAME)
| 20.777778 | 65 | 0.628877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.189305 |
7e8d50185ec0ee4eb697f713cf041be29429c89b | 439 | py | Python | pysaintcoinach/ex/language.py | icykoneko/saintcoinach-py | 66898385e1198203a7ec9da83787427bf6fe5c83 | [
"MIT"
] | 7 | 2019-11-20T17:24:49.000Z | 2022-03-29T04:17:53.000Z | pysaintcoinach/ex/language.py | icykoneko/saintcoinach-py | 66898385e1198203a7ec9da83787427bf6fe5c83 | [
"MIT"
] | 7 | 2019-04-08T07:36:46.000Z | 2022-01-17T22:51:54.000Z | pysaintcoinach/ex/language.py | icykoneko/saintcoinach-py | 66898385e1198203a7ec9da83787427bf6fe5c83 | [
"MIT"
] | 3 | 2019-04-08T08:24:22.000Z | 2021-06-27T22:19:15.000Z | from enum import Enum
class Language(Enum):
none = ""
japanese = "ja"
english = "en"
german = "de"
french = "fr"
chinese_simplified = "chs"
chinese_traditional = "cht"
korean = "ko"
unsupported = "?"
def get_code(self):
return self.value
def get_suffix(self):
code = self.get_code()
if len(code) > 0:
return "_" + code
else:
return ""
| 18.291667 | 31 | 0.523918 | 414 | 0.943052 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.091116 |
7e908ee2b8fb406fe3a6030c978abed370619859 | 929 | py | Python | Python_lxf/Python_Basic_Operation/Python_Basic/basicValueAndFunc/Filter.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 2 | 2019-01-24T15:06:59.000Z | 2019-01-25T07:34:45.000Z | Python_lxf/Python_Basic_Operation/Python_Basic/basicValueAndFunc/Filter.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-12-23T09:45:11.000Z | 2019-12-23T09:45:11.000Z | Python_lxf/Python_Basic_Operation/Python_Basic/basicValueAndFunc/Filter.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-07-18T14:21:35.000Z | 2019-07-18T14:21:35.000Z | def is_odd(n):
return n % 2 == 1
def not_empty(s):
return s and s.strip()
a = list(filter(is_odd, [1, 2, 4, 5, 6, 7, 8]))
a = list(filter(lambda n : n % 2 == 1, [1, 2, 4, 5, 6, 7, 8, 9]))
b = list(filter(not_empty, ['A', '' , 'B', None, 'C', ' ']))
# filter() returns an Iterator, i.e. a lazy sequence
print(a, b)
def _odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
        # First filter out numbers divisible by 3, then on top of that filter out
        # numbers divisible by 5, and so on ...
        # Each pass wraps another filter around the iterator, effectively:
        # it = filter(_not_divisible(p_k), ... filter(_not_divisible(5), filter(_not_divisible(3), _odd_iter())))
        # In other words, every number divisible by any of [3, 5, 7, 11, ...]
        # (the primes produced so far) has been filtered out.
for n in primes():
if n < 30:
print(n)
else:
break
func = _not_divisible(9)  # _not_divisible(9) returns a function: lambda x: x % 9 > 0
print(func(18))  # 18 % 9 == 0, so this prints False
f = lambda x: x * x
print(f(19))
| 21.113636 | 104 | 0.592034 | 0 | 0 | 470 | 0.436397 | 0 | 0 | 0 | 0 | 448 | 0.41597 |
7e9106857781db3212367ee35a3b48eb586b45ed | 40,855 | py | Python | toolcraft/storage/__base__.py | SpikingNeurons/toolcraft | 7290fa70a5d2680ebacf1bc421efaf09545f7c7e | [
"BSD-3-Clause"
] | 6 | 2021-04-06T09:27:48.000Z | 2021-12-17T02:13:11.000Z | toolcraft/storage/__base__.py | SpikingNeurons/toolcraft | 7290fa70a5d2680ebacf1bc421efaf09545f7c7e | [
"BSD-3-Clause"
] | 57 | 2021-03-19T07:33:13.000Z | 2022-03-30T18:59:29.000Z | toolcraft/storage/__base__.py | SpikingNeurons/toolcraft | 7290fa70a5d2680ebacf1bc421efaf09545f7c7e | [
"BSD-3-Clause"
] | 2 | 2021-04-08T18:24:36.000Z | 2021-04-08T22:40:50.000Z | """
Holds the base classes for storage module.
These are special hashables whose state can be serialized on disk.
"""
import typing as t
import pathlib
import datetime
import dataclasses
import abc
from .. import util, logger, settings
from .. import marshalling as m
from .. import error as e
from . import state
# noinspection PyUnreachableCode
if False:
from . import store
_LOGGER = logger.get_logger()
_DOT_DOT_TYPE = t.Literal['..']
# noinspection PyUnresolvedReferences
_DOT_DOT = _DOT_DOT_TYPE.__args__[0]
@dataclasses.dataclass(frozen=True)
class StorageHashable(m.HashableClass, abc.ABC):
@property
@util.CacheResult
def config(self) -> state.Config:
return state.Config(
hashable=self,
path_prefix=self.path.as_posix(),
)
@property
@util.CacheResult
def info(self) -> state.Info:
return state.Info(
hashable=self,
path_prefix=self.path.as_posix(),
)
@property
@util.CacheResult
def internal(self) -> m.Internal:
return m.Internal(self)
@property
@util.CacheResult
def path(self) -> pathlib.Path:
"""
Never override this.
Always resolve folder structure from group_by and name.
Note that root_dir can still be overridden if you want different
result locations
"""
if isinstance(self.group_by, list):
_split_strs = self.group_by
elif isinstance(self.group_by, str):
_split_strs = [self.group_by]
elif self.group_by is None:
_split_strs = []
else:
e.code.ShouldNeverHappen(
msgs=[
f"unsupported group_by value {self.group_by}"
]
)
raise
_path = self.root_dir
for _ in _split_strs:
_path /= _
return _path / self.name
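        # For illustration: with root_dir `/data`, group_by `['models', 'resnet']`
        # and name `a1b2c3`, the resolved path is `/data/models/resnet/a1b2c3`;
        # with group_by None it is simply `/data/a1b2c3`.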
@property
@util.CacheResult
def root_dir(self) -> pathlib.Path:
# if not using parent_folder then this property should never be
        # called as it will ideally be overridden
if not self.uses_parent_folder:
e.code.ShouldNeverHappen(
msgs=[
f"You have configured class {self.__class__} to not to use "
f"parent_folder so we expect you to override `root_dir` "
f"property inside class {self.__class__}"
]
)
# get parent folder
try:
_parent_folder = getattr(self, 'parent_folder')
except AttributeError:
e.code.CodingError(
msgs=[
f"This is already checked .... ideally field "
f"parent_folder should be present in class {self.__class__}"
]
)
raise
# If parent_folder is provided this property will not be overridden,
# hence we will reach here.
# In order to avoid creating Folder instance for parent_folder we use
# `..` string while saving to disc. This makes sure that there is no
# recursive instances of Folder being created. These instances again
        # try to sync, leading to recursion
# But that means the Folder which is creating this instance must set
# itself as parent_folder.
# Also check documentation for `Folder.sync()` and
# `StorageHashable.init_validate()`
# Note in init_validate if self.parent_folder is `..` we raise error
# stating that you are creating instance from yaml file directly and
# it is not allowed as parent_folder should do it while syncing
if _parent_folder == _DOT_DOT:
e.code.CodingError(
msgs=[
f"Yaml on disk can have `..` string so the Folder which "
f"is creating this instance must update it and then call "
f"__post_init__ over the StorageHashable",
f"{Folder.sync} is responsible to set parent_folder while "
f"syncing.",
f"While in case if you are creating instance "
f"directly from yaml file then "
f"{StorageHashable.init_validate} should ideally block "
f"you as it is not possible to create instance.",
f"Also note that we do all this because hex_hash will be "
f"corrupt if parent_folder is not set appropriately "
f"before `Hashable.init` runs"
]
)
raise
# Now if parent_folder is Folder simply return the path of
# parent_folder as it is the root plus the name for this StorageHashable
if isinstance(_parent_folder, Folder):
return _parent_folder.path
# if above thing does not return that means we have a problem so
# raise error
e.code.CodingError(
msgs=[
f"The field parent_folder is not None nor it is valid "
f"Folder",
f"The type is {type(_parent_folder)}"
]
)
raise
@property
def group_by(self) -> t.Optional[t.Union[str, t.List[str]]]:
"""
        Default is to use no grouping ... override this if you need grouping
"""
return None
@property
def uses_parent_folder(self) -> bool:
"""
Adds a parent_folder behavior i.e. this subclass of StorageHashable
can be managed by parent_folder
"""
return False
@property
def is_created(self) -> bool:
_info_there = self.info.is_available
_config_there = self.config.is_available
if _info_there ^ _config_there:
e.code.CodingError(
msgs=[
f"Both config and info should be present or none should "
f"be present ...",
dict(
_info_there=_info_there, _config_there=_config_there
)
]
)
return _info_there and _config_there
@classmethod
def hook_up_methods(cls):
# call super
super().hook_up_methods()
# hook up create
util.HookUp(
cls=cls,
silent=True,
method=cls.create,
pre_method=cls.create_pre_runner,
post_method=cls.create_post_runner,
)
# hook up delete
util.HookUp(
cls=cls,
silent=True,
method=cls.delete,
pre_method=cls.delete_pre_runner,
post_method=cls.delete_post_runner,
)
def init_validate(self):
# ----------------------------------------------------------- 01
# if uses_parent_folder
if self.uses_parent_folder:
# ------------------------------------------------------- 01.01
# check if necessary field added
if 'parent_folder' not in self.dataclass_field_names:
e.code.CodingError(
msgs=[
f"We expect you to define field `parent_folder` as you "
f"have configured property `uses_parent_folder` to "
f"True for class {self.__class__}"
]
)
# ------------------------------------------------------- 01.02
            # the root_dir property must not be overridden
if self.__class__.root_dir != StorageHashable.root_dir:
e.code.CodingError(
msgs=[
f"Please do not override property `root_dir` in class "
f"{self.__class__} as it is configured to use "
f"parent_folder"
]
)
# ------------------------------------------------------- 01.03
# test if parent_folder is Folder
_parent_folder = getattr(self, 'parent_folder')
if not isinstance(_parent_folder, Folder):
if _parent_folder != _DOT_DOT:
e.code.CodingError(
msgs=[
f"We expect parent_folder to be set with instance "
f"of type {Folder}",
f"Instead found value of type "
f"{type(_parent_folder)}"
]
)
# ------------------------------------------------------- 01.04
# If parent_folder is provided this property will not be overridden,
# hence we will reach here.
# In order to avoid creating Folder instance for parent_folder
# we use `..` string while saving to disc. This makes sure that
# there is no recursive instances of Folder being created. These
# instances again tries to sync leading to recursion
# But that means the Folder which is creating this instance must set
# itself as parent_folder.
# Also check documentation for `Folder.sync()` and
# `StorageHashable.init_validate()`
# Note in init_validate if self.parent_folder is `..` we raise error
# stating that you are creating instance from yaml file directly and
# it is not allowed as parent_folder should do it while syncing
if _parent_folder == _DOT_DOT:
e.code.CodingError(
msgs=[
f"Problem with initializing {self.__class__}",
f"Yaml on disc can have `..` string so the Folder "
f"which is creating this instance must update it and "
f"then call __post_init__ over the StorageHashable",
f"{Folder.sync} is responsible to set parent_folder "
f"while syncing.",
f"While in case if you are creating instance "
f"directly from yaml file then we block "
f"you as it is not possible to create instance.",
f"Also note that we do all this because hex_hash "
f"will be corrupt if parent_folder is not set "
f"appropriately before `Hashable.init` runs"
]
)
raise
# ------------------------------------------------------- 01.05
# if parent_folder supplied check what it can contain
_contains = _parent_folder.contains
# if None
if _contains is not None:
# note we do not use isinstance() as all folder
# subclasses will be concrete and subclassing is not anything
# special as there is no hierarchy in folder types
if self.__class__ != _parent_folder.contains:
e.code.NotAllowed(
msgs=[
f"The parent_folder is configured to contain only "
f"instances of class "
f"{_parent_folder.contains} but you "
f"are trying to add instance of type "
f"{self.__class__}"
]
)
# ----------------------------------------------------------- 02
# check for path length
e.io.LongPath(path=self.path, msgs=[])
# if path exists check if it is a folder
if self.path.exists():
if not self.path.is_dir():
e.validation.NotAllowed(
msgs=[
f"We expect {self.path} to be a dir"
]
)
# ----------------------------------------------------------- 03
# call super
super().init_validate()
def init(self):
# ----------------------------------------------------------- 01
# call super
super().init()
# ----------------------------------------------------------- 02
# if root dir does not exist make it
if not self.path.exists():
self.path.mkdir(parents=True)
# ----------------------------------------------------------- 03
# if not created create
if not self.is_created:
self.create()
# ----------------------------------------------------------- 04
# if parent_folder can track then add self to items
# Note that when contains is None we might still have Folder and
# FileGroup inside it but we will not do tracking for it and it is
# job of user to handle in respective parent_folder class
if self.uses_parent_folder:
# noinspection PyUnresolvedReferences
_parent_folder = self.parent_folder # type: Folder
if _parent_folder.contains is not None:
# add item ...
# Note that item can already exist due to sync in that case
_parent_folder.add_item(hashable=self)
@classmethod
def from_dict(
cls,
yaml_state: t.Dict[str, "m.SUPPORTED_HASHABLE_OBJECTS_TYPE"],
**kwargs
) -> "StorageHashable":
if "parent_folder" in yaml_state.keys():
# update .. to parent_folder supplied from kwargs
if yaml_state["parent_folder"] == _DOT_DOT:
if "parent_folder" not in kwargs.keys():
e.code.CodingError(
msgs=[
f"The yaml_state dict loaded from file_or_text "
f"does has parent_folder set to `..`",
f"This means we do not have access to "
f"parent_folder instance so please supply it "
f"while Folder syncs files/folders inside it.",
f"Note that if you are using from_yaml then also "
f"you can supply the extra kwarg so that "
f"from_dict receives it."
]
)
else:
yaml_state["parent_folder"] = kwargs["parent_folder"]
# noinspection PyArgumentList
return cls(**yaml_state)
def as_dict(
self
) -> t.Dict[str, m.SUPPORTED_HASHABLE_OBJECTS_TYPE]:
# get dict from super
_dict = super().as_dict()
# if uses parent_folder
if self.uses_parent_folder:
# get parent folder
_parent_folder = getattr(self, 'parent_folder')
# if there is parent_folder update it to ..
if _parent_folder == _DOT_DOT:
e.code.CodingError(
msgs=[
f"If loading from yaml on disk make sure that a "
f"Folder is doing that un sync so that parent_folder "
f"is set appropriately before calling __post_init__ on "
f"StorageHashable"
]
)
# modify dict so that representation is change on disc
# note that this does not modify self.__dict__ ;)
# we do this only when parent_folder is available
_dict['parent_folder'] = _DOT_DOT
# return
return _dict
def create_pre_runner(self):
# check if already created
if self.is_created:
e.code.NotAllowed(
msgs=[
f"Things related to hashable class {self.__class__} "
f"with name `{self.name}` has already been created ...",
]
)
def create(self) -> t.Any:
e.code.CodingError(
msgs=[
f"There is nothing to create for class {self.__class__}",
F"You might need to override this method if you have "
F"something to create ...",
f"If you override this method make sure you override "
f"corresponding `delete()` too ..."
]
)
# noinspection PyUnusedLocal
def create_post_runner(
self, *, hooked_method_return_value: t.Any
):
# ----------------------------------------------------------- 01
# The below call will create state manager files on the disk
# check if .info and .config file exists i.e. state exists
if self.config.is_available:
e.code.CodingError(
msgs=[
f"Looks like you have updated config before this parent "
f"create_post_runner was called.",
f"Try to make updates to config after the config is "
f"created the parent create_post_runner by calling sync()"
]
)
if self.info.is_available:
e.code.CodingError(
msgs=[
f"looks like info file for this StorageHashable is "
f"already present",
f"As files were just created we expect that this state "
f"file should not be present ..."
]
)
# redundant
_ = self.is_created
# ----------------------------------------------------------- 02
# sync to disk ... note that from here on state files will be on the
# disc and the child methods that will call super can take over and
# modify state files like config
self.info.sync()
self.config.sync()
# ----------------------------------------------------------- 03
# also sync the created on ... note that config can auto sync on
# update to its fields
self.config.created_on = datetime.datetime.now()
# ----------------------------------------------------------- 04
# check if property updated
if not self.is_created:
e.code.NotAllowed(
msgs=[
f"Did you forget to update appropriately the things in "
f"`create()` method of {self.__class__}",
f"Property `self.is_created` should return `True` as "
f"things are now created."
]
)
# noinspection PyUnusedLocal
def delete_pre_runner(self, *, force: bool = False):
# check if already created
if not self.is_created:
e.code.NotAllowed(
msgs=[
f"Things related to hashable class {self.__class__} are "
f"not created ..."
]
)
def delete(self, *, force: bool = False) -> t.Any:
e.code.CodingError(
msgs=[
f"There is nothing to delete for class {self.__class__}",
F"You might need to override this method if you have "
F"something to delete ...",
f"You only `delete()` if you create something in `create()`"
]
)
# noinspection PyUnusedLocal
def delete_post_runner(
self, *, hooked_method_return_value: t.Any
):
# delete state files as they were created along with the
# files for this StorageHashable in create_post_runner
self.info.delete()
self.config.delete()
# also delete the empty path folder
if util.io_is_dir_empty(self.path):
self.path.rmdir()
else:
e.code.CodingError(
msgs=[
f"All the files inside folder should be deleted by now ...",
f"Expected path dir to be empty",
f"Check path {self.path}"
]
)
# check if property updated
if self.is_created:
e.code.NotAllowed(
msgs=[
f"Did you forget to update appropriately the things in "
f"`delete()` method of {self.__class__}",
f"Property `self.is_created` should return `False` as "
f"things are now deleted."
]
)
# if parent_folder is there try to remove item from the tracking dict
# items
if self.uses_parent_folder:
# get parent folder
_parent_folder = getattr(self, 'parent_folder')
# if parent folder can track then delete items that it has tracked
if _parent_folder.contains is not None:
# just do sanity check if we are having same item
if id(self) != id(_parent_folder.items[self.name]):
e.code.CodingError(
msgs=[
f"We expect these objects to be same ... "
f"make sure to add item using "
f"parent_folder.add_item() method for integrity"
]
)
# in init() we added self by calling
# self.parent_folder.add_item(self) ... now we just remove the
# item from tracking dict items so that parent folder is in sync
del _parent_folder.items[self.name]
# now we have removed strong reference to self in parent_folder.items
# dict ... let us make this instance useless as files are deleted
# hence we want to make sure any other references will fail to use
# this instance ...
# To achieve this we just clear out the internal __dict__
if not settings.FileHash.DEBUG_HASHABLE_STATE:
self.__dict__.clear()
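# Rough subclassing sketch (comment-only; `MyArtifact` and its payload are
# hypothetical and not part of this module). A concrete StorageHashable usually
# either overrides `root_dir` or opts into `uses_parent_folder` with a
# `parent_folder` field, and pairs `create()`/`delete()` so the hooked runners
# can manage the *.info/*.config state files around the real payload:
#   @dataclasses.dataclass(frozen=True)
#   class MyArtifact(StorageHashable):
#       parent_folder: "Folder"
#       @property
#       def uses_parent_folder(self) -> bool:
#           return True
#       def create(self):
#           (self.path / "payload.txt").write_text("...")
#       def delete(self, *, force: bool = False):
#           (self.path / "payload.txt").unlink()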
@dataclasses.dataclass(frozen=True)
class Folder(StorageHashable):
"""
A folder for hashable instance like Dataset or Model.
Name of the folder:
The name of folder is the name of the hashable it represents. The
dataclass field `for_hashable` signifies the uniqueness of the folder
        while the `parent_folder` field from the super class does not affect uniqueness
as the folder represented by this class is saved under it ;)
Deviation from `HashableClass.name` behaviour:
        You might be thinking why not have the folder hex_hash as the folder name.
        That sounds fine, but naming the folder after the hashable's name lets
        external utilities pick up folders just by knowing the hashable, with the
        path provided only once.
        Also parent_folder is required only to get the parent folder info; we can
        get away with just knowing the path.
Note that for FileGroup the parent_folder is considered as they have
even more fields and here we ignore parent_folder so that for_hashable
decides the folder name
    We do not allow adding fields in subclasses:
        So that *.info files are not polluted with extra info, we do not
        allow adding fields to the Folder class while subclassing.
In case you want more info please use *.config file via Folder.config
property
The contains property:
        Indicates what will be stored in this Folder
When parent_folder is None override path
This behaviour is borrowed from super class and well suits the
requirement for Folder class
Made up of three things
+ <hash>.info
- when loaded gives out Folder object with hashable instance object
+ <hash>.config
- the access info
+ <hash> folder
- A folder inside which you can have folder's or file_group's
"""
for_hashable: t.Union[str, m.HashableClass]
@property
def name(self) -> str:
"""
Do not override.
NOTE this also happens to be name of the folder
Note that for Folder the uniqueness is completely decided by
self.for_hashable field.
If self.for_hashable is str then the user is not using hashable and
simply wants to create folder with some specific name
We use self.for_hashable.name as name of the folder. Remember that
name is to be unique across hashable. By default the name returns
        hex_hash but when you override it to return a string the user needs to
take care that it is unique for each instance of that class.
Note that FileGroup considers parent_folder while creating name but
here we ignore as there are no extra fields we will define here. Also
        we want for_hashable to dictate things in Folder like the name of
folder created on disk.
"""
# the name is dictated by for_hashable as we will not allow any
# fields in Folder (check validation)
        # This is unlike FileGroup where all fields decide name ... this is
# because in FileGroup we intend to have more fields
if isinstance(self.for_hashable, str):
return self.for_hashable
else:
# the name defaults to hex_hash but if you have overridden it then
# we assume you have taken care of creating unique name for all
# possible instance of hashable class
return self.for_hashable.name
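        # For example (illustrative): Folder(for_hashable='downloads', parent_folder=...)
        # creates a directory literally named `downloads`, while passing a HashableClass
        # instance names the directory after its .name (the hex_hash by default).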
@property
def is_created(self) -> bool:
"""
        This does mean we need a call to create_all
        todo: maybe not that big of an overhead, but try to check that calls to
        this property are kept minimal
"""
# ----------------------------------------------------------------01
_folder_present = self.path.is_dir()
# (The super method is responsible to do this as state manager is
# available)
_state_manager_files_available = super().is_created
# ----------------------------------------------------------------02
# if _state_manager_files_available then folder must be present... the
# vice versa is not necessary ... this is because when state manager
# files are deleted we might still retain Folders as they hold
# valuable files like download files, processed files, results etc.
        # NOTE: we do delete state files in cases where config and info files
        # are modified over new versions ... so we need to protect the data
        # from deletion
if _state_manager_files_available:
if not _folder_present:
e.code.CodingError(
msgs=[
f"The state is available but respective folder is "
f"absent."
]
)
# ----------------------------------------------------------------03
# time to return
return _state_manager_files_available
@property
def contains(self) -> t.Union[
None, t.Type[StorageHashable]
]:
"""
        todo: for contains and read_only we can have a class decorator for
        Folder like StoreField ... although that means we need to avoid
        subclassing the Folder decorator ... but we can figure that out later
        Indicates what this folder should contain ... it can be one of Folder,
        FileGroup or None (i.e. files that will not use the auto hashing
        mechanism).
        The default is None, which means we have files whose hash is not
        tested ... in that case we also will not have state manager files like
        *.info and *.hash.
        If you return t.Any then anything can be added, including Folders and
        FileGroups. It is up to the user to take care of name clashes and to
        manage the state manager files that will be generated.
"""
return None
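    # For example (hypothetical), a subclass whose folder tracks FileGroup
    # children would override the property roughly like this:
    #
    #   @property
    #   def contains(self) -> t.Union[None, t.Type[StorageHashable]]:
    #       return FileGroup
    #
    # `FileGroup` is assumed to be a StorageHashable subclass defined elsewhere
    # in this module; with a non-None `contains`, sync() and the items dict
    # become available.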
@property
@util.CacheResult
def items(self) -> util.SmartDict:
if self.contains is None:
e.code.CodingError(
msgs=[
f"You have set contains to None so we do not know what "
f"will be stored .... so ideally you will never track "
f"things in the folder so you should not be using this "
f"property .... check class {self.__class__}"
]
)
return util.SmartDict(
allow_nested_dict_or_list=False,
supplied_items=None,
use_specific_class=self.contains,
)
def init_validate(self):
# ----------------------------------------------------------- 01
# folder can have only two fields
for f in self.dataclass_field_names:
if f not in ['for_hashable', 'parent_folder']:
e.code.CodingError(
msgs=[
f"The subclasses of class {Folder} can have only two "
f"fields {['for_hashable', 'parent_folder']}",
f"Please remove field `{f}` from class {self.__class__}"
]
)
# ----------------------------------------------------------- 03
# call super
super().init_validate()
def init(self):
# call super
super().init()
# # Note due to sync that gets called when parent_folder instance
# # was created the items dict of parent_folder will get instance
# # of self if already present on disc ... in that case delete the
# # item in dict and replace with self ...
# if _hashable.name in self.items.keys():
# # Also we do sanity check for integrity to check if hash of
# # existing item matches hashable ... this ensure that this is
# # safe update of dict
# if _hashable.hex_hash != \
# self.items[_hashable.name].hex_hash:
# e.code.NotAllowed(
# msgs=[
# f"While syncing from disk the hashable had different "
# f"hex_hash than one assigned now",
# f"We expect that while creating objects of "
# f"StorageHashable it should match with hex_hash of "
# f"equivalent object that was instantiated from disk",
# {
# "yaml_on_dsk":
# self.items[_hashable.name].yaml(),
# "yaml_in_memory":
# _hashable.yaml(),
# }
# ]
# )
# # note we do not call delete() method of item as it will delete
# # actual files/folder on disc
# # here we just update dict
# del self.items[_hashable.name]
# this is like `get()` for Folder .... note that all
# FileGroups/Folders will be added here via add_item
if self.contains is not None:
self.sync()
def create(self) -> pathlib.Path:
"""
If there is no Folder we create an empty folder.
"""
if not self.path.is_dir():
self.path.mkdir()
# return
return self.path
def delete(self, *, force: bool = False):
"""
Deletes Folder.
        Note: we do not do the read_only check here (as done in the delete_item
        method) because it is not applicable here and depends entirely on the
        parent folder's permissions.
        Note: we delete only empty folders, plus the state ... we will not
        support deleting non-empty folders.
        note: the force kwarg does not matter for a folder; it is just kept
        alongside FileGroup.delete for generic behaviour
todo: when `self.contains is None` handle delete differently as we
will not have items dict
"""
# todo: do u want to add permission check for
# Folder similar to FileGroup
# when contains is None we delete everything ... this is default
# behaviour if you want to do something special please override this
# method
# todo: fix later
if self.contains is None:
# note that this will also delete folder self.path
util.pathlib_rmtree(path=self.path, recursive=True, force=force)
# remember to make empty dir as per API ... this will be deleted
# by delete_post_runner while deleting state files
self.path.mkdir(exist_ok=True)
# else since the folder can track items delete them using items and
# calling the respective delete of items
else:
_items = self.items.keys()
for item in _items:
# first delete the item physically
self.items[item].delete(force=force)
# todo: remove redundant check
# by now we are confident that folder is empty so just check it
if not util.io_is_dir_empty(self.path):
e.code.CodingError(
msgs=[
f"The folder should be empty by now ...",
f"Check path {self.path}"
]
)
def warn_about_garbage(self):
"""
        In sync() we skip anything that does not end with *.info; this will
        also skip files that are not StorageHashable .... but that is okay for
        multiple reasons ...
        + performance
        + we might want something extra lying around in folders
        + we might have deleted state info while the folders are still
          lying around, and it might be wise not to delete them
        The most we can do in that case is warn users that something else is
        lying around in the folder; that is what this method,
        warn_about_garbage, is for.
todo: implement this
"""
...
def sync(self):
"""
        Sync is a heavyweight call that runs rarely; we aim to do all
        validations here and avoid any further validations later on ...
        todo: We can have a special Config class for Folder which can do some
        indexing operation
"""
# -----------------------------------------------------------------01
# Validations
if self.contains is None:
e.code.CodingError(
msgs=[
f"The caller code should take care to check if there is "
f"anything trackable inside this Folder",
f"Property contains is None so do not call sync"
]
)
# tracking dict should be empty
if len(self.items) != 0:
e.code.CodingError(
msgs=[
f"We expect that the tracker dict be empty",
f"Make sure that you are calling sync only once i.e. from "
f"__post_init__"
]
)
# -----------------------------------------------------------------02
# track for registered file groups
for f in self.path.iterdir():
# *** NOTE ***
            # We skip anything that does not end with *.info; this will also
            # skip files that are not StorageHashable .... but that is okay
            # for multiple reasons ...
            # + performance
            # + we might want something extra lying around in folders
            # + we might have deleted state info while the folders are still
            #   lying around, and it might be wise not to delete them
            # The most we can do in that case is warn users that something
            # else is lying around in the folder; see the method
            # warn_about_garbage.
# registered items have metainfo file with them
# only consider if meta info file exists
if not f.name.endswith(state.Suffix.info):
continue
# construct hashable instance from meta file
# Note that when instance for hashable is created it will check
# things on its own periodically
# Note the __post_init__ call will also sync things if it is folder
# noinspection PyTypeChecker
_hashable = self.contains.from_yaml(
f, parent_folder=self,
) # type: StorageHashable
# add tuple for tracking
# todo: no longer needed to add here as when we create instance
# the instance adds itself to parent_folder instance ... delete
# this code later ... kept for now just for reference
# noinspection PyTypeChecker
# self.items[_hashable.name] = _hashable
# -----------------------------------------------------------------03
# sync is equivalent to accessing folder
# so update state manager files
self.config.append_last_accessed_on()
def add_item(self, hashable: StorageHashable):
# since we are adding hashable item that are persisted to disk their
# state should be present on disk
if not hashable.is_created:
# err msg
if isinstance(hashable, StorageHashable):
_err_msg = f"This should never happen for " \
f"{hashable.__class__} " \
f"sub-class, there might be some coding error." \
f"Did you forget to call create file/folder " \
f"before adding the item."
e.code.CodingError(
msgs=[
f"We cannot find the state for the following hashable "
f"item on disk",
hashable.yaml(), _err_msg,
]
)
else:
_err_msg = f"Don't know the type {type(hashable)}"
e.code.ShouldNeverHappen(msgs=[_err_msg])
# add item
self.items[hashable.name] = hashable
@dataclasses.dataclass(frozen=True)
class ResultsFolder(Folder):
"""
    A special folder that stores results for a HashableClass, with a unique
    naming convention and unique path checking.
    It is also used by the StoreField decorator to store the pyarrow results.
"""
for_hashable: m.HashableClass
@property
@util.CacheResult
def root_dir(self) -> pathlib.Path:
"""
As the results generated by hashable can be deleted ...
"""
return settings.Dir.ROOT_DEL
@property
@util.CacheResult
def store(self) -> "store.StoreFieldsFolder":
"""
        Should return the location where you intend to save the results of
        methods decorated by StoreField.
        todo: This restricts us to only one possible store location for
        decorated methods. We could easily add one more argument to
        StoreField to indicate which property to use while saving the
        results, but we can plan this later on an as-needed basis.
        Alternative:
            Every task is special, so we can have multiple HashableClasses,
            one per task, and then we can afford to have a single
            path. An easy and effective solution.
"""
from . import store
return store.StoreFieldsFolder(
parent_folder=self, for_hashable="store"
)
@property
@util.CacheResult
def store_fields(self) -> t.List[str]:
"""
        Gets you the tables that will be stored under store.
        That is, it returns the properties/methods decorated by StoreField,
        each of which will have a sub-folder under store.
"""
from . import store
_ret = []
for _name in dir(self.for_hashable.__class__):
if _name.startswith("_"):
continue
if store.is_store_field(
getattr(self.for_hashable.__class__, _name)
):
_ret.append(_name)
return _ret
def init_validate(self):
# call super
super().init_validate()
        # check if the path contains the unique name
        # this is needed as the user needs to take care of keeping the
        # path unique; that way they can decide the possible
        # sequence of folders under which the storage results are stored
if self.path.as_posix().find(
self.for_hashable.name
) == -1:
e.validation.NotAllowed(
msgs=[
f"You need to have unique path for `{self.__class__}` "
f"derived from hashable class"
f" {self.for_hashable.__class__}",
f"Please try to have `self.for_hashable.name` in the "
f"`{self.__class__}` property `path` to avoid this error"
]
)
def init_store_df_files(self):
for _sf in self.store_fields:
getattr(self.for_hashable, _sf)(mode='e')
| 39.819688 | 80 | 0.538465 | 40,215 | 0.984335 | 0 | 0 | 40,323 | 0.986978 | 0 | 0 | 23,172 | 0.567177 |
7e92ae2753098633b02262fd0da3be3a5595b241 | 1,951 | py | Python | scripts/pulp_test.py | rajiv256/cs531_proj_2 | 4fa4628da06e3558b9f8c2917c0e87d9c5287690 | [
"MIT"
] | null | null | null | scripts/pulp_test.py | rajiv256/cs531_proj_2 | 4fa4628da06e3558b9f8c2917c0e87d9c5287690 | [
"MIT"
] | null | null | null | scripts/pulp_test.py | rajiv256/cs531_proj_2 | 4fa4628da06e3558b9f8c2917c0e87d9c5287690 | [
"MIT"
] | null | null | null | import pulp as pl
def create_affine_expression(coeffs, var_names):
assert len(coeffs) == len(var_names)
n = len(coeffs)
X = [pl.LpVariable(var_names[i]) for i in range(n)]
affine = pl.LpAffineExpression([(X[i], coeffs[i]) for i in range(n)])
return affine
def create_constraint(coeffs, var_names, sense, rhs):
"""Creates a constraint based on the args
Args:
coeffs: coefficients of the constraints
vars: Names of the vars
sense: +1, 0, -1 based on >=, ==, <= respectively. Or we can use pl.LpConstraintLE
rhs: numerical value of the rhs
Returns:
"""
assert len(coeffs) == len(var_names)
lhs = create_affine_expression(coeffs, var_names)
constr = pl.LpConstraint(lhs, sense=sense, rhs=rhs)
return constr
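# Example (hypothetical model, not part of the original script): constraints
# built this way can be added to a pulp problem with `+=`:
#
#   prob = pl.LpProblem("demo", pl.LpMaximize)
#   prob += create_constraint([1, 1], ['x_0', 'x_1'], pl.LpConstraintLE, 10)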
def test_affine_expression(coeffs=[1, 2, 3], var_names=['x_0', 'x_1', 'x_2']):
print(f'coeffs: {coeffs} | var_names: {var_names}')
affine = create_affine_expression(coeffs, var_names)
print(affine)
def test_constraint(coeffs=[1, 2, 3], var_names=['x_0', 'x_1', 'x_2'], sense=pl.LpConstraintLE, rhs=1):
print(f'coeffs: {coeffs} | var_names: {var_names} | sense: {sense} | rhs: {rhs}')
constraint = create_constraint(coeffs, var_names, sense, rhs)
print(constraint)
def test_0(solver_type):
model = pl.LpProblem("Example", pl.LpMaximize)
print(solver_type)
solver = pl.getSolver(solver_type)
_var = pl.LpVariable('a', 0, 1)
_var2 = pl.LpVariable('a2', 0, 2)
model += _var + _var2 <= 3
model += _var + _var2
x = _var + _var2
status = model.solve(solver)
print(pl.value(_var), pl.value(_var2))
print(pl.value(x))
return status
if __name__ == "__main__":
# Get the available solvers
av = pl.listSolvers(onlyAvailable=True)
print(av)
# Take the first available solver
status = test_0(av[0])
print(status)
test_affine_expression()
test_constraint()
| 28.275362 | 103 | 0.654024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 520 | 0.26653 |
7e935ec5d2ebb9f636c96a59cc60b230513b416f | 3,434 | py | Python | apps/user_app/models.py | lightless233/npiss | 8338f50d971600fe2b2366836ca2fb543f2276d5 | [
"MIT"
] | 1 | 2016-11-22T13:25:02.000Z | 2016-11-22T13:25:02.000Z | apps/user_app/models.py | LiGhT1EsS/npiss | 8338f50d971600fe2b2366836ca2fb543f2276d5 | [
"MIT"
] | 4 | 2020-06-05T17:28:20.000Z | 2022-03-11T23:15:49.000Z | apps/user_app/models.py | lightless233/npiss | 8338f50d971600fe2b2366836ca2fb543f2276d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# coding: utf8
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.hashers import make_password, check_password
__author__ = 'lightless'
__email__ = 'root@lightless.me'
class PissUser(models.Model):
"""
    Stores user information.
"""
class Meta:
db_table = "piss_users"
username = models.CharField(max_length=64, null=False, blank=False, unique=True)
password = models.CharField(max_length=512, null=False, blank=False)
email = models.CharField(max_length=64, null=False, blank=False, unique=True)
token = models.CharField(max_length=64, unique=True, default="")
status = models.PositiveSmallIntegerField(default=9001, blank=False, null=False)
last_login_time = models.DateTimeField(default=None, null=True, blank=True)
last_login_ip = models.CharField(max_length=16, blank=True)
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
is_deleted = models.BooleanField(default=False)
def save_password(self, new_password):
self.password = make_password(new_password)
def verify_password(self, input_password):
return check_password(input_password, self.password)
def get_user_status(self):
status_dict = {
9001: {"message": u"用户未激活",},
9002: {"message": u"用户正常", },
9003: {"message": u"用户被禁止登录", },
}
try:
return status_dict[self.status]
except KeyError:
return "Unknown Status"
def __str__(self):
return "<{username}, {status}>".format(username=self.username, status=self.get_user_status())
class PissActiveCode(models.Model):
"""
    Stores activation code information.
"""
class Meta:
db_table = "piss_active_code"
    # user_id records which user used this activation code; set to 0 if unused
user_id = models.BigIntegerField(null=False, blank=False, default=0)
active_code = models.CharField(max_length=64, null=False, blank=False, unique=True)
used = models.BooleanField(null=False, blank=False, default=False)
used_time = models.DateTimeField(default=None, null=True)
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
is_deleted = models.BooleanField(default=False)
def get_code_status(self):
code_status = {
True: u"激活码已失效",
False: u"激活码有效",
}
return code_status[self.used]
def use_active_code(self):
self.used = True
def __str__(self):
return "<{code}-{used}>".format(code=self.active_code, used=self.used)
class PissUserExtra(models.Model):
"""
    Stores extra user information.
"""
class Meta:
db_table = "piss_user_extra"
user_id = models.BigIntegerField()
access_key = models.CharField(max_length=40, blank=True)
secret_key = models.CharField(max_length=40, blank=True)
domain = models.CharField(max_length=255, blank=True)
bucket_name = models.CharField(max_length=64, blank=True)
    # If this field is True, use the Qiniu-related info and links
    # If this field is False, use this site's URL and 302-redirect to the Qiniu link
use_qiniu = models.BooleanField(default=True)
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
is_deleted = models.BooleanField(default=False)
def __str__(self):
return "<{user}-{qiniu}>".format(user=self.use_id, qiniu=self.use_qiniu)
| 31.796296 | 101 | 0.689284 | 3,402 | 0.932055 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.175342 |
7e95766c342499bfa038fc67107c66bd872341ac | 24 | py | Python | src/models/__init__.py | WorqHat/Stanford-MRnet-Challenge | 0a8a7438f55503307d5b6b0ddaaa3c50fa4d692b | [
"MIT"
] | 1 | 2021-06-27T18:22:57.000Z | 2021-06-27T18:22:57.000Z | src/models/__init__.py | WorqHat/Stanford-MRnet-Challenge | 0a8a7438f55503307d5b6b0ddaaa3c50fa4d692b | [
"MIT"
] | null | null | null | src/models/__init__.py | WorqHat/Stanford-MRnet-Challenge | 0a8a7438f55503307d5b6b0ddaaa3c50fa4d692b | [
"MIT"
] | 2 | 2020-05-28T07:30:39.000Z | 2021-06-27T18:22:59.000Z | from .MRnet import MRnet | 24 | 24 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e97e798b9acfb3c7359d3f9ef7c17e46c59fef5 | 398 | py | Python | storage/models/production_data.py | aagarwal1999/194-web-app | 362efd32d964d780f213b4e5a97fd14b080329ba | [
"Apache-2.0"
] | null | null | null | storage/models/production_data.py | aagarwal1999/194-web-app | 362efd32d964d780f213b4e5a97fd14b080329ba | [
"Apache-2.0"
] | null | null | null | storage/models/production_data.py | aagarwal1999/194-web-app | 362efd32d964d780f213b4e5a97fd14b080329ba | [
"Apache-2.0"
] | null | null | null | from storage.shared import db
import datetime
import uuid
class ProductionData(db.Model):
id = db.Column(db.String(100), primary_key=True, default=lambda: str(uuid.uuid4()))
data = db.Column(db.Text, unique=False, nullable=False)
time = db.Column(db.DateTime, default=datetime.datetime.now)
one_line_summary = db.Column(db.Text)
one_paragraph_summary = db.Column(db.Text)
| 24.875 | 87 | 0.731156 | 333 | 0.836683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e9894e1fe1b381787e323f05a879ce7b9f749fc | 2,478 | py | Python | nostrild/nostrild.py | nibalizer/galileo | f5d100adcfaa238d98ec2df93040eafc4d9e7420 | [
"Apache-2.0"
] | 1 | 2015-01-11T04:32:57.000Z | 2015-01-11T04:32:57.000Z | nostrild/nostrild.py | nibalizer/galileo | f5d100adcfaa238d98ec2df93040eafc4d9e7420 | [
"Apache-2.0"
] | 1 | 2015-01-12T01:17:07.000Z | 2015-01-12T01:17:07.000Z | nostrild/nostrild.py | nibalizer/galileo | f5d100adcfaa238d98ec2df93040eafc4d9e7420 | [
"Apache-2.0"
] | 2 | 2015-01-17T00:52:39.000Z | 2017-09-09T05:42:18.000Z | # nostrild
# authentication and user info daemon
import getent
import os
import yaml
import ldap
from itsdangerous import TimestampSigner
from flask import Flask, abort, request, jsonify
from flask_cors import CORS
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app)
def always_auth():
req = request.get_json(force=True)
if req['user'] is None:
abort(400, "You must specify a user")
if req['password'] is None:
abort(400, "You must specify a password")
secret = s.sign(req['user'])
return secret
def ldap_auth():
req = request.get_json(force=True)
if req['user'] is None:
abort(400, "You must specify a user")
if req['password'] is None:
abort(400, "You must specify a password")
con = ldap.initialize("ldap://" + conf['ldap_server'])
con.start_tls_s()
try:
dn = "uid={0},{1}".format(req['user'], conf['search_scope'])
pw = "{0}".format(req['password'])
con.simple_bind_s( dn, pw )
success = True
except:
success = False
finally:
con.unbind()
if success:
secret = s.sign(req['user'])
return secret
else:
abort(400, "Invalid username or password")
@app.route("/")
def hello():
return "nostrild: authentication for snot"
@app.route("/auth", methods = ["POST"])
def auth():
    print(request.json)
if conf['auth_scheme'] == 'always':
secret = always_auth()
elif conf['auth_scheme'] == 'ldap':
secret = ldap_auth()
return jsonify({"secret_key": secret,
"timeout": conf['auth_timeout']})
@app.route("/user/<name>")
def username(name):
"""
return getent info and snotsig
"""
try:
passwd = dict(getent.passwd(name))
except TypeError:
abort(400, "Invalid user")
snotsig_path = '/home/{0}/solaris/.snotsig'.format(name)
sig_path = '/home/{0}/solaris/.snotsig'.format(name)
if os.path.isfile(snotsig_path):
with open(snotsig_path) as f:
snotsig = f.read()
f.closed
elif os.path.isfile(sig_path):
with open(sig_path) as f:
snotsig = f.read()
f.closed
#TODO check linux homedir as well
else:
snotsig = ""
return jsonify({"passwd": passwd, "snotsig": snotsig})
if __name__ == "__main__":
with open('config.yaml') as f:
conf = yaml.load(f.read())
f.closed
s = TimestampSigner(conf['secret_key'])
app.run(debug=True, port=conf['port'])
| 22.324324 | 64 | 0.619048 | 0 | 0 | 0 | 0 | 1,070 | 0.4318 | 0 | 0 | 676 | 0.272801 |
7e98b2febab6aef339580ecaf31712e7927e5389 | 13,357 | py | Python | P3/regression.py | alepalenc/Aprendizaje-Automatico | b2a6daa29bbed29b3d042a51bc0425b1e80152e6 | [
"MIT"
] | null | null | null | P3/regression.py | alepalenc/Aprendizaje-Automatico | b2a6daa29bbed29b3d042a51bc0425b1e80152e6 | [
"MIT"
] | null | null | null | P3/regression.py | alepalenc/Aprendizaje-Automatico | b2a6daa29bbed29b3d042a51bc0425b1e80152e6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn import metrics
from sklearn.base import BaseEstimator
from sklearn.neighbors import KNeighborsRegressor
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# Fixed random seed
np.random.seed(1)
# Constants
FILENAME = 'datos/data_regression.csv'
TEST_SIZE = 0.2
N_JOBS = 6
VISUALIZE_TRAIN_SET = False
CROSS_VALIDATION = False
CROSS_VALIDATION_KNR = False
VARIANCE_THRESHOLD = 1e-3
POL_DEGREE = 2
PCA_EXPLAINED_VARIANCE = 0.99999
K_SPLITS = 5
REG_PARAM_VALUES1 = [0.1, 1, 5, 10, 20]
REG_PARAM_VALUES2 = [4, 4.5, 5, 5.5, 6]
REG_PARAM = 5
NUM_NEIGHBORS_VALUES = [5, 10, 15, 20]
NUM_NEIGHBORS = 5
def readData(filename):
X = []
y = []
with open(filename) as f:
for line in f:
attribs_label = line.split(",")
X.append(attribs_label[:-1])
y.append(attribs_label[-1])
X.pop(0)
y.pop(0)
X = np.array(X, np.float64)
y = np.array(y, np.float64)
return X, y
def tableCVResults(cv_results, precision=5):
row = list(cv_results["params"][0].keys())+["mean E_in","mean E_cv"]
format_row = "{:<20}" * len(row)
print(format_row.format(*row))
for i in range(len(cv_results["params"])):
row = list(cv_results["params"][i].values())
row.append(round(1-cv_results["mean_train_score"][i],precision))
row.append(round(1-cv_results["mean_test_score"][i],precision))
print(format_row.format(*row))
class PseudoinverseLinearRegression(BaseEstimator):
def __init__(self, reg_param=0.0):
self.reg_param = reg_param # regularization parameter (lambda)
    # Model fitting
def fit(self, X, y):
inverse = np.linalg.inv(X.T @ X + self.reg_param*np.identity(X.shape[1]))
self.w = np.dot( inverse, np.dot(X.T,y) )
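    # fit() above computes the closed-form (ridge) least-squares solution
    #     w = (X^T X + lambda * I)^{-1} X^T y
    # where lambda is self.reg_param; with reg_param=0 this reduces to the
    # ordinary pseudoinverse solution.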
    # Prediction
def predict(self, X):
return np.dot(X,self.w)
    # Mean Squared Error
def mse(self, X, y):
return metrics.mean_squared_error(y,self.predict(X))
    # Mean Absolute Error
def mae(self, X, y):
return metrics.mean_absolute_error(y,self.predict(X))
    # Coefficient of determination (R^2)
def R2(self, X, y):
return 1-self.mse(X,y)/np.var(y)
# Score: R^2
def score(self, X, y):
return self.R2(X,y)
class KNR(BaseEstimator):
    def __init__(self, num_neighbors=5, weight_function='uniform'):
        self.num_neighbors = num_neighbors # number of neighbors
        # store every __init__ param so that sklearn's get_params()/clone()
        # (used by GridSearchCV) works, and so the parameter is actually used
        self.weight_function = weight_function
    # Model fitting
    def fit(self, X, y):
        self.model = KNeighborsRegressor(n_neighbors=self.num_neighbors,
                                         weights=self.weight_function,
                                         n_jobs=N_JOBS)
self.model.fit(X,y)
    # Prediction
def predict(self, X):
return self.model.predict(X)
    # Mean Squared Error
def mse(self, X, y):
return metrics.mean_squared_error(y,self.predict(X))
    # Mean Absolute Error
def mae(self, X, y):
return metrics.mean_absolute_error(y,self.predict(X))
    # Coefficient of determination (R^2)
def R2(self, X, y):
return self.model.score(X,y)
# Score: R^2
def score(self, X, y):
return self.R2(X,y)
if __name__ == "__main__":
X, y = readData(FILENAME)
    # Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=42)
    # Visualize the data with histograms and in a 2D space via PCA and t-SNE
if VISUALIZE_TRAIN_SET:
print("#################################################################")
print("########## VISUALIZACIÓN DEL CONJUNTO DE ENTRENAMIENTO ##########")
print("#################################################################\n")
print("Histograma con las temperaturas críticas y sus frec. absolutas")
plt.hist(y_train, bins=37, density=False, cumulative=False)
plt.xlabel("Temperatura crítica")
plt.ylabel("Frecuencia absoluta")
plt.title("Histograma con las temperaturas críticas y sus frec. absolutas")
plt.grid(True)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("Histograma con las temperaturas críticas y sus frec. relativas acum.")
plt.hist(y_train, bins=37, density=True, cumulative=True)
plt.xlabel("Temperatura crítica")
plt.ylabel("Frecuencia relativa acumulada")
plt.title("Histograma con las temperaturas críticas y sus frec. relativas acum.")
plt.grid(True)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
cmap='plasma'
alpha=0.2
X_train_95 = X_train[np.where(y_train<95.0)]
y_train_95 = y_train[np.where(y_train<95.0)]
print("Representación de los datos con reducción de dimensionalidad usando PCA")
X_PCA = PCA(n_components=2, random_state=42).fit_transform(X_train_95)
plt.scatter(X_PCA[:,0], X_PCA[:,1], c=y_train_95, cmap=cmap, alpha=alpha)
plt.colorbar()
plt.title("Representación de los datos en 2D usando PCA")
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("Representación de los datos con reducción de dimensionalidad usando t-SNE")
X_TSNE = TSNE(n_components=2, init=X_PCA).fit_transform(X_train_95)
plt.scatter(X_TSNE[:,0], X_TSNE[:,1], c=y_train_95, cmap=cmap, alpha=alpha)
plt.colorbar()
plt.title("Representación de los datos en 2D usando t-SNE")
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("##################################")
print("########## PREPROCESADO ##########")
print("##################################\n")
    # Pearson correlation coefficient matrix for the initial data
    # (constant features are removed first)
correlation_matrix = np.corrcoef(np.transpose(VarianceThreshold().fit_transform(X_train)))
print("Matriz de coeficientes de correlación de Pearson (datos iniciales)")
plt.matshow(correlation_matrix, cmap='plasma')
plt.colorbar()
plt.title("Matriz de coef. de corr. de Pearson \n(datos iniciales)", pad=40.0)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("Evolución del número de características:")
print("\tDatos iniciales:", X_train.shape[1])
    # Remove features with very low variance
variance_threshold = VarianceThreshold(VARIANCE_THRESHOLD)
X_train = variance_threshold.fit_transform(X_train)
X_test = variance_threshold.transform(X_test)
print("\tVarianceThreshold:",X_train.shape[1])
    # Add non-linear features (polynomials of bounded degree)
    # This also adds the feature associated with the intercept term
polynomial_features = PolynomialFeatures(POL_DEGREE)
X_train = polynomial_features.fit_transform(X_train)
X_test = polynomial_features.transform(X_test)
print("\tPolynomialFeatures:",X_train.shape[1])
    # Standardization (features with mean 0 and variance 1)
standard_scaler = StandardScaler()
X_train = standard_scaler.fit_transform(X_train)
X_test = standard_scaler.transform(X_test)
print("\tStandardScaler:",X_train.shape[1])
    # Dimensionality reduction via Principal Component Analysis
pca = PCA(n_components=PCA_EXPLAINED_VARIANCE)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
print("\tPCA:",X_train.shape[1])
input("\n--- Pulsar tecla para continuar ---\n")
    # Pearson correlation coefficient matrix for the preprocessed data
correlation_matrix = np.corrcoef(np.transpose(X_train))
print("Matriz de coeficientes de correlación de Pearson (datos preprocesados)")
plt.matshow(correlation_matrix, cmap='plasma')
plt.colorbar()
plt.title("Matriz de coef. de corr. de Pearson \n(datos preprocesados)", pad=40.0)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
    # Create the Linear Regression model that uses the pseudoinverse
plr = PseudoinverseLinearRegression(reg_param=REG_PARAM)
    # Add the intercept term to the training and test samples
X_train = np.hstack(( np.ones((X_train.shape[0],1)), X_train ))
X_test = np.hstack(( np.ones((X_test.shape[0],1)), X_test ))
if CROSS_VALIDATION:
print("######################################")
print("########## CROSS-VALIDATION ##########")
print("######################################\n")
param_grid = {'reg_param':REG_PARAM_VALUES1}
cv_searcher = GridSearchCV(plr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True)
cv_searcher.fit(X_train, y_train)
print()
tableCVResults(cv_searcher.cv_results_)
print()
print("Mejores hiperparámetros:",cv_searcher.best_params_)
print("E_in medio:",round(1-cv_searcher.cv_results_["mean_train_score"][np.where(cv_searcher.cv_results_["rank_test_score"]==1)[0][0]],5))
print("E_cv medio:",round(1-cv_searcher.best_score_,5))
print()
param_grid = {'reg_param':REG_PARAM_VALUES2}
cv_searcher = GridSearchCV(plr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True)
cv_searcher.fit(X_train, y_train)
print()
tableCVResults(cv_searcher.cv_results_)
print()
print("Mejores hiperparámetros:",cv_searcher.best_params_)
print("E_in medio:",round(1-cv_searcher.cv_results_["mean_train_score"][np.where(cv_searcher.cv_results_["rank_test_score"]==1)[0][0]],5))
print("E_cv medio:",round(1-cv_searcher.best_score_,5))
print()
plr.set_params(**(cv_searcher.best_params_))
input("\n--- Pulsar tecla para continuar ---\n")
print("##########################################################")
print("########## EVALUACIÓN DE LA HIPÓTESIS FINAL ##############")
print("##########################################################\n")
plr.fit(X_train, y_train)
print("\nE_in =",round(1-plr.R2(X_train,y_train),5))
print("R²_in =",round(plr.R2(X_train,y_train),5))
print("MAE_in =",round(plr.mae(X_train,y_train),5))
print("\nE_test =",round(1-plr.R2(X_test,y_test),5))
print("R²_test =",round(plr.R2(X_test,y_test),5))
print("MAE_test:",round(plr.mae(X_test,y_test),5))
input("\n--- Pulsar tecla para continuar ---\n")
    # Create the KNR model
knr = KNR(num_neighbors=NUM_NEIGHBORS)
    # Remove the intercept term from the training and test samples
X_train = X_train[:,1:]
X_test = X_test[:,1:]
if CROSS_VALIDATION_KNR:
print("############################################")
print("########## CROSS-VALIDATION (KNR) ##########")
print("############################################\n")
param_grid = {'num_neighbors':NUM_NEIGHBORS_VALUES}
cv_searcher = GridSearchCV(knr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True)
cv_searcher.fit(X_train, y_train)
print()
tableCVResults(cv_searcher.cv_results_)
print()
print("Mejores hiperparámetros:",cv_searcher.best_params_)
print("E_in medio:",round(1-cv_searcher.cv_results_["mean_train_score"][np.where(cv_searcher.cv_results_["rank_test_score"]==1)[0][0]],5))
print("E_cv medio:",round(1-cv_searcher.best_score_,5))
print()
knr.set_params(**(cv_searcher.best_params_))
input("\n--- Pulsar tecla para continuar ---\n")
print("################################################################")
print("########## EVALUACIÓN DE LA HIPÓTESIS FINAL (KNR) ##############")
print("################################################################\n")
knr.fit(X_train,y_train)
print("\nE_in =",round(1-knr.R2(X_train,y_train),5))
print("R²_in =",round(knr.R2(X_train,y_train),5))
print("MAE_in =",round(knr.mae(X_train,y_train),5))
print("\nE_test =",round(1-knr.R2(X_test,y_test),5))
print("R²_test =",round(knr.R2(X_test,y_test),5))
print("MAE_test:",round(knr.mae(X_test,y_test),5)) | 36.002695 | 147 | 0.589204 | 1,876 | 0.139823 | 0 | 0 | 0 | 0 | 0 | 0 | 4,339 | 0.323396 |
7e99f08d361c9ea4cb38904ff75221d141f1e187 | 1,838 | py | Python | Line_Bot_Server/LineUsers.py | STU-Idichi-Syoya/Linebot_ | 54941ec0baee40ac21eeed77a0e1d0da02631aea | [
"MIT"
] | null | null | null | Line_Bot_Server/LineUsers.py | STU-Idichi-Syoya/Linebot_ | 54941ec0baee40ac21eeed77a0e1d0da02631aea | [
"MIT"
] | null | null | null | Line_Bot_Server/LineUsers.py | STU-Idichi-Syoya/Linebot_ | 54941ec0baee40ac21eeed77a0e1d0da02631aea | [
"MIT"
] | null | null | null | from Line_Bot_Server.LineBot import line_bot_api
from linebot import LineBotApi                 # needed for the type hint in LineSender
from linebot.models import TextSendMessage     # used in LineSender.sendMessage
##### STATE #####
class LineUser:
def __init__(self,reply=None,userId=None):
        '''LINE user info class. The userId can either be taken from a reply
        event or passed in directly.
        The LINE API is then used to fetch the display name and the
        one-line status message.
        '''
        # If neither reply nor userId carries any information, userId is set to None.
if (reply or userId):
            ## get the userId from the reply
if reply:
self.userId = reply.source.user_id
else:
self.userId=userId
            # get the name and status message from the profile
profile = line_bot_api.get_profile(self.userId)
self.status = (profile.status_message)
self.name=profile.display_name
else:
self.userId=None
self.name=None
self.status=None
def __eq__(self, other):
if type(other)==LineUser:
return self.userId == other.userId
else:
return self.userId== other
def __str__(self):
return f"userId::{self.userId}\n" \
f"userName::{self.name}\n" \
f"userStatus::{self.status}"
class LineSender:
def __init__(self,lineins:LineBotApi):
self.line_bot_api=lineins
def sendMessage(self,text:str,user_id:LineUser):
if isinstance(user_id,LineUser):
user_id=user_id.userId
        msg = TextSendMessage(text=text)  # `lineins` is not in scope here; build the message object directly
self.line_bot_api.push_message(to=user_id,messages=msg)
class LineUsers:
def __init__(self):
self.Users = {}
def getState(self, user: LineUser):
return self.Users[user.userId]
def setState(self,user:LineUser,state):
self.Users[user.userId]=state
def __add__(self, other: LineUser):
self.Users[other.userId] = other
    def __len__(self):
return len(self.Users)
| 28.276923 | 63 | 0.605005 | 1,961 | 0.962218 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.258587 |
7e99f6ca30156fb9b0a9d2f84235cc68fb4723a3 | 841 | py | Python | wx/stc/mixins/DrawColumnEndMixin.py | dejbug/dejan7 | 0e3d9f4068cd7b39a3a992f4b21fa5042c88227b | [
"MIT"
] | null | null | null | wx/stc/mixins/DrawColumnEndMixin.py | dejbug/dejan7 | 0e3d9f4068cd7b39a3a992f4b21fa5042c88227b | [
"MIT"
] | null | null | null | wx/stc/mixins/DrawColumnEndMixin.py | dejbug/dejan7 | 0e3d9f4068cd7b39a3a992f4b21fa5042c88227b | [
"MIT"
] | null | null | null | import wx
import wx.stc
from dejan7.async.DelayedCall import *
from dejan7.wx.stc import GetRowWidths
class DrawColumnEndMixin(object):
def __init__(self, parent, color="GREY", delay=0.3, margins=[]):
self.sci = parent
self.pen = wx.Pen(color)
self.margins = margins
self.DelayedPaint = DelayedCall(self.Paint, delay)
PaintMethod = self.OnPaintedDelayed if delay > 0 else self.OnPainted
self.sci.Bind(wx.stc.EVT_STC_PAINTED, PaintMethod)
def OnPaintedDelayed(self, e):
e.Skip()
self.DelayedPaint()
def OnPainted(self, e):
e.Skip()
self.Paint()
def Paint(self):
th = self.sci.TextHeight(0)
dc = wx.ClientDC(self.sci)
wx.DCPenChanger(dc, self.pen)
y = 0
for x in GetRowWidths(self.sci, margins=self.margins):
if x >= 0: dc.DrawLine(x, y, x, y + th)
y += th
| 22.72973 | 71 | 0.668252 | 729 | 0.866825 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.007134 |
7e9a6ec65ac1ad83068e34348790df72fdb98a91 | 177 | py | Python | Userful_Script/decimal_to_binary.py | abc907558136/Pytorch-learning | 24c3da7eb134e0b9058a6f8329a311196cbc528b | [
"Apache-2.0"
] | 1 | 2019-12-19T04:49:57.000Z | 2019-12-19T04:49:57.000Z | Userful_Script/decimal_to_binary.py | abc907558136/Pytorch-Learning | 24c3da7eb134e0b9058a6f8329a311196cbc528b | [
"Apache-2.0"
] | null | null | null | Userful_Script/decimal_to_binary.py | abc907558136/Pytorch-Learning | 24c3da7eb134e0b9058a6f8329a311196cbc528b | [
"Apache-2.0"
] | null | null | null | import numpy as np
# Represent each input by an array of its binary digits.
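# For instance, binary_encode(5, 4) yields array([0, 1, 0, 1]),
# i.e. the 4 binary digits of 5 with the most significant digit first.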
def binary_encode(i, num_digits):
return np.array([i >> d & 1 for d in range(num_digits)])[::-1] | 35.4 | 66 | 0.700565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.316384 |
7e9da2cb9c09decd20d0f9df4db1a688df669596 | 884 | py | Python | tools/telemetry/telemetry/core/platform/power_monitor/android_ds2784_power_monitor_unittest.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | tools/telemetry/telemetry/core/platform/power_monitor/android_ds2784_power_monitor_unittest.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-02-14T21:55:58.000Z | 2017-02-14T21:55:58.000Z | tools/telemetry/telemetry/core/platform/power_monitor/android_ds2784_power_monitor_unittest.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import android_ds2784_power_monitor
class DS2784PowerMonitorMonitorTest(unittest.TestCase):
  def testEnergyConsumption(self):
data = ('0000 1000 -10 12\n'
'1800 1000 -10 11\n'
'3600 1000 -10 09\n'
'5400 0000 -20 08\n'
'7200 0000 -20 11\n'
'9000 0000 -20 11\n')
results = (
android_ds2784_power_monitor.DS2784PowerMonitor.ParseSamplingOutput(
data))
self.assertEqual(results['power_samples_mw'], [1.2e-07, 1.1e-07, 9e-08,
1.6e-07, 2.2e-07, 2.2e-07])
self.assertEqual(results['energy_consumption_mwh'], 2.1e-07)
| 35.36 | 78 | 0.636878 | 622 | 0.70362 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.363122 |
7e9db911f3ba28aac3dc562f5cd602f52c2d9c10 | 1,231 | py | Python | apps/common/lookups.py | gis4dis/poster_new | cee983bfcfc90f581b18712d558bc9d8a83a400a | [
"BSD-3-Clause"
] | 4 | 2017-10-17T10:52:27.000Z | 2020-08-30T10:13:46.000Z | apps/common/lookups.py | gis4dis/poster_new | cee983bfcfc90f581b18712d558bc9d8a83a400a | [
"BSD-3-Clause"
] | 138 | 2017-10-13T09:09:02.000Z | 2020-06-05T18:55:33.000Z | apps/common/lookups.py | gis4dis/poster_new | cee983bfcfc90f581b18712d558bc9d8a83a400a | [
"BSD-3-Clause"
] | 2 | 2018-01-21T19:44:51.000Z | 2018-02-15T11:27:39.000Z | from django.contrib.postgres.fields import DateTimeRangeField
from django.db.models import Lookup
from psycopg2.extras import NumericRange
@DateTimeRangeField.register_lookup
class Duration(Lookup):
lookup_name = 'duration'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
if type(rhs_params[0]) is NumericRange:
params = lhs_params + [rhs_params[0].lower] + lhs_params + [rhs_params[0].upper]
return "duration(%s) >= (%s || ' second')::interval AND duration(%s) < (%s || ' second')::interval" % \
(lhs, rhs, lhs, rhs), params
return "duration(%s) = (%s || ' second')::interval" % (lhs, rhs), params
@DateTimeRangeField.register_lookup
class Matches(Lookup):
lookup_name = 'matches'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
return "mod(cast(extract(epoch from lower(%s)) as int), %s)=0" % (lhs, rhs), params
| 38.46875 | 115 | 0.665313 | 1,014 | 0.823721 | 0 | 0 | 1,086 | 0.88221 | 0 | 0 | 210 | 0.170593 |
7e9fa3c475c35098becd66c211b5ef3ab43f253f | 21,450 | py | Python | lib/pecoff/Executable.py | arizvisa/syringe | 9f8691d3e70b34d52ce83c8ea33be52d0887f0d6 | [
"BSD-2-Clause"
] | 25 | 2015-04-14T21:53:46.000Z | 2022-03-30T19:15:24.000Z | lib/pecoff/Executable.py | arizvisa/syringe | 9f8691d3e70b34d52ce83c8ea33be52d0887f0d6 | [
"BSD-2-Clause"
] | 5 | 2020-03-23T20:19:59.000Z | 2021-05-24T19:38:31.000Z | lib/pecoff/Executable.py | arizvisa/syringe | 9f8691d3e70b34d52ce83c8ea33be52d0887f0d6 | [
"BSD-2-Clause"
] | 7 | 2015-07-31T13:26:37.000Z | 2021-03-05T19:35:37.000Z | import logging, operator, functools, itertools, array, ptypes
from ptypes import *
from .headers import *
from . import portable
class Signature(pint.enum, uint16):
# We'll just store all signature types here
_values_ = [
('IMAGE_DOS_SIGNATURE', 0x5a4d),
('IMAGE_OS2_SIGNATURE', 0x454e),
('IMAGE_OS2_SIGNATURE_LE', 0x454c),
('IMAGE_NT_SIGNATURE', 0x4550),
]
class IMAGE_DOS_HEADER(pstruct.type):
class e_magic(Signature): pass
class Relocation(pstruct.type):
_fields_ = [
( uint16, 'offset' ),
( uint16, 'segment' ),
]
def linear(self):
return self['segment'].int()*0x10 + self['offset'].int()
def decode(self, **attrs):
p = self.getparent(ptype.boundary)
attrs.setdefault('offset', p['Stub'].getoffset()+self.linear())
return self.new(ptype.undefined, **attrs)
def summary(self):
seg, offset = self['segment'], self['offset']
return "(segment:offset) {:04x}:{:04x} (linear) {:05x}".format(seg.int(), offset.int(), (seg.int() * 0x10 + offset.int()) & 0xfffff)
def repr(self):
return self.summary()
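        # For example, a relocation with segment=0x1234 and offset=0x0010
        # has the linear (real-mode) address 0x1234 * 0x10 + 0x0010 = 0x12350.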
class Oem(pstruct.type):
_fields_ = [
( dyn.array(uint16, 4), 'e_reserved' ),
( uint16, 'e_oemid' ),
( uint16, 'e_oeminfo' ),
( dyn.array(uint16, 10), 'e_reserved2' ),
]
# FIXME: this implementation should be properly tested as there's a chance it could be fucked with
def __e_oem(self):
res = self['e_lfarlc'].li
fields = ['e_magic', 'e_cblp', 'e_cp', 'e_crlc', 'e_cparhdr', 'e_minalloc', 'e_maxalloc', 'e_ss', 'e_sp', 'e_csum', 'e_ip', 'e_cs', 'e_lfarlc', 'e_ovno']
# if our calculated size for the field directly matches the Oem
# structure, then this for sure is going to be a PECOFF executable.
t = IMAGE_DOS_HEADER.Oem
if res.int() == sum(self[fld].li.size() for fld in fields) + t().a.size() + 4:
return t
# otherwise we need to pad it with whatever the input claims it should be
return dyn.block(max(0, res.int() - sum(self[fld].li.size() for fld in fields)))
def __e_lfanew(self):
paragraphs, relocations = self['e_cparhdr'].li, self['e_lfarlc'].li
fields = ['e_magic', 'e_cblp', 'e_cp', 'e_crlc', 'e_cparhdr', 'e_minalloc', 'e_maxalloc', 'e_ss', 'e_sp', 'e_csum', 'e_ip', 'e_cs', 'e_lfarlc', 'e_ovno', 'e_oem']
# if everything matches, then there's a pointer here for PECOFF executables
if 0x10 * paragraphs.int() == relocations.int() == sum(self[fld].li.size() for fld in fields) + 4:
return dyn.rpointer(Next, self, pint.uint32_t)
# otherwise, there isn't anything here.
return pint.uint_t
def __e_rlc(self):
res = self['e_crlc'].li
return dyn.array(IMAGE_DOS_HEADER.Relocation, res.int())
def __e_parhdr(self):
res = 0x10 * self['e_cparhdr'].li.int()
fields = ['e_magic', 'e_cblp', 'e_cp', 'e_crlc', 'e_cparhdr', 'e_minalloc', 'e_maxalloc', 'e_ss', 'e_sp', 'e_csum', 'e_ip', 'e_cs', 'e_lfarlc', 'e_ovno']
fields+= ['e_oem', 'e_rlc', 'e_lfanew']
return dyn.block(res - sum(self[fld].li.size() for fld in fields))
def filesize(self):
res = self['e_cp'].li.int()
if res > 0:
cp = res - 1
return cp * 0x200 + self['e_cblp'].li.int()
return 0
def headersize(self):
res = self['e_cparhdr'].li
return res.int() * 0x10
def datasize(self):
res = self.headersize()
return (self.filesize() - res) if res > 0 else 0
def __e_lfarlc(self):
res = self['e_crlc'].li
t = dyn.array(IMAGE_DOS_HEADER.Relocation, res.int())
return dyn.rpointer(t, self, uint16)
#e_cparhdr << 4
#e_cp << 9
_fields_ = [
( e_magic, 'e_magic' ),
( uint16, 'e_cblp' ), # bytes in last page / len mod 512 / UsedBytesInLastPage
( uint16, 'e_cp' ), # pages / 512b pagees / FileSizeInPages
( uint16, 'e_crlc' ), # relocation count / reloc entries count / NumberOfRelocationItems
( uint16, 'e_cparhdr' ), # header size in paragraphs (paragraph=0x10) / number of paragraphs before image / HeaderSizeInParagraphs
( uint16, 'e_minalloc' ), # required paragraphs / minimum number of bss paragraphs / MinimumExtraParagraphs
( uint16, 'e_maxalloc' ), # requested paragraphs / maximum number of bss paragraphs / MaximumExtraParagraphs
( uint16, 'e_ss' ), # ss / stack of image / InitialRelativeSS
( uint16, 'e_sp' ), # sp / sp of image / InitialSP
( uint16, 'e_csum' ), # checksum / checksum (ignored) / Checksum
( uint16, 'e_ip' ), # ip / ip of entry / InitialIP
        ( uint16, 'e_cs' ),              # cs / cs of entry / InitialRelativeCS
( __e_lfarlc, 'e_lfarlc' ), # relocation table
( uint16, 'e_ovno'), # overlay number
#( uint32, 'EXE_SYM_TAB'), # from inc/exe.inc
# all the data below here changes based on the linker:
# Borland, ARJ, LZEXE, PKLITE, LHARC, LHA, CRUNCH, BSA, LARC, etc..
( __e_oem, 'e_oem'), # oem and reserved data
( __e_lfanew, 'e_lfanew'),
( __e_rlc, 'e_rlc' ), # relocations?
( __e_parhdr, 'e_parhdr'),
]
### What file format the next header is
class NextHeader(ptype.definition):
cache = {}
### What file format the data is
class NextData(ptype.definition):
cache = {}
class Next(pstruct.type):
def __Header(self):
t = self['Signature'].li.serialize()
return NextHeader.withdefault(t, type=t)
def __Data(self):
t = self['Signature'].li.serialize()
return NextData.withdefault(t, type=t)
_fields_ = [
(Signature, 'Signature'),
(__Header, 'Header'),
(__Data, 'Data'),
]
def Header(self):
return self['Header']
def Data(self):
return self['Data']
## Portable Executable (PE)
@NextHeader.define
class IMAGE_NT_HEADERS(pstruct.type, Header):
type = b'PE'
def __Padding(self):
'''Figure out the PE header size and pad according to SizeOfHeaders'''
p = self.getparent(File)
sz = p['Header']['e_lfanew'].li.int()
opt = self['OptionalHeader'].li
f = functools.partial(operator.getitem, self)
res = map(f, ('SignaturePadding', 'FileHeader', 'OptionalHeader', 'DataDirectory', 'Sections'))
res = sum(map(operator.methodcaller('blocksize'), res))
res += 2
return dyn.block(opt['SizeOfHeaders'].int() - res - sz)
def __DataDirectory(self):
cls = self.__class__
length = self['OptionalHeader'].li['NumberOfRvaAndSizes'].int()
if length > 0x10: # XXX
logging.warning("{:s} : OptionalHeader.NumberOfRvaAndSizes specified >0x10 entries ({:#x}) for the DataDirectory. Assuming the maximum of 0x10.".format('.'.join((cls.__module__, cls.__name__)), length))
length = 0x10
return dyn.clone(portable.DataDirectory, length=length)
def __Sections(self):
header = self['FileHeader'].li
length = header['NumberOfSections'].int()
return dyn.clone(portable.SectionTableArray, length=length)
_fields_ = [
(uint16, 'SignaturePadding'),
(portable.IMAGE_FILE_HEADER, 'FileHeader'),
(portable.IMAGE_OPTIONAL_HEADER, 'OptionalHeader'),
(__DataDirectory, 'DataDirectory'),
(__Sections, 'Sections'),
(__Padding, 'Padding'),
]
def FileHeader(self):
'''Return the FileHeader which contains a number of sizes used by the file.'''
return self['FileHeader']
def getaddressbyoffset(self, offset):
section = self['Sections'].getsectionbyoffset(offset)
return section.getaddressbyoffset(offset)
def getoffsetbyaddress(self, address):
section = self['Sections'].getsectionbyaddress(address)
return section.getoffsetbyaddress(address)
def loadconfig(self):
return self['DataDirectory'][10]['Address'].d.li
def tls(self):
return self['DataDirectory'][9]['Address'].d.li
def relocateable(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'DYNAMIC_BASE' in characteristics
def has_seh(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'NO_SEH' not in characteristics
def has_nx(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'NX_COMPAT' in characteristics
def has_integrity(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'FORCE_INTEGRITY' in characteristics
def is64(self):
return self['OptionalHeader'].li.is64()
def checksum(self):
p = self.getparent(File)
res = self['OptionalHeader']['Checksum']
# Make a copy of our checksum initialized to 0
field = res.copy(offset=res.offset - p.offset).set(0)
# Make a copy of our File header, and overwrite the original
# checksum with 0 so that we can calculate what the checksum
# is supposed to be.
data = bytearray(p.serialize())
data[field.offset : field.offset + field.size()] = field.serialize()
# Pad the data so that it's a multiple of a dword
res = 4 - len(data) % 4
padding = b'\0' * (res % 4)
# Calculate 16-bit checksum
res = sum(array.array('I' if len(array.array('I', 4 * b'\0')) > 1 else 'H', bytes(data) + padding))
checksum = len(data)
checksum += res & 0xffff
checksum += res // 0x10000
checksum += checksum // 0x10000
checksum &= 0xffff
# Clamp the result to 32-bits
return checksum & 0xffffffff
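    # Illustrative check (not part of the original API surface): the value
    # computed above can be compared against the stored header field, e.g.
    #   nt = z['next']['header']          # an IMAGE_NT_HEADERS instance
    #   nt.checksum() == nt['OptionalHeader']['Checksum'].int()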
def Machine(self):
return self['FileHeader']['Machine']
Portable = IMAGE_NT_HEADERS64 = IMAGE_NT_HEADERS
class SegmentEntry(pstruct.type):
'''
Base class for a section entry that both memory-backed and file-backed
entries inherit from.
'''
def properties(self):
res = super(SegmentEntry, self).properties()
if hasattr(self, 'Section'):
res['SectionName'] = self.Section['Name'].str()
return res
class MemorySegmentEntry(SegmentEntry):
'''
This SegmentEntry represents the structure of a segment that has been
already mapped into memory. This honors the SectionAlignment field from
the OptionalHeader when padding the segment's data.
'''
noncontiguous = True
def __Padding(self):
p = self.getparent(Next)
header = p.Header()
optionalheader = header['OptionalHeader'].li
return dyn.align(optionalheader['SectionAlignment'].int(), undefined=True)
_fields_ = [
(__Padding, 'Padding'),
(lambda self: dyn.block(self.Section.getloadedsize()), 'Data'),
]
class FileSegmentEntry(SegmentEntry):
'''
This SegmentEntry represents the structure of a segment that is on the
disk and hasn't been mapped into memory. This honors the FileAlignment
field from the OptionalHeader when padding the segment's data.
'''
def __Padding(self):
p = self.getparent(Next)
header = p.Header()
optionalheader = header['OptionalHeader'].li
return dyn.align(optionalheader['FileAlignment'].int(), undefined=True)
_fields_ = [
(__Padding, 'Padding'),
(lambda self: dyn.block(self.Section.getreadsize()), 'Data'),
]
class SegmentTableArray(parray.type):
'''
This is a simple array of segment entries where each entry is individually
tied directly to the SectionTableEntry that it is associated with. Each
entry is aligned depending on whether it is being loaded from disk or has
been already loaded into memory.
'''
def _object_(self):
p = self.getparent(Next)
header = p.Header()
sections = header['Sections']
entry = MemorySegmentEntry if isinstance(self.source, ptypes.provider.memorybase) else FileSegmentEntry
return dyn.clone(entry, Section=sections[len(self.value)])
@NextData.define
class IMAGE_NT_DATA(pstruct.type, Header):
type = b'PE'
def __Segments(self):
header = self.p.Header()
fileheader = header['FileHeader'].li
# Warn the user if we're unable to determine whether the source is a
# file-backed or memory-backed provider.
if all(not isinstance(self.source, item) for item in {ptypes.provider.memorybase, ptypes.provider.fileobj}):
cls = self.__class__
logging.warning("{:s} : Unknown ptype source.. treating as a fileobj : {!r}".format('.'.join((cls.__module__, cls.__name__)), self.source))
return dyn.clone(SegmentTableArray, length=fileheader['NumberOfSections'].int())
def __CertificatePadding(self):
header = self.p.Header()
if len(header['DataDirectory']) < 4:
return ptype.undefined
res = header['DataDirectory'][4]
offset, size = res['Address'].int(), res['Size'].int()
if offset == 0 or isinstance(self.source, ptypes.provider.memorybase):
return ptype.undefined
if isinstance(self.source, ptypes.provider.bounded) and offset < self.source.size():
res = self['Segments'].li.getoffset() + self['Segments'].blocksize()
return dyn.block(offset - res)
return ptype.undefined
def __Certificate(self):
header = self.p.Header()
if len(header['DataDirectory']) < 4:
return ptype.undefined
res = header['DataDirectory'][4]
offset, size = res['Address'].int(), res['Size'].int()
if offset == 0 or isinstance(self.source, ptypes.provider.memorybase):
return ptype.undefined
if isinstance(self.source, ptypes.provider.bounded) and offset < self.source.size():
return dyn.clone(parray.block, _object_=portable.headers.Certificate, blocksize=lambda self, size=size: size)
return ptype.undefined
_fields_ = [
(__Segments, 'Segments'),
(__CertificatePadding, 'CertificatePadding'),
(__Certificate, 'Certificate'),
]
@NextHeader.define
class DosExtender(pstruct.type, Header):
type = b'DX'
_fields_ = [
(word, 'MinRModeParams'),
(word, 'MaxRModeParams'),
(word, 'MinIBuffSize'),
(word, 'MaxIBuffSize'),
(word, 'NIStacks'),
(word, 'IStackSize'),
(dword, 'EndRModeOffset'),
(word, 'CallBuffSize'),
(word, 'Flags'),
(word, 'UnprivFlags'),
(dyn.block(104), 'Reserv'),
]
@NextHeader.define
class PharLap(pstruct.type, Header):
type = b'MP'
_fields_ = [
(word, 'SizeRemaind'),
(word, 'ImageSize'),
(word, 'NRelocs'),
(word, 'HeadSize'),
(word, 'MinExtraPages'),
(dword, 'ESP'),
(word, 'CheckSum'),
(dword, 'EIP'),
(word, 'FirstReloc'),
(word, 'NOverlay'),
(word, 'Reserved'),
]
class SegInfo(pstruct.type):
_fields_ = [
(word, 'Selector'),
(word, 'Flags'),
(dword, 'BaseOff'),
(dword, 'MinAlloc'),
]
class RunTimeParams(DosExtender): pass
class RepeatBlock(pstruct.type):
_fields_ = [
(word, 'Count'),
(lambda s: dyn.block(s['Count'].li.int()), 'String'),
]
@NextHeader.define
class PharLap3(PharLap, Header):
type = b'P3'
class OffsetSize(pstruct.type):
def __Offset(self):
t = getattr(self, '_object_', ptype.block)
return dyn.rpointer(lambda _: dyn.clone(t, blocksize=lambda _:self['Size'].li.int()), self.getparent(PharLap3), dword)
_fields_ = [
(__Offset, 'Offset'),
(dword, 'Size'),
]
def summary(self):
return '{:#x}:{:+#x}'.format(self['Offset'].int(), self['Size'].int())
_fields_ = [
(word, 'Level'),
(word, 'HeaderSize'),
(dword, 'FileSize'),
(word, 'CheckSum'),
(dyn.clone(OffsetSize, _object_=PharLap.RunTimeParams), 'RunTimeParams'),
(OffsetSize, 'Reloc'),
(dyn.clone(OffsetSize, _object_=dyn.clone(parray.block, _object_=PharLap.SegInfo)), 'SegInfo'),
(word, 'SegEntrySize'),
(OffsetSize, 'Image'),
(OffsetSize, 'SymTab'),
(OffsetSize, 'GDTLoc'),
(OffsetSize, 'LDTLoc'),
(OffsetSize, 'IDTLoc'),
(OffsetSize, 'TSSLoc'),
(dword, 'MinExtraPages'),
(dword, 'MaxExtraPages'),
(dword, 'Base'),
(dword, 'ESP'),
(word, 'SS'),
(dword, 'EIP'),
(word, 'CS'),
(word, 'LDT'),
(word, 'TSS'),
(word, 'Flags'),
(dword, 'MemReq'),
(dword, 'Checksum32'),
(dword, 'StackSize'),
(dyn.block(0x100), 'Reserv'),
]
@NextHeader.define
class NeHeader(pstruct.type):
type = b'NE'
class NE_Pointer(pstruct.type):
_fields_ = [
( uint16, 'Index' ),
( uint16, 'Offset' )
]
class NE_Version(pstruct.type):
_fields_ = [
( uint8, 'Minor' ),
( uint8, 'Major' )
]
_fields_ = [
( uint8, 'LinkVersion' ),
( uint8, 'LinkRevision' ),
( uint16, 'EntryOffset' ),
( uint16, 'EntryLength' ),
( uint32, 'CRC' ),
( uint8, 'ProgramFlags' ),
( uint8, 'ApplicationFlags' ),
( uint8, 'AutoDataSegmentIndex' ),
( uint16, 'HeapSize' ),
( uint16, 'StackSize' ),
( NE_Pointer, 'EntryPointer' ),
( NE_Pointer, 'StackPointer' ),
( uint16, 'SegmentCount' ),
( uint16, 'ModuleCount' ),
( uint16, 'NRNamesSize' ),
( uint16, 'SegmentOffset' ),
( uint16, 'ResourceOffset' ),
( uint16, 'RNamesOffset' ),
( uint16, 'ModuleOffset' ),
( uint16, 'ImportOffset' ),
( uint32, 'NRNamesOffset' ),
( uint16, 'MoveableEntryPointcount' ),
( uint16, 'AlignmentSize' ),
( uint16, 'ResourceCount' ),
( uint8, 'TargetOS' ),
( uint8, 'OS2_Flags' ),
( uint16, 'ReturnThunksOffset' ),
( uint16, 'SegmentThunksOffset' ),
( uint16, 'SwapMinimumSize' ),
( NE_Version, 'ExpectedVersion' )
]
### FileBase
class File(pstruct.type, ptype.boundary):
def __Padding(self):
dos = self['Header'].li
ofs = dos['e_lfarlc'].int()
return dyn.block(ofs - self.blocksize()) if ofs > 0 else dyn.block(0)
def __Relocations(self):
dos = self['Header'].li
ofs = dos['e_lfarlc'].int()
return dyn.array(Dos.Relocation, dos['e_crlc'].li.int() if ofs == self.blocksize() else 0)
def __Extra(self):
res = self['Header'].li.headersize()
if res > 0:
return dyn.block(res - self.blocksize())
return ptype.undefined
def __Stub(self):
# everything up to e_lfanew
dos = self['Header'].li
res = dos['e_lfanew'].int()
if res > 0:
return dyn.block(res - self.blocksize())
return ptype.undefined
def __Next(self):
dos = self['Header'].li
if dos['e_lfanew'].int() == self.blocksize():
return Next
return dyn.block(dos.filesize() - self.blocksize())
def __NotLoaded(self):
sz = self['Header'].blocksize()
sz+= self['Extra'].blocksize()
sz+= self['Stub'].blocksize()
sz+= self['Next'].blocksize()
if isinstance(self.source, ptypes.provider.bounded):
return dyn.block(self.source.size() - sz)
return ptype.undefined
_fields_ = [
(IMAGE_DOS_HEADER, 'Header'),
(__Extra, 'Extra'),
(__Stub, 'Stub'),
(__Next, 'Next'),
#(__NotLoaded, 'NotLoaded'),
]
if __name__ == '__main__':
import sys
import ptypes, pecoff.Executable
if len(sys.argv) == 2:
filename = sys.argv[1]
ptypes.setsource(ptypes.prov.file(filename, 'rb'))
z = pecoff.Executable.File()
z=z.l
else:
filename = 'obj/kernel32.dll'
ptypes.setsource(ptypes.prov.file(filename, 'rb'))
for x in range(10):
print(filename)
try:
z = pecoff.Executable.File()
z=z.l
break
except IOError:
pass
filename = '../'+filename
v=z['next']['header']
sections = v['Sections']
exports = v['DataDirectory'][0]
while exports['Address'].int() != 0:
exports = exports['Address'].d.l
print(exports.l)
break
imports = v['DataDirectory'][1]
while imports['Address'].int() != 0:
imports = imports['Address'].d.l
print(imports.l)
break
relo = v['DataDirectory'][5]['Address'].d.l
baseaddress = v['OptionalHeader']['ImageBase']
section = sections[0]
data = section.data().serialize()
for item in relo.filter(section):
for type, offset in item.getrelocations(section):
print(type, offset)
continue
| 34.210526 | 214 | 0.58634 | 19,728 | 0.91972 | 0 | 0 | 10,194 | 0.475245 | 0 | 0 | 6,383 | 0.297576 |
7ea13c2421a842ac77b2ae86ea0d9775b78d7084 | 955 | py | Python | test/test_utils.py | undecidedzogvisrainbowvitalispotent-360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 25 | 2018-09-05T16:44:05.000Z | 2022-02-16T18:32:32.000Z | test/test_utils.py | undecidedzogvisvitalispotent8stars360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 2 | 2018-10-24T19:57:16.000Z | 2019-01-26T14:30:40.000Z | test/test_utils.py | undecidedzogvisvitalispotent8stars360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 5 | 2018-10-24T18:01:46.000Z | 2020-12-15T18:16:14.000Z | import unittest
import os
import i2plib.sam
import i2plib.utils
class TestUtils(unittest.TestCase):
def test_session_id_generation(self):
sid = i2plib.utils.generate_session_id()
self.assertEqual(len(sid), 13)
sid = i2plib.utils.generate_session_id(8)
self.assertEqual(len(sid), 15)
def test_sam_address_getter(self):
        oldenv = dict(os.environ)
if "I2P_SAM_ADDRESS" in os.environ:
del os.environ["I2P_SAM_ADDRESS"]
a = i2plib.utils.get_sam_address()
self.assertEqual(a, i2plib.sam.DEFAULT_ADDRESS)
os.environ["I2P_SAM_ADDRESS"] = "127.0.0.1:11223"
a = i2plib.utils.get_sam_address()
self.assertEqual(a, ("127.0.0.1", 11223))
        os.environ.clear()
        os.environ.update(oldenv)
def test_port_utils(self):
p = i2plib.utils.get_free_port()
unavail_address = ("127.0.0.1", p)
self.assertFalse(i2plib.utils.is_address_accessible(unavail_address))
| 28.939394 | 77 | 0.66178 | 887 | 0.928796 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.094241 |
7ea185e9e7bf9f8a9049b8e9ecc9f23bee356df7 | 2,156 | py | Python | arm64/conditional.py | c01db33f/reil | 3deec3a3bb69aae51cc0d728d5f83156cfba2ab6 | [
"Apache-2.0"
] | 27 | 2015-03-16T13:28:00.000Z | 2021-08-02T02:58:23.000Z | arm64/conditional.py | c01db33f/pyreil | 3deec3a3bb69aae51cc0d728d5f83156cfba2ab6 | [
"Apache-2.0"
] | 2 | 2015-02-23T12:18:53.000Z | 2015-03-15T20:31:16.000Z | arm64/conditional.py | c01db33f/reil | 3deec3a3bb69aae51cc0d728d5f83156cfba2ab6 | [
"Apache-2.0"
] | 9 | 2016-03-22T18:59:12.000Z | 2022-02-05T08:18:28.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Mark Brand - c01db33f (at) gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reil.arm64.conditional - ARMv8 translators
This module generates REIL (reverse engineering intermediate language)
IL from ARMv8 machine code.
This file contains helpers for conditional instructions
"""
from reil.shorthand import *
from reil.utilities import *
A = 0
EQ = 1
NE = 2
HS = 3
LO = 4
MI = 5
PL = 6
VS = 7
VC = 8
HI = 9
LS = 10
GE = 11
LT = 12
GT = 13
LE = 14
AL = 15
NV = 16
def condition(ctx, cc):
# we implement as per the architecture reference manual
# TODO: optimise instead.
cb = (cc >> 1) & 0b111
if cb == 0b111:
cond = imm(1, 8)
else:
cond = ctx.tmp(8)
# evaluate base condition
if cb == 0b000:
ctx.emit( bisnz_ (r('z', 8), cond))
elif cb == 0b001:
ctx.emit( bisnz_ (r('c', 8), cond))
elif cb == 0b010:
ctx.emit( bisnz_ (r('n', 8), cond))
elif cb == 0b011:
ctx.emit( bisnz_ (r('v', 8), cond))
elif cb == 0b100:
t0 = ctx.tmp(8)
t1 = ctx.tmp(8)
ctx.emit( bisnz_ (r('c', 8), t0))
ctx.emit( bisz_ (r('z', 8), t1))
ctx.emit( and_ (t0, t1, cond))
elif cb == 0b101:
ctx.emit( equ_ (r('n', 8), r('v', 8), cond))
elif cb == 0b110:
t0 = ctx.tmp(8)
t1 = ctx.tmp(8)
ctx.emit( equ_ (r('n', 8), r('v', 8), t0))
ctx.emit( bisz_ (r('z', 8), t1))
ctx.emit( and_ (t0, t1, cond))
if cc != 0b1111 and cc & 0b1 == 1:
ctx.emit( bisz_ (cond, cond))
return cond
| 25.666667 | 77 | 0.581169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 984 | 0.456401 |
7ea23da5c7684f08a14ea23938ae0d58c8dbefb9 | 2,830 | py | Python | open_ocr/rpc_server_base.py | jpnewman/python_ocr_api | 1063a03c273438a8df55d6296b87940bf143aa60 | [
"MIT"
] | null | null | null | open_ocr/rpc_server_base.py | jpnewman/python_ocr_api | 1063a03c273438a8df55d6296b87940bf143aa60 | [
"MIT"
] | null | null | null | open_ocr/rpc_server_base.py | jpnewman/python_ocr_api | 1063a03c273438a8df55d6296b87940bf143aa60 | [
"MIT"
] | null | null | null |
import logging
import pika
LOGGER = logging.getLogger(__name__)
class RpcServer(object):
def __init__(cls, rabbit_config, logfile=None):
cls.rabbit_config = rabbit_config
cls.logfile = logfile
def _setup_logging(cls):
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
if cls.logfile:
file_hdlr = logging.FileHandler(cls.logfile)
file_hdlr.setFormatter(formatter)
LOGGER.addHandler(file_hdlr)
LOGGER.setLevel(logging.DEBUG)
console_hdlr = logging.StreamHandler()
console_hdlr.setFormatter(formatter)
LOGGER.addHandler(console_hdlr)
console_hdlr.setLevel(logging.INFO)
def _setup_rabbitmq(cls):
parameters = pika.URLParameters(cls.rabbit_config['AmqpURI'])
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange=cls.rabbit_config['Exchange'],
exchange_type=cls.rabbit_config['ExchangeType'],
durable=cls.rabbit_config['Reliable'],
auto_delete=False,
internal=False,
arguments=None)
channel.queue_declare(queue=cls.rabbit_config['QueueName'],
durable=True)
channel.queue_bind(queue=cls.rabbit_config['QueueName'],
exchange=cls.rabbit_config['Exchange'],
routing_key=cls.rabbit_config['RoutingKey'])
channel.basic_qos(prefetch_count=1)
channel.basic_consume(cls.on_request, queue=cls.rabbit_config['QueueName'])
LOGGER.info(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
def _request_info(cls, ch, method, props, body):
LOGGER.debug("Channel: {0!r}".format(ch))
LOGGER.debug("Method: {0!r}".format(method))
LOGGER.debug("Properties: {0!r}".format(props))
        LOGGER.debug("Body: {0!r}".format(body))
def response_text(cls, ch, method, props, response):
basic_properties = pika.BasicProperties(content_type='text/plain',
delivery_mode=props.delivery_mode,
correlation_id=props.correlation_id)
ch.basic_publish(exchange='', # Default Exchange
routing_key=props.reply_to,
properties=basic_properties,
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
def run(cls):
cls._setup_logging()
cls._setup_rabbitmq()
def stop(cls):
# raise NotImplementedError()
pass
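# Minimal usage sketch (illustrative, not part of the original module): a
# concrete server subclasses RpcServer, implements on_request() -- the callback
# that _setup_rabbitmq() registers as the queue consumer -- and replies through
# response_text(). Only the dictionary keys below are dictated by the code
# above; the AMQP URI, exchange and queue names are placeholder assumptions,
# and running it requires a reachable RabbitMQ broker.
if __name__ == '__main__':
    class EchoRpcServer(RpcServer):
        def on_request(self, ch, method, props, body):
            self._request_info(ch, method, props, body)
            self.response_text(ch, method, props, body)  # echo the payload back
    example_config = {
        'AmqpURI': 'amqp://guest:guest@localhost:5672/%2F',
        'Exchange': 'ocr-exchange',
        'ExchangeType': 'direct',
        'Reliable': True,
        'QueueName': 'ocr-requests',
        'RoutingKey': 'ocr-requests',
    }
    EchoRpcServer(example_config).run()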
| 36.282051 | 84 | 0.591166 | 2,761 | 0.975618 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.111661 |
7ea28e8e4d7addcc5853c2f95be5898c08fdb331 | 1,490 | py | Python | s64da_benchmark_toolkit/dbconn.py | a-masterov/s64da-benchmark-toolkit | b1af60c6a1c7d4c62ae9fb5ce04e2c7b59002b0b | [
"MIT"
] | 15 | 2020-03-27T09:40:12.000Z | 2022-03-25T19:55:53.000Z | s64da_benchmark_toolkit/dbconn.py | a-masterov/s64da-benchmark-toolkit | b1af60c6a1c7d4c62ae9fb5ce04e2c7b59002b0b | [
"MIT"
] | 43 | 2020-03-25T10:03:11.000Z | 2021-06-02T01:15:17.000Z | s64da_benchmark_toolkit/dbconn.py | a-masterov/s64da-benchmark-toolkit | b1af60c6a1c7d4c62ae9fb5ce04e2c7b59002b0b | [
"MIT"
] | 6 | 2020-10-08T11:32:40.000Z | 2021-11-17T21:20:44.000Z |
import logging
import time
import psycopg2
from psycopg2.extras import DictCursor
LOG = logging.getLogger()
class DBConn:
def __init__(self, dsn, statement_timeout=0, num_retries=120, retry_wait=1, use_dict_cursor = False):
self.dsn = dsn
self.conn = None
self.cursor = None
self.server_side_cursor = None
self.statement_timeout = statement_timeout
self.num_retries = num_retries
self.retry_wait = retry_wait
self.use_dict_cursor = use_dict_cursor
def __enter__(self):
options = f'-c statement_timeout={self.statement_timeout}'
trial = 0
while trial < self.num_retries:
try:
self.conn = psycopg2.connect(self.dsn, options=options)
self.conn.autocommit = True
self.cursor = self.conn.cursor(cursor_factory = DictCursor if self.use_dict_cursor else None)
self.server_side_cursor = self.conn.cursor('server-side-cursor')
break
except psycopg2.Error as exc:
LOG.info(f'Cannot connect to DB. Retrying. Error: {exc}')
trial += 1
time.sleep(self.retry_wait)
assert self.conn, 'There is no connection.'
assert self.cursor, 'There is no cursor.'
assert self.server_side_cursor, 'There is no server-side cursor.'
return self
def __exit__(self, *args):
self.cursor.close()
self.conn.close()
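# Minimal usage sketch (illustrative): DBConn is designed to be used as a
# context manager. The DSN below is a placeholder assumption -- any
# psycopg2/libpq connection string works -- and a reachable PostgreSQL
# instance is required for the connection retries to succeed.
if __name__ == '__main__':
    example_dsn = 'postgresql://postgres@localhost:5432/postgres'
    with DBConn(example_dsn, statement_timeout=5000, num_retries=3, use_dict_cursor=True) as db:
        db.cursor.execute('SELECT 42 AS answer')
        print(db.cursor.fetchone()['answer'])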
| 31.702128 | 109 | 0.624832 | 1,376 | 0.92349 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.130201 |
7ea4d785d48817245d9b222be270587ab13b327a | 436 | py | Python | examples/test_pos_boot_normal.py | zmdismai/tcf | 3903e0a2f444c3aa14647a5147a0df76a49e4195 | [
"Apache-2.0"
] | null | null | null | examples/test_pos_boot_normal.py | zmdismai/tcf | 3903e0a2f444c3aa14647a5147a0df76a49e4195 | [
"Apache-2.0"
] | null | null | null | examples/test_pos_boot_normal.py | zmdismai/tcf | 3903e0a2f444c3aa14647a5147a0df76a49e4195 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
import tcfl.tc
import tcfl.tl
import tcfl.pos
@tcfl.tc.interconnect("ipv4_addr")
@tcfl.tc.target('pos_capable')
class _test(tcfl.tc.tc_c):
"""
Boot a target to the provisioned OS (not Provisioning OS)
"""
def eval(self, ic, target):
ic.power.on()
target.pos.boot_normal()
target.shell.up(user = 'root')
def teardown(self):
tcfl.tl.console_dump_on_failure(self)
| 20.761905 | 61 | 0.646789 | 302 | 0.692661 | 0 | 0 | 368 | 0.844037 | 0 | 0 | 121 | 0.277523 |
7ea4f26b832f9d8e48b3a6b69e37f65806d63b1b | 1,367 | py | Python | ImGen.py | ibaranov-cp/ImGen_Circuit_Maker | 50c328ec0f62a77908dc3179de8fbf123f38caec | [
"BSD-3-Clause"
] | null | null | null | ImGen.py | ibaranov-cp/ImGen_Circuit_Maker | 50c328ec0f62a77908dc3179de8fbf123f38caec | [
"BSD-3-Clause"
] | null | null | null | ImGen.py | ibaranov-cp/ImGen_Circuit_Maker | 50c328ec0f62a77908dc3179de8fbf123f38caec | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#BSD 3-Clause License
#Copyright (c) 2017, Ilia Baranov
#############################################
# CHANGE THESE VARS AS NEEDED
size = 10 #size of squares in mils
invert = False #Color invert the image
image_name = "test.png" #name of the image, can be BMP, PNG or JPG
#############################################
from PIL import Image
import numpy as np
im = Image.open(image_name)
im.load()
im = im.convert('1')
pixels = list(im.getdata())
width, height = im.size
pixels = [pixels[i * width:(i + 1) * width] for i in xrange(height)]
print height, width
def format_csv(i,x,y):
cv.write("\""+str(i)+"\",")
cv.write("\""+str(x*size)+"\",")
cv.write("\""+str((height - y - 1) *size)+"\",")
cv.write("\"\"\n")
with open(image_name[:-3]+"csv", 'w') as cv:
cv.write("\"Index\",\"X (mil)\",\"Y (mil)\",\"Arc Angle (Neg = CW)\"\n")
cv.write("\"0\",\"0\",\"0\",\"\"\n")
i = 1
comp = 0
if (invert): comp = 255
for y in range (0,height):
#print pixels[:][y] #For Debugging
for x in range (0,width):
if (pixels[y][x] == comp):
format_csv(i,x,y)
i+=1
format_csv(i,x,y-1)
i+=1
format_csv(i,x+1,y-1)
i+=1
format_csv(i,x+1,y)
i+=1
format_csv(i,x,y)
i+=1
cv.write("\""+str(i)+"\",")
cv.write("\"0\",\"0\",\"\"\n")
i+=1
| 23.982456 | 74 | 0.504755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.365764 |
7ea621576562b5fb32452a65b61e929a0460d182 | 13,564 | py | Python | ckan/tests/functional/test_publisher_auth.py | dadosgovbr/ckan | f2ed9a03581c3229cf01209baa9a02a71642e0b6 | [
"BSD-3-Clause"
] | 2 | 2015-11-05T12:04:52.000Z | 2017-08-09T11:29:11.000Z | ckan/tests/functional/test_publisher_auth.py | dadosgovbr/ckan | f2ed9a03581c3229cf01209baa9a02a71642e0b6 | [
"BSD-3-Clause"
] | null | null | null | ckan/tests/functional/test_publisher_auth.py | dadosgovbr/ckan | f2ed9a03581c3229cf01209baa9a02a71642e0b6 | [
"BSD-3-Clause"
] | null | null | null | import re
from nose.tools import assert_equal
import ckan.model as model
from ckan.lib.create_test_data import CreateTestData
from ckan.logic import NotAuthorized
from ckan.tests import *
from ckan.tests import setup_test_search_index
from base import FunctionalTestCase
from ckan.tests import search_related, is_search_supported
class TestPublisherGroups(FunctionalTestCase):
@classmethod
def setup_class(self):
from ckan.tests.mock_publisher_auth import MockPublisherAuth
self.auth = MockPublisherAuth()
model.Session.remove()
CreateTestData.create(auth_profile='publisher')
self.groupname = u'david'
self.packagename = u'testpkg'
model.repo.new_revision()
model.Session.add(model.Package(name=self.packagename))
model.repo.commit_and_remove()
@classmethod
def teardown_class(self):
model.Session.remove()
model.repo.rebuild_db()
model.Session.remove()
def _run_fail_test( self, username, action):
grp = model.Group.by_name(self.groupname)
context = { 'group': grp, 'model': model, 'user': username }
try:
self.auth.check_access(action,context, {})
assert False, "The user should not have access"
except NotAuthorized, e:
pass
def _run_success_test( self, username, action):
userobj = model.User.get(username)
grp = model.Group.by_name(self.groupname)
f = model.User.get_groups
def gg(*args, **kwargs):
return [grp]
model.User.get_groups = gg
context = { 'group': grp, 'model': model, 'user': username }
try:
self.auth.check_access(action, context, {})
except NotAuthorized, e:
assert False, "The user should have %s access: %r." % (action, e.extra_msg)
model.User.get_groups = f
def test_new_success(self):
self._run_success_test( 'russianfan', 'group_create' )
def test_new_fail(self):
self._run_fail_test( 'russianfan', 'group_create' )
def test_new_anon_fail(self):
self._run_fail_test( '', 'group_create' )
def test_new_unknown_fail(self):
self._run_fail_test( 'nosuchuser', 'group_create' )
def test_edit_success(self):
""" Success because user in group """
self._run_success_test( 'russianfan', 'group_update' )
def test_edit_fail(self):
""" Fail because user not in group """
self._run_fail_test( 'russianfan', 'group_update' )
def test_edit_anon_fail(self):
""" Fail because user is anon """
self._run_fail_test( '', 'group_update' )
def test_edit_unknown_fail(self):
self._run_fail_test( 'nosuchuser', 'group_update' )
def test_delete_success(self):
""" Success because user in group """
self._run_success_test( 'russianfan', 'group_delete' )
def test_delete_fail(self):
""" Fail because user not in group """
self._run_fail_test( 'russianfan', 'group_delete' )
def test_delete_anon_fail(self):
""" Fail because user is anon """
self._run_fail_test( '', 'group_delete' )
def test_delete_unknown_fail(self):
self._run_fail_test( 'nosuchuser', 'group_delete' )
class TestPublisherShow(FunctionalTestCase):
@classmethod
def setup_class(self):
from ckan.tests.mock_publisher_auth import MockPublisherAuth
self.auth = MockPublisherAuth()
model.Session.remove()
CreateTestData.create(auth_profile='publisher')
self.groupname = u'david'
self.packagename = u'testpkg'
model.repo.new_revision()
model.Session.add(model.Package(name=self.packagename))
model.repo.commit_and_remove()
@classmethod
def teardown_class(self):
model.Session.remove()
model.repo.rebuild_db()
model.Session.remove()
def test_package_show_deleted_success(self):
userobj = model.User.get('russianfan')
grp = model.Group.by_name(self.groupname)
pkg = model.Package.by_name(self.packagename)
pkg.state = 'deleted'
f = model.User.get_groups
g = model.Package.get_groups
def gg(*args, **kwargs):
return [grp]
model.User.get_groups = gg
model.Package.get_groups = gg
context = { 'package': pkg, 'model': model, 'user': userobj.name }
try:
self.auth.check_access('package_show', context, {})
except NotAuthorized, e:
            assert False, "The user should have %s access: %r." % ('package_show', e.extra_msg)
model.User.get_groups = f
model.Package.get_groups = g
pkg.state = "active"
def test_package_show_normal_success(self):
userobj = model.User.get('russianfan')
grp = model.Group.by_name(self.groupname)
pkg = model.Package.by_name(self.packagename)
pkg.state = "active"
context = { 'package': pkg, 'model': model, 'user': userobj.name }
try:
self.auth.check_access('package_show', context, {})
except NotAuthorized, e:
assert False, "The user should have %s access: %r." % ("package_show", e.extra_msg)
def test_package_show_deleted_fail(self):
userobj = model.User.get('russianfan')
grp = model.Group.by_name(self.groupname)
pkg = model.Package.by_name(self.packagename)
pkg.state = 'deleted'
g = model.Package.get_groups
def gg(*args, **kwargs):
return [grp]
model.Package.get_groups = gg
context = { 'package': pkg, 'model': model, 'user': userobj.name }
try:
self.auth.check_access('package_show', context, {})
assert False, "The user should not have access."
except NotAuthorized, e:
pass
model.Package.get_groups = g
pkg.state = "active"
class TestPublisherGroupPackages(FunctionalTestCase):
@classmethod
def setup_class(self):
from ckan.tests.mock_publisher_auth import MockPublisherAuth
self.auth = MockPublisherAuth()
model.Session.remove()
CreateTestData.create(auth_profile='publisher')
self.groupname = u'david'
self.packagename = u'testpkg'
model.repo.new_revision()
model.Session.add(model.Package(name=self.packagename))
model.repo.commit_and_remove()
@classmethod
def teardown_class(self):
model.Session.remove()
model.repo.rebuild_db()
model.Session.remove()
def _run_fail_test( self, username, action):
pkg = model.Package.by_name(self.packagename)
context = { 'package': pkg, 'model': model, 'user': username }
try:
self.auth.check_access(action, context, {})
assert False, "The user should not have access"
except NotAuthorized, e:
pass
def _run_success_test( self, username, action):
userobj = model.User.get(username)
grp = model.Group.by_name(self.groupname)
pkg = model.Package.by_name(self.packagename)
f = model.User.get_groups
g = model.Package.get_groups
def gg(*args, **kwargs):
return [grp]
model.User.get_groups = gg
model.Package.get_groups = gg
context = { 'package': pkg, 'model': model, 'user': username }
try:
self.auth.check_access(action, context, {})
except NotAuthorized, e:
assert False, "The user should have %s access: %r." % (action, e.extra_msg)
model.User.get_groups = f
model.Package.get_groups = g
def test_new_success(self):
self._run_success_test( 'russianfan', 'package_create' )
# Currently valid to have any logged in user succeed
#def test_new_fail(self):
# self._run_fail_test( 'russianfan', 'package_create' )
def test_new_anon_fail(self):
self._run_fail_test( '', 'package_create' )
def test_new_unknown_fail(self):
self._run_fail_test( 'nosuchuser', 'package_create' )
def test_edit_success(self):
""" Success because user in group """
self._run_success_test( 'russianfan', 'package_update' )
def test_edit_fail(self):
""" Fail because user not in group """
self._run_fail_test( 'russianfan', 'package_update' )
def test_edit_anon_fail(self):
""" Fail because user is anon """
self._run_fail_test( '', 'package_update' )
def test_edit_unknown_fail(self):
self._run_fail_test( 'nosuchuser', 'package_update' )
def test_delete_success(self):
""" Success because user in group """
self._run_success_test( 'russianfan', 'package_delete' )
def test_delete_fail(self):
""" Fail because user not in group """
self._run_fail_test( 'russianfan', 'package_delete' )
def test_delete_anon_fail(self):
""" Fail because user is anon """
self._run_fail_test( '', 'package_delete' )
def test_delete_unknown_fail(self):
self._run_fail_test( 'nosuchuser', 'package_delete' )
class TestPublisherPackageRelationships(FunctionalTestCase):
@classmethod
def setup_class(self):
from ckan.tests.mock_publisher_auth import MockPublisherAuth
self.auth = MockPublisherAuth()
model.Session.remove()
CreateTestData.create(auth_profile='publisher')
self.groupname = u'david'
self.package1name = u'testpkg'
self.package2name = u'testpkg2'
model.repo.new_revision()
pkg1 = model.Package(name=self.package1name)
pkg2 = model.Package(name=self.package2name)
model.Session.add( pkg1 )
model.Session.add( pkg2 )
model.Session.flush()
pkg1 = model.Package.by_name(self.package1name)
pkg2 = model.Package.by_name(self.package2name)
self.rel = model.PackageRelationship(name="test", type='depends_on')
self.rel.subject = pkg1
self.rel.object = pkg2
model.Session.add( self.rel )
model.repo.commit_and_remove()
@classmethod
def teardown_class(self):
model.Session.remove()
model.repo.rebuild_db()
model.Session.remove()
def test_create_fail_user( self):
p1 = model.Package.by_name( self.package1name )
p2 = model.Package.by_name( self.package2name )
context = { 'model': model, 'user': 'russianfan' }
try:
self.auth.check_access('package_relationship_create', context, {'id': p1.id, 'id2': p2.id})
assert False, "The user should not have access."
except NotAuthorized, e:
pass
def test_create_fail_ddict( self):
p1 = model.Package.by_name( self.package1name )
p2 = model.Package.by_name( self.package2name )
context = { 'model': model, 'user': 'russianfan' }
try:
self.auth.check_access('package_relationship_create', context, {'id': p1.id})
assert False, "The user should not have access."
except NotAuthorized, e:
pass
try:
self.auth.check_access('package_relationship_create', context, {'id2': p2.id})
assert False, "The user should not have access."
except NotAuthorized, e:
pass
def test_create_success(self):
userobj = model.User.get('russianfan')
f = model.User.get_groups
g = model.Package.get_groups
def gg(*args, **kwargs):
return ['test_group']
model.User.get_groups = gg
model.Package.get_groups = gg
p1 = model.Package.by_name( self.package1name )
p2 = model.Package.by_name( self.package2name )
context = { 'model': model, 'user': 'russianfan' }
try:
self.auth.check_access('package_relationship_create', context, {'id': p1.id, 'id2': p2.id})
except NotAuthorized, e:
            assert False, "The user should have %s access: %r." % ('package_relationship_create', e.extra_msg)
model.User.get_groups = f
model.Package.get_groups = g
def test_delete_success(self):
userobj = model.User.get('russianfan')
f = model.User.get_groups
g = model.Package.get_groups
def gg(*args, **kwargs):
return ['test_group']
model.User.get_groups = gg
model.Package.get_groups = gg
p1 = model.Package.by_name( self.package1name )
p2 = model.Package.by_name( self.package2name )
context = { 'model': model, 'user': 'russianfan', 'relationship': self.rel }
try:
self.auth.check_access('package_relationship_delete', context, {'id': p1.id, 'id2': p2.id })
except NotAuthorized, e:
assert False, "The user should have %s access: %r." % ('package_relationship_delete', e.extra_msg)
model.User.get_groups = f
model.Package.get_groups = g
| 35.60105 | 122 | 0.603804 | 13,158 | 0.970068 | 0 | 0 | 2,902 | 0.213949 | 0 | 0 | 2,356 | 0.173695 |
7ea6762aae019197866e6971cb35189f112a85c4 | 451 | py | Python | py/__main__.py | social-learning/data-structures | 84e9996150d14d7008e9d9d3cf37dcb586266802 | [
"MIT"
] | 2 | 2021-05-07T18:57:21.000Z | 2021-05-09T18:19:32.000Z | py/__main__.py | social-learning/data-structures | 84e9996150d14d7008e9d9d3cf37dcb586266802 | [
"MIT"
] | 3 | 2021-05-06T15:29:53.000Z | 2021-05-07T17:03:22.000Z | py/__main__.py | social-learning/data-structures | 84e9996150d14d7008e9d9d3cf37dcb586266802 | [
"MIT"
] | null | null | null | import src.data_structures.heap
from src.algorithms.misc import powerfulIntegers
class Solution:
def balancedStringSplit(self, s: str) -> int:
        # Track the running R/L balance; each time it returns to zero, one
        # balanced substring has been closed off.
        bal = 0
        depth = 0
        for c in s:
            depth += 1 if c == 'R' else -1
            if depth == 0:
                bal += 1
return bal
if __name__ == "__main__":
x = powerfulIntegers(2,3,10)
print(x)
else:
print("File one executed when imported") | 21.47619 | 48 | 0.585366 | 246 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.095344 |
7ea6ac2af6bb95e81265bf6707a8fafbd55ae5e0 | 1,121 | py | Python | src/EDA/cleanAcceptedLoans.py | simon555/cs109-FinalProject | 4a7c7b10c02150400fedb36a6c59da0ee06bbcce | [
"MIT"
] | 2 | 2018-11-23T07:12:23.000Z | 2018-11-27T22:13:51.000Z | src/EDA/cleanAcceptedLoans.py | simon555/cs109-FinalProject | 4a7c7b10c02150400fedb36a6c59da0ee06bbcce | [
"MIT"
] | null | null | null | src/EDA/cleanAcceptedLoans.py | simon555/cs109-FinalProject | 4a7c7b10c02150400fedb36a6c59da0ee06bbcce | [
"MIT"
] | 1 | 2018-11-26T17:10:18.000Z | 2018-11-26T17:10:18.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 21:56:22 2018
@author: simon
"""
from src.EDA.clean_object import cleanObject
from src.EDA.clean_numeric import cleanNumeric
import pandas as pd
def cleanForDemo(filename):
"""
input : location of the file
output : fully cleaned dataset
    "for demo" means that, for now, loan_status will only have 2 labels: paid or not paid (yet)
"""
data = pd.read_csv(filename)
data=cleanObject(data)
data=cleanNumeric(data)
data=data.dropna()
not_payed_indexes = [1,2,3,5]
payed = [0]
def adapt_loan_status(label):
if label in not_payed_indexes:
return(0)
elif label in payed:
return(1)
else:
return(label)
data['loan_status'] = data['loan_status'].apply(adapt_loan_status)#
data = data[data['loan_status']!=4]
return(data)
if __name__=='__main__':
INPUTFILE='../../data/smallData/rawData/AcceptedLoans.csv'
OUTPUTFILE = '../../data/tmp/demoClean.csv'
data=cleanForDemo(INPUTFILE) | 21.557692 | 103 | 0.617306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.367529 |
7ea6b0f17a7cddaf370fa292780a167ed669fcc8 | 318 | py | Python | answers/VanshBaijal/Day 6/Question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/VanshBaijal/Day 6/Question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/VanshBaijal/Day 6/Question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | Candies = [int(x) for x in input("Enter the numbers with space: ").split()]
extraCandies=int(input("Enter the number of extra candies: "))
Output=[ ]
i=0
while(i<len(Candies)):
if(Candies[i]+extraCandies>=max(Candies)):
Output.append("True")
else:
Output.append("False")
i+=1
print(Output)
| 26.5 | 75 | 0.644654 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.257862 |
7ea8f23726760ea2f3958ff9164e22aa48175835 | 2,031 | py | Python | i2c/motion_sensor.py | Matrix-Robotics/MatrixControl | 0e596be195b2324154bc6ec220172863e311ccb7 | [
"MIT"
] | 1 | 2021-07-15T07:33:43.000Z | 2021-07-15T07:33:43.000Z | i2c/motion_sensor.py | Matrix-Robotics/MatrixControl | 0e596be195b2324154bc6ec220172863e311ccb7 | [
"MIT"
] | 1 | 2021-08-12T09:08:10.000Z | 2021-08-12T09:08:10.000Z | i2c/motion_sensor.py | Matrix-Robotics/MatrixControl | 0e596be195b2324154bc6ec220172863e311ccb7 | [
"MIT"
] | null | null | null | class MotionSensor:
"""Get 9Dof data by using MotionSensor.
See [MatrixMotionSensor](https://matrix-robotics.github.io/MatrixMotionSensor/) for more details.
Parameters
----------
i2c_port : int
i2c_port is corresponding with I2C1, I2C2 ... sockets on board.
_dev : class
MatrixControl.Device class
"""
def __init__(self, _dev, i2c_port):
self.i2c_port = i2c_port
self._dev = _dev
    def _complement(self, _buff):
        # Convert the raw unsigned 16-bit register value into a signed integer
        # (two's complement) before returning it.
        if len(str(_buff)) > 1:
            if _buff > 32767:
                _buff -= 65536
        return _buff
def getAccel(self, axis):
"""Get Accel Data. (unit: mm/s^2)
Parameters
----------
axis : str
options are "X", "Y" or "Z"
"""
_buff = "I2C{}_GETACCEL_{}".format(self.i2c_port, axis.upper())
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
def getGyro(self, axis):
"""Get Gyro Data. (unit: degree per second)
Parameters
----------
axis : str
options are "X", "Y" or "Z"
"""
_buff = "I2C{}_GETGYRO_{}".format(self.i2c_port, axis.upper())
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
def getRoll(self):
_buff = "I2C{}_GETROLL".format(self.i2c_port)
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
def getYaw(self):
_buff = "I2C{}_GETYAW".format(self.i2c_port)
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
def getPitch(self):
_buff = "I2C{}_GETPITCH".format(self.i2c_port)
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
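# Minimal usage sketch (illustrative): `dev` is assumed to be an initialised
# MatrixControl Device instance (its construction is outside this module), and
# i2c_port=1 corresponds to the I2C1 socket mentioned in the class docstring.
#   sensor = MotionSensor(dev, i2c_port=1)
#   ax = sensor.getAccel("X")      # mm/s^2
#   gz = sensor.getGyro("Z")       # degrees per second
#   attitude = (sensor.getRoll(), sensor.getPitch(), sensor.getYaw())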
| 29.867647 | 101 | 0.589857 | 2,029 | 0.999015 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.345643 |
7ea9cd90fdf10c43d54fa675ea898cdb76225e1f | 5,855 | py | Python | tests/test_search.py | omaralvarez/trakt.py | 93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf | [
"MIT"
] | null | null | null | tests/test_search.py | omaralvarez/trakt.py | 93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf | [
"MIT"
] | null | null | null | tests/test_search.py | omaralvarez/trakt.py | 93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf | [
"MIT"
] | null | null | null | from tests.core.helpers import read
from six.moves.urllib_parse import urlparse, parse_qsl
from trakt import Trakt
import responses
def search_callback(request):
url = urlparse(request.url)
query = dict(parse_qsl(url.query))
if 'id' in query and 'id_type' in query:
path = 'fixtures/search/lookup/%s/%s.json' % (
query.get('id_type'),
query.get('id')
)
else:
path = 'fixtures/search/query/%s/%s/%s.json' % (
query.get('type', 'all'),
query.get('year', 'all'),
query.get('query')
)
try:
content = read(path)
return 200, {}, content
    except Exception:
return 200, {}, '[]'
@responses.activate
def test_lookup_movie():
responses.add_callback(
responses.GET, 'http://mock/search',
callback=search_callback,
content_type='application/json'
)
Trakt.base_url = 'http://mock'
movie = Trakt['search'].lookup('tt0848228', 'imdb')
assert movie.keys == [
('imdb', 'tt0848228'),
('tmdb', '24428'),
('slug', 'the-avengers-2012'),
('trakt', '14701')
]
assert movie.title == "The Avengers"
assert movie.year == 2012
assert sorted(movie.images.keys()) == ['fanart', 'poster']
assert movie.overview is not None
assert movie.score is None
@responses.activate
def test_lookup_show():
responses.add_callback(
responses.GET, 'http://mock/search',
callback=search_callback,
content_type='application/json'
)
Trakt.base_url = 'http://mock'
show = Trakt['search'].lookup('tt0903747', 'imdb')
assert show.keys == [
('tvdb', '81189'),
('tmdb', '1396'),
('imdb', 'tt0903747'),
('tvrage', '18164'),
('slug', 'breaking-bad'),
('trakt', '1388')
]
assert show.title == "Breaking Bad"
assert show.year == 2008
assert sorted(show.images.keys()) == ['fanart', 'poster']
assert show.overview is not None
assert show.score is None
@responses.activate
def test_lookup_episode():
responses.add_callback(
responses.GET, 'http://mock/search',
callback=search_callback,
content_type='application/json'
)
Trakt.base_url = 'http://mock'
episode = Trakt['search'].lookup('tt0959621', 'imdb')
assert episode.keys == [
(1, 1),
('tvdb', '349232'),
('tmdb', '62085'),
('imdb', 'tt0959621'),
('tvrage', '637041'),
('trakt', '73482')
]
assert episode.title == "Pilot"
assert sorted(episode.images.keys()) == ['screenshot']
assert episode.overview is not None
assert episode.score is None
assert episode.show.keys == [
('slug', 'breaking-bad'),
('trakt', '1388')
]
assert episode.show.title == "Breaking Bad"
assert episode.show.year == 2008
assert sorted(episode.show.images.keys()) == ['fanart', 'poster']
@responses.activate
def test_query_movie():
responses.add_callback(
responses.GET, 'http://mock/search',
callback=search_callback,
content_type='application/json'
)
Trakt.base_url = 'http://mock'
movies = Trakt['search'].query('The Avengers', 'movie')
assert [(m.score, (m.title, m.year)) for m in movies] == [
(77.052734, ('Avenged', None)),
(77.0176, ('Avenger', 2006)),
(60.26589, ('The Avengers', 2012)),
(60.26589, ('The Avengers', 1998)),
(60.26589, ('The Avenger', 1960)),
(60.26589, ('The Avenger', 1931)),
(60.26589, ('The Avenging', 1982)),
(60.26589, ('The Avenger', 1947)),
(55.793285, ('Invisible Avenger', 1954)),
(55.652233, ('Crippled Avengers', 1978))
]
@responses.activate
def test_query_show():
responses.add_callback(
responses.GET, 'http://mock/search',
callback=search_callback,
content_type='application/json'
)
Trakt.base_url = 'http://mock'
shows = Trakt['search'].query('Breaking Bad', 'show')
assert [(s.score, (s.title, s.year)) for s in shows] == [
(54.809517, ('Breaking Bad', 2008)),
(28.975079, ('Breaking Boston', 2014)),
(28.89469, ('Breaking In', 2011)),
(26.28082, ('Talking Bad', 2013)),
(20.346865, ('Donal MacIntyre: Breaking Crime', 2015)),
(18.345793, ('Good Times, Bad Times', 1990)),
(1.1290063, ('What About Brian', 2006)),
(0.39297247, ('It Could Be Worse', 2013)),
(0.39297247, ('Murder Police', None)),
(0.3438509, ('Pinocchio', 2014))
]
@responses.activate
def test_query_episode():
responses.add_callback(
responses.GET, 'http://mock/search',
callback=search_callback,
content_type='application/json'
)
Trakt.base_url = 'http://mock'
episodes = Trakt['search'].query('Breaking Bad', 'episode')
assert [(e.score, (e.pk, e.title), (e.show.title, e.show.year)) for e in episodes] == [
(77.16374, ((2, 13), 'Bad Breaks'), ('Burn Notice', 2007)),
(77.16374, ((1, 1), 'Breaking Bad'), ("The Writers' Room", 2013)),
(77.16374, ((2, 18), 'Breaking Bad'), ('Honest Trailers', 2012)),
(77.16374, ((4, 7), 'Bad Break'), ('Bad Girls Club', 2006)),
(77.16374, ((1, 8), 'Bad Break'), ('Miami Ink', 2005)),
(77.16374, ((2, 6), 'Breaking Bad'), ('Pawn Stars UK', 2013)),
(77.16374, ((6, 16), 'Bad Breaks'), ('Trapper John, M.D.', 1979)),
(77.16374, ((3, 8), 'Breaking Bad'), ('Bad Days', 1969)),
(77.16374, ((1, 261), 'Breaking Bad'), ('The Totally Rad Show', 2007)),
(77.16374, ((1, 2), ' Breaking Bad'), ('Fight Factory', 2012))
]
| 29.129353 | 91 | 0.554398 | 0 | 0 | 0 | 0 | 5,132 | 0.876516 | 0 | 0 | 1,577 | 0.269342 |
7ea9f178e2418d2c7e8986651448126d115fa230 | 4,828 | py | Python | test.py | XiaoyongNI/hybrid-inference | c268e1ada019e08f62e3f02fc6d5059130ec5358 | [
"MIT"
] | null | null | null | test.py | XiaoyongNI/hybrid-inference | c268e1ada019e08f62e3f02fc6d5059130ec5358 | [
"MIT"
] | null | null | null | test.py | XiaoyongNI/hybrid-inference | c268e1ada019e08f62e3f02fc6d5059130ec5358 | [
"MIT"
] | null | null | null | from utils import generic_utils as g_utils
import torch
import evaluation as eval
import torch.nn.functional as F
import losses
from datasets import nclt
from datasets import synthetic
from datasets import lorenz
import numpy as np
import time
def test_kalman(args, model, test_loader, plots=False, nclt_ds=False):
test_loss = 0
for state, meas, x_0, P_0 in test_loader:
batch_size = state.size()[0]
for i in range(batch_size):
state_np = state.numpy()[i, :, :]
meas_np = meas.numpy()[i, :, :]
x_0_np = x_0.numpy()[i, :]
P_0_np = P_0.numpy()[i, :]
# g_utils.plot_trajectory(state, meas)
est_state, est_cov = model.forward(meas_np, x_0_np, P_0_np)
if nclt_ds:
if plots:
nclt.plot_trajecotry([state_np, meas_np])
nclt.plot_trajecotry([state_np, g_utils.state2position(est_state)])
sample_loss = eval.mse(state_np, g_utils.state2position(est_state))
else:
if plots:
g_utils.plot_prediction(state_np, meas_np, est_state, est_cov)
sample_loss = eval.mse(g_utils.state2position(state_np), g_utils.state2position(est_state))
test_loss += sample_loss
test_loss /= len(test_loader.dataset)
    print('{} set: Average loss: {:.4f}, Num samples: {}\n'.format(test_loader.dataset.partition,
test_loss, len(test_loader.dataset)))
return test_loss
def test_kalman_nclt(model, test_loader, plots=False):
test_loss = 0
for _, state, meas, x_0, P_0, _ in test_loader:
batch_size = state.size()[0]
for i in range(batch_size):
state_np = state.numpy()[i, :, :]
meas_np = meas.numpy()[i, :, :]
x_0_np = x_0.numpy()[i, :]
P_0_np = P_0.numpy()[i, :]
# g_utils.plot_trajectory(state, meas)
est_state, est_cov = model.forward(meas_np, x_0_np, P_0_np)
if plots:
synthetic.plot_trajecotry([state_np, g_utils.state2position(est_state)])
sample_loss = eval.mse(state_np, g_utils.state2position(est_state), normalize=False)
test_loss += sample_loss
test_loss /= test_loader.dataset.total_len()
print('{} set: Average loss: {:.4f}, Num samples: {}\n'.format(test_loader.dataset.partition,
test_loss, len(test_loader.dataset)))
test_loss_dB = 10 * np.log10(test_loss)
print("MSE LOSS:", test_loss_dB, "[dB]")
return test_loss
def test_kalman_lorenz(args, model, test_loader, plots=False):
test_loss = 0
for _, state, meas, x_0, P_0, _ in test_loader:
batch_size = state.size()[0]
for i in range(batch_size):
state_np = state.numpy()[i, :, :]
meas_np = meas.numpy()[i, :, :]
x_0_np = x_0.numpy()[i, :]
P_0_np = P_0.numpy()[i, :]
est_state = model.forward(meas_np)
sample_loss = eval.mse(state_np, est_state, normalize=False)
test_loss += sample_loss
test_loss /= test_loader.dataset.total_len()
if plots:
lorenz.plot_trajectory(args, est_state, test_loss)
print('{} set: Average loss: {:.4f}, Num samples: {}\n'.format(test_loader.dataset.partition,
test_loss, len(test_loader.dataset)))
test_loss_dB = 10 * np.log10(test_loss)
print("MSE LOSS:", test_loss_dB, "[dB]")
return test_loss
def test_gnn_kalman(args, net, device, loader, plots=False, plot_lorenz=False):
net.eval()
test_loss = 0
test_mse = 0
start = time.time()
with torch.no_grad():
for batch_idx, (ts, position, meas, x0, P0, operators) in enumerate(loader):
position, meas, x0 = position.to(device), meas.to(device), x0.to(device)
operators = g_utils.operators2device(operators, device)
outputs = net([operators, meas], x0, args.K, ts=ts)
test_mse += F.mse_loss(outputs[-1], position) * meas.size()[0] * meas.size()[1]
test_loss += losses.mse_arr_loss(outputs, position) * meas.size()[0] * meas.size()[1]
test_mse /= loader.dataset.total_len() + 1e-10
test_loss /= loader.dataset.total_len() + 1e-10
end = time.time()
t = end - start
if plot_lorenz:
lorenz.plot_trajectory(args, outputs[-1][0].cpu().numpy(), test_mse)
print('\t{} set: Loss: {:.4f}, MSE: {:.4f}, Len {}'.format(loader.dataset.partition,
test_loss, test_mse, len(loader.dataset)))
try:
test_mse_dB = 10 * np.log10(test_mse.cpu().numpy())
except:
test_mse_dB = 10 * np.log10(test_mse)
print("MSE LOSS:", test_mse_dB, "[dB]")
# Print Run Time
print("Inference Time:", t)
return test_mse
| 36.854962 | 107 | 0.615369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.072908 |
7eab2e9233141229915ea54e3a9ad3980ec8ae2f | 1,418 | py | Python | nlt/debug/dataset.py | isabella232/neural-light-transport | 04acc39713be464e74e0393315c71416f67a3ef3 | [
"Apache-2.0"
] | 176 | 2020-08-13T02:31:07.000Z | 2022-03-24T05:50:55.000Z | nlt/debug/dataset.py | isabella232/neural-light-transport | 04acc39713be464e74e0393315c71416f67a3ef3 | [
"Apache-2.0"
] | 7 | 2020-11-16T05:07:08.000Z | 2022-02-07T04:19:44.000Z | nlt/debug/dataset.py | isabella232/neural-light-transport | 04acc39713be464e74e0393315c71416f67a3ef3 | [
"Apache-2.0"
] | 30 | 2020-08-13T07:03:34.000Z | 2022-03-23T18:55:26.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import join, dirname
from absl import app
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
sys.path.append('../')
import datasets
from util import io as ioutil
def main(_):
config_ini = join(dirname(__file__), '..', 'config', 'dragon_specular.ini')
config = ioutil.read_config(config_ini)
# Make training dataset
dataset_name = config.get('DEFAULT', 'dataset')
Dataset = datasets.get_dataset_class(dataset_name)
dataset = Dataset(config, 'train')
path = dataset.files[1]
ret = dataset._load_data(path)
# Iterate
no_batch = config.getboolean('DEFAULT', 'no_batch')
datapipe = dataset.build_pipeline(no_batch=no_batch)
for batch_i, batch in enumerate(datapipe):
from IPython import embed; embed()
if __name__ == '__main__':
app.run(main)
| 29.541667 | 79 | 0.727786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.48378 |
7eab677ffe16ca51d58be0908854da05e1f26c5f | 1,928 | py | Python | safirnotification/api.py | byrxiaochun/safir_notification_service | adc7e3ab2b61906480e50264bab0cccd47483189 | [
"Apache-2.0"
] | 1 | 2017-07-14T07:36:22.000Z | 2017-07-14T07:36:22.000Z | safirnotification/api.py | byrxiaochun/safir_notification_service | adc7e3ab2b61906480e50264bab0cccd47483189 | [
"Apache-2.0"
] | null | null | null | safirnotification/api.py | byrxiaochun/safir_notification_service | adc7e3ab2b61906480e50264bab0cccd47483189 | [
"Apache-2.0"
] | 2 | 2017-10-21T12:16:18.000Z | 2019-07-19T05:31:07.000Z | from __future__ import print_function
from flask import Flask
from flask import request
import argparse
import json
import sys
from safirnotification.alarm.alarm_handler import AlarmHandler
from safirnotification.utils import log
from safirnotification.utils.opts import ConfigOpts
LOG = log.get_logger()
Flask.get = lambda self, path: self.route(path, methods=['get'])
app = Flask(__name__)
alarm_handler = None
@app.route('/alarm', methods=['POST'])
def alarm():
if request.method == 'POST':
try:
data = json.loads(request.data)
if 'alarm_id' not in data:
                LOG.error("Failed processing alarm! " +
                          "Alarm ID not found")
else:
LOG.info('ALARM RECEIVED. ID: ' + str(data['alarm_id']) +
' Current state: ' + data['current'] +
' Previous state: ' + data['previous'])
alarm_handler.handle_alarm(
alarm_id=data['alarm_id'],
current_state=data['current'],
previous_state=data['previous'],
reason=data['reason'])
except Exception as ex:
            LOG.error("Failed processing alarm! " + str(ex))
return "AODH alarm received"
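# Example request (illustrative values): the /alarm endpoint above expects a
# JSON body with the keys it reads -- alarm_id, current, previous and reason:
#   curl -X POST http://<api-host>:<api-port>/alarm \
#        -d '{"alarm_id": "abc123", "current": "alarm",
#             "previous": "ok", "reason": "threshold crossed"}'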
def main():
parser = argparse.ArgumentParser(prog='safirnotification')
parser.add_argument('-c', help='Config file path')
args = parser.parse_args()
config_file = args.c
if config_file is None:
print('usage: safirnotification -c <config-file-path>')
sys.exit(2)
global alarm_handler
alarm_handler = AlarmHandler(config_file)
config_opts = ConfigOpts(config_file)
host = config_opts.get_opt('api', 'host')
port = config_opts.get_opt('api', 'port')
app.run(host=host, port=port, threaded=True)
if __name__ == "__main__":
main()
| 27.942029 | 73 | 0.608921 | 0 | 0 | 0 | 0 | 908 | 0.470954 | 0 | 0 | 376 | 0.195021 |
7eabea74e332a66d58fe03d710b978a1be03e172 | 4,259 | py | Python | slack_primitive_cli/command/chat.py | yuji38kwmt/slack-primitive-cli | 12e942d6ace6b71b874f0648bb3b6b5ac6c590a4 | [
"MIT"
] | null | null | null | slack_primitive_cli/command/chat.py | yuji38kwmt/slack-primitive-cli | 12e942d6ace6b71b874f0648bb3b6b5ac6c590a4 | [
"MIT"
] | 50 | 2020-06-29T06:17:23.000Z | 2022-03-28T15:01:33.000Z | slack_primitive_cli/command/chat.py | yuji38kwmt/slack-primitive-cli | 12e942d6ace6b71b874f0648bb3b6b5ac6c590a4 | [
"MIT"
] | null | null | null | import logging
import click
import slack_sdk
from slack_primitive_cli.common.utils import TOKEN_ENVVAR, TOKEN_HELP_MESSAGE, set_logger
logger = logging.getLogger(__name__)
@click.command(
name="chat.postMessage", help="Sends a message to a channel. See https://api.slack.com/methods/chat.postMessage "
)
@click.option("--token", envvar=TOKEN_ENVVAR, required=True, help=TOKEN_HELP_MESSAGE)
@click.option(
"--channel",
required=True,
help="Channel, private group, or IM channel to send message to. "
"Can be an encoded ID, or a name. See below for more details.",
)
@click.option(
"--text",
required=True,
help="How this field works and whether it is required depends on other fields you use in your API call.",
)
@click.option("--as_user", type=bool, help="Pass true to post the message as the authed user, instead of as a bot.")
@click.option("--attachments", help="A JSON-based array of structured attachments, presented as a URL-encoded string.")
@click.option("--blocks", help="A JSON-based array of structured blocks, presented as a URL-encoded string.")
@click.option(
"--icon_emoji",
help="Emoji to use as the icon for this message. Overrides icon_url. "
"Must be used in conjunction with as_user set to false, otherwise ignored. See authorship below.",
)
@click.option(
"--icon_url",
help="URL to an image to use as the icon for this message. "
"Must be used in conjunction with as_user set to false, otherwise ignored. See authorship below.",
)
@click.option("--link_names", type=bool, help="Find and link channel names and usernames.")
@click.option("--mrkdwn", type=bool, help="Disable Slack markup parsing by setting to false.")
@click.option("--parse", type=bool, help="Change how messages are treated.")
@click.option(
"--reply_broadcast",
type=bool,
help="Used in conjunction with thread_ts and indicates "
"whether reply should be made visible to everyone in the channel or conversation.",
)
@click.option(
"--thread_ts",
help="Provide another message's ts value to make this message a reply. "
"Avoid using a reply's ts value; use its parent instead.",
)
@click.option("--unfurl_links", type=bool, help="Pass true to enable unfurling of primarily text-based content.")
@click.option("--unfurl_media", type=bool, help="Pass false to disable unfurling of media content.")
@click.option(
"--username",
help="Set your bot's user name. Must be used in conjunction with as_user set to false, otherwise ignored.",
)
def postMessage(
token: str,
channel: str,
text: str,
as_user,
attachments,
blocks,
icon_emoji,
icon_url,
link_names,
mrkdwn,
parse,
reply_broadcast,
thread_ts,
unfurl_links,
unfurl_media,
username,
):
set_logger()
client = slack_sdk.WebClient(token=token)
response = client.chat_postMessage(
channel=channel,
text=text,
as_user=as_user,
attachments=attachments,
blocks=blocks,
icon_emoji=icon_emoji,
icon_url=icon_url,
link_names=link_names,
mrkdwn=mrkdwn,
parse=parse,
reply_broadcast=reply_broadcast,
thread_ts=thread_ts,
unfurl_links=unfurl_links,
unfurl_media=unfurl_media,
username=username,
)
print(response)
return response
@click.command(name="chat.delete", help="Deletes a message. See https://api.slack.com/methods/chat.delete ")
@click.option("--token", envvar=TOKEN_ENVVAR, required=True, help=TOKEN_HELP_MESSAGE)
@click.option("--channel", required=True, help="Channel containing the message to be deleted.")
@click.option("--ts", required=True, help="Timestamp of the message to be deleted.")
@click.option(
"--as_user",
type=bool,
help="Pass true to delete the message as the authed user with chat:write:user scope. "
"Bot users in this context are considered authed users. "
"If unused or false, the message will be deleted with chat:write:bot scope.",
)
def delete(token: str, channel: str, ts: str, as_user):
set_logger()
client = slack_sdk.WebClient(token=token)
response = client.chat_delete(channel=channel, ts=ts, as_user=as_user)
print(response)
return response
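# Minimal usage sketch (illustrative): drive the commands above through click's
# test runner. The token and channel values are placeholders (assumptions), and
# a valid Slack token is needed for the underlying API call to succeed.
if __name__ == "__main__":
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(
        postMessage,
        ["--token", "xoxb-your-token", "--channel", "#general", "--text", "hello"],
    )
    print(result.exit_code, result.output)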
| 36.401709 | 119 | 0.702043 | 0 | 0 | 0 | 0 | 4,078 | 0.957502 | 0 | 0 | 2,087 | 0.490021 |
7eac896f2fd732187498b92ce5df251097f85bad | 340 | py | Python | codewars/7 kyu/convert-number-to-string.py | sirken/coding-practice | 9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab | [
"MIT"
] | null | null | null | codewars/7 kyu/convert-number-to-string.py | sirken/coding-practice | 9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab | [
"MIT"
] | null | null | null | codewars/7 kyu/convert-number-to-string.py | sirken/coding-practice | 9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab | [
"MIT"
] | null | null | null | from Test import Test, Test as test
'''
We need a function that can transform a number into a string.
What ways of achieving this do you know?
Examples:
number_to_string(123) /* returns '123' */
number_to_string(999) /* returns '999' */
'''
def number_to_string(num):
return str(num)
test.assert_equals(number_to_string(67), '67'); | 21.25 | 61 | 0.723529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.617647 |
7eae404b9ffdd90228dcb56445e0fd056fc2c5bc | 4,255 | py | Python | hummingbot/connector/derivative/binance_perpetual/binance_perpetual_utils.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 2 | 2022-03-03T10:00:27.000Z | 2022-03-08T13:57:56.000Z | hummingbot/connector/derivative/binance_perpetual/binance_perpetual_utils.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 6 | 2022-01-31T15:44:54.000Z | 2022-03-06T04:27:12.000Z | hummingbot/connector/derivative/binance_perpetual/binance_perpetual_utils.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1 | 2022-03-31T07:04:49.000Z | 2022-03-31T07:04:49.000Z | import os
import socket
from typing import Any, Dict, Optional
import hummingbot.connector.derivative.binance_perpetual.constants as CONSTANTS
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_methods import using_exchange
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
from hummingbot.core.web_assistant.auth import AuthBase
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest
from hummingbot.core.web_assistant.rest_pre_processors import RESTPreProcessorBase
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
CENTRALIZED = True
EXAMPLE_PAIR = "BTC-USDT"
DEFAULT_FEES = [0.02, 0.04]
BROKER_ID = "x-3QreWesy"
class BinancePerpetualRESTPreProcessor(RESTPreProcessorBase):
async def pre_process(self, request: RESTRequest) -> RESTRequest:
if request.headers is None:
request.headers = {}
request.headers["Content-Type"] = (
"application/json" if request.method == RESTMethod.POST else "application/x-www-form-urlencoded"
)
return request
def get_client_order_id(order_side: str, trading_pair: object):
nonce = get_tracking_nonce()
symbols: str = trading_pair.split("-")
base: str = symbols[0].upper()
quote: str = symbols[1].upper()
base_str = f"{base[0]}{base[-1]}"
quote_str = f"{quote[0]}{quote[-1]}"
client_instance_id = hex(abs(hash(f"{socket.gethostname()}{os.getpid()}")))[2:6]
return f"{BROKER_ID}-{order_side.upper()[0]}{base_str}{quote_str}{client_instance_id}{nonce}"
def rest_url(path_url: str, domain: str = "binance_perpetual", api_version: str = CONSTANTS.API_VERSION):
base_url = CONSTANTS.PERPETUAL_BASE_URL if domain == "binance_perpetual" else CONSTANTS.TESTNET_BASE_URL
return base_url + api_version + path_url
def wss_url(endpoint: str, domain: str = "binance_perpetual"):
base_ws_url = CONSTANTS.PERPETUAL_WS_URL if domain == "binance_perpetual" else CONSTANTS.TESTNET_WS_URL
return base_ws_url + endpoint
def build_api_factory(auth: Optional[AuthBase] = None) -> WebAssistantsFactory:
api_factory = WebAssistantsFactory(auth=auth, rest_pre_processors=[BinancePerpetualRESTPreProcessor()])
return api_factory
def is_exchange_information_valid(exchange_info: Dict[str, Any]) -> bool:
"""
Verifies if a trading pair is enabled to operate with based on its exchange information
:param exchange_info: the exchange information for a trading pair
:return: True if the trading pair is enabled, False otherwise
"""
return exchange_info.get("status", None) == "TRADING"
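# Minimal usage sketch (illustrative): the helpers above are pure functions and
# can be exercised without network access. "/exchangeInfo" and "/ws" are
# placeholder paths used only for demonstration, not verified endpoint names.
if __name__ == "__main__":
    print(rest_url("/exchangeInfo"))
    print(wss_url("/ws"))
    print(get_client_order_id("buy", "BTC-USDT"))
    print(is_exchange_information_valid({"status": "TRADING"}))  # True
    print(is_exchange_information_valid({"status": "BREAK"}))    # False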
KEYS = {
"binance_perpetual_api_key": ConfigVar(
key="binance_perpetual_api_key",
prompt="Enter your Binance Perpetual API key >>> ",
required_if=using_exchange("binance_perpetual"),
is_secure=True,
is_connect_key=True,
),
"binance_perpetual_api_secret": ConfigVar(
key="binance_perpetual_api_secret",
prompt="Enter your Binance Perpetual API secret >>> ",
required_if=using_exchange("binance_perpetual"),
is_secure=True,
is_connect_key=True,
),
}
OTHER_DOMAINS = ["binance_perpetual_testnet"]
OTHER_DOMAINS_PARAMETER = {"binance_perpetual_testnet": "binance_perpetual_testnet"}
OTHER_DOMAINS_EXAMPLE_PAIR = {"binance_perpetual_testnet": "BTC-USDT"}
OTHER_DOMAINS_DEFAULT_FEES = {"binance_perpetual_testnet": [0.02, 0.04]}
OTHER_DOMAINS_KEYS = {
"binance_perpetual_testnet": {
# add keys for testnet
"binance_perpetual_testnet_api_key": ConfigVar(
key="binance_perpetual_testnet_api_key",
prompt="Enter your Binance Perpetual testnet API key >>> ",
required_if=using_exchange("binance_perpetual_testnet"),
is_secure=True,
is_connect_key=True,
),
"binance_perpetual_testnet_api_secret": ConfigVar(
key="binance_perpetual_testnet_api_secret",
prompt="Enter your Binance Perpetual testnet API secret >>> ",
required_if=using_exchange("binance_perpetual_testnet"),
is_secure=True,
is_connect_key=True,
),
}
}
| 37.324561 | 108 | 0.725969 | 387 | 0.090952 | 0 | 0 | 0 | 0 | 320 | 0.075206 | 1,334 | 0.313514 |
7eae90d3c01eaa4dde6516942f1304a7d7bfc497 | 1,247 | py | Python | Curso Udemy 2022/Curso_Luiz_Otavio/aula_75_ex.py | Matheusfarmaceutico/Exercicios-Python | d1821bd9d11ea0707074c5fe11dead2e85476ebd | [
"MIT"
] | null | null | null | Curso Udemy 2022/Curso_Luiz_Otavio/aula_75_ex.py | Matheusfarmaceutico/Exercicios-Python | d1821bd9d11ea0707074c5fe11dead2e85476ebd | [
"MIT"
] | null | null | null | Curso Udemy 2022/Curso_Luiz_Otavio/aula_75_ex.py | Matheusfarmaceutico/Exercicios-Python | d1821bd9d11ea0707074c5fe11dead2e85476ebd | [
"MIT"
] | null | null | null | def separador():
print("-="*30)
"""
Given two lists of ints or floats (list A and list B),
sum the values in the lists, returning a new list with the summed values.
If one list is longer than the other, the sum only considers the length of the
shorter one.
Example:
lista_a = [1, 2, 3, 4, 5, 6, 7]
lista_b = [1, 2, 3, 4]"""
separador()
# My solution (more pythonic)
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
temp = zip(lista_a,lista_b)
for v in temp:
print(sum(v))
separador()
# More conventional approach, common to all languages
lista_soma = []
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
for i in range(len(lista_b)):
lista_soma.append(lista_a[i] + lista_b[i])
print(lista_soma)
separador()
# Another conventional approach, this time using enumerate, which is Python-specific
lista_soma = []
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
for i, _ in enumerate(lista_b):
lista_soma.append(lista_a[i] + lista_b[i])
print(lista_soma)
separador()
# Luiz Otávio's solution, which I judged the most accurate and correct, using an even more pythonic approach than the one I came up with.
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
lista_soma = [x + y for x,y in (zip(lista_a, lista_b))]
print(lista_soma) | 26.531915 | 122 | 0.690457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 619 | 0.49362 |
7eaf36bdb4bd0189fe015e24c185548232a56d8e | 4,740 | py | Python | Functions_in_Python.py | sichkar-valentyn/Functions_in_Python | 896fdf6fbe186fe319f5ccaddf4d3952d78c32b0 | [
"MIT"
] | 1 | 2021-05-13T20:31:37.000Z | 2021-05-13T20:31:37.000Z | Functions_in_Python.py | sichkar-valentyn/Functions_in_Python | 896fdf6fbe186fe319f5ccaddf4d3952d78c32b0 | [
"MIT"
] | null | null | null | Functions_in_Python.py | sichkar-valentyn/Functions_in_Python | 896fdf6fbe186fe319f5ccaddf4d3952d78c32b0 | [
"MIT"
] | null | null | null | # File: Functions_in_Python.py
# Description: Creating functions in Python
# Environment: Spyder IDE in Anaconda environment
#
# MIT License
# Copyright (c) 2018 Valentyn N Sichkar
# github.com/sichkar-valentyn
#
# Reference to:
# [1] Valentyn N Sichkar. Creating functions in Python // GitHub platform [Electronic resource]. URL: https://github.com/sichkar-valentyn/Functions_in_Python (date of access: XX.XX.XXXX)
"""
Created on Thu Jan 11 16:16:24 2018
@author: Valentyn
"""
import os
import psutil
import shutil
import sys
def sys_info():
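    """Print basic information about the machine and the Python environment."""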
print("Current directory: ", os.getcwd())
print("Number of CPU: ", os.cpu_count())
print("Operation System: ", sys.platform)
print("File system encoding: ", sys.getfilesystemencoding())
def file_duplicating(filename):
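    """Copy filename to a new file with the '.duplication' suffix and report the result."""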
    if os.path.isfile(filename):  # Check that filename points to a file, not a folder
new_file = filename + '.duplication'
shutil.copy(filename, new_file) # Copying the file into another file
if os.path.exists(new_file):
print("The file - ", new_file, " - was successfully created!")
else:
print("Something went wrong!")
else:
print("Something went wrong!")
def file_deleting(filename):
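    """Delete filename if it is a regular file whose name ends with '.duplication'."""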
    if os.path.isfile(filename):  # Check that filename points to a file, not a folder
if filename.endswith('.duplication'): # Checking if the file has the ending 'duplication'
os.remove(filename) # Removing the file
print("The file - ", filename, " - was successfully deleted!")
else:
pass
print("This is a Great Python Program!")
print("Hello there, programmer!")
name = input("What is your name? ")
print(name, ", Welcome!")
answer = ''
while answer != 'Q' and answer != 'N' and answer != 'q' and answer != 'n':
answer = input("Let's work? (Y/N/Q)")
if answer == 'Y' or answer == 'y':
print("Great choice!") # type "pass" for the empty construction
print("I can do for you:")
print("[1] - show list of files and folders in current directory")
print("[2] - show information about System")
print("[3] - show list of running tasks in the System")
print("[4] - duplication of all files in the current directory")
print("[5] - change the current directory")
print("[6] - duplication of specific file in the specific directory")
print("[7] - deleting all files with endings '.duplication' in the specific directory")
todo = int(input("Make your choice: "))
if todo == 1:
print(os.listdir())
elif todo == 2:
sys_info()
elif todo == 3:
print("List of current running PIDs: ", psutil.pids())
elif todo == 4:
print("All files in the current directory are duplicated now!")
file_list = os.listdir()
i = 0
while i < len(file_list):
file_duplicating(file_list[i])
i += 1
elif todo == 5:
print("Type the name of the derictory you want to change in.")
current_directory = input("Input name of the directory: ")
os.chdir(current_directory)
file_list = os.listdir()
print("List of files in chosen directory:")
for file_name in file_list:
print(file_name)
elif todo == 6:
print("Type the name of the derictory you want to work with")
specific_directory = input("Input name of the directory or type '.' for current directory: ")
file_list = os.listdir(specific_directory)
print("Type the name of the file you want to duplicate")
file_to_duplicate = input("Input name of the file: ")
            fullname_of_file_to_duplicate = os.path.join(specific_directory, file_to_duplicate)  # join the directory path and the file name into a full path
file_duplicating(fullname_of_file_to_duplicate)
elif todo == 7:
print("Type the name of the derictory you want to delete duplactes in")
specific_directory = input("Input name of the directory: ")
file_list = os.listdir(specific_directory)
i = 0
while i < len(file_list):
file_deleting(file_list[i])
i += 1
print(i, " files were checked now!")
else: pass # for the empty construction
elif answer == 'N' or answer == 'n':
print("Good by, see you next time!")
elif answer == 'Q' or answer == 'q':
print("You successfully finished the work, see you soon!")
else:
print("Unknown input, try again")
| 41.946903 | 186 | 0.611181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,309 | 0.487131 |
7eb022ba017c96463108c7ec8dbea1b02a79b2bf | 1,571 | py | Python | jaseci_serv/jaseci_serv/base/admin.py | Gorgeous-Patrick/jaseci | b423165fefbbc9574cd4467ee05728add7f47e5a | [
"MIT"
] | 6 | 2021-10-30T03:35:36.000Z | 2022-02-10T02:06:18.000Z | jaseci_serv/jaseci_serv/base/admin.py | Gorgeous-Patrick/jaseci | b423165fefbbc9574cd4467ee05728add7f47e5a | [
"MIT"
] | 85 | 2021-10-29T22:47:39.000Z | 2022-03-31T06:11:52.000Z | jaseci_serv/jaseci_serv/base/admin.py | Gorgeous-Patrick/jaseci | b423165fefbbc9574cd4467ee05728add7f47e5a | [
"MIT"
] | 12 | 2021-11-03T17:29:22.000Z | 2022-03-30T16:01:53.000Z | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from jaseci_serv.base import models
class UserAdmin(BaseUserAdmin):
"""
Customized user listing for admin page
"""
ordering = ["time_created"]
list_display = ["email", "name", "time_created"]
fieldsets = (
(None, {"fields": ("email", "password")}),
(_("Personal Info"), {"fields": ("name",)}),
(
_("Permissions"),
{
"fields": (
"is_active",
"is_activated",
"is_staff",
"is_admin",
"is_superuser",
)
},
),
(_("Important dates"), {"fields": ("last_login",)}),
)
add_fieldsets = (
(None, {"classes": ("wide",), "fields": ("email", "password1", "password2")}),
)
admin.site.register(models.User, UserAdmin)
class JaseciObjectAdmin(admin.ModelAdmin):
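    """
    Customized Jaseci object listing for admin page
    """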
ordering = ["j_timestamp"]
list_display = ("jid", "name", "j_type", "j_parent", "j_master", "j_timestamp")
search_fields = [
"jid",
"name",
"j_type",
"j_parent",
"j_master",
"j_timestamp",
]
admin.site.register(models.JaseciObject, JaseciObjectAdmin)
class GlobalVarsAdmin(admin.ModelAdmin):
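    """
    Customized global variables listing for admin page
    """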
ordering = ["name"]
list_display = ("name", "value")
search_fields = ["name", "value"]
admin.site.register(models.GlobalVars, GlobalVarsAdmin)
| 24.936508 | 86 | 0.551878 | 1,211 | 0.770847 | 0 | 0 | 0 | 0 | 0 | 0 | 467 | 0.297263 |
7eb19e8b95185663f63c59aabd713e0c10af913c | 1,551 | py | Python | app.py | CoDeRgAnEsh/Rent-flask | 1a796196ebe613c7cea77ad6ac96ca91ca98070f | [
"MIT"
] | 3 | 2018-10-21T08:18:16.000Z | 2019-05-20T19:43:23.000Z | app.py | CoDeRgAnEsh/Rent-flask | 1a796196ebe613c7cea77ad6ac96ca91ca98070f | [
"MIT"
] | null | null | null | app.py | CoDeRgAnEsh/Rent-flask | 1a796196ebe613c7cea77ad6ac96ca91ca98070f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import pandas as pd
from flask import Flask, abort, jsonify, request
import pickle
# from flask_accept import accept
from flask_cors import CORS
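# Load the trained XGBoost model once at startup; 'model.pkl' must be present in the working directory.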
with open('model.pkl', 'rb') as model:
xgb_model = pickle.load(model)
features = ['longitude', 'latitude', 'gym', 'lift', 'swimming_pool',
'property_size', 'bathroom', 'floor', 'total_floor', 'balconies',
'building_type_AP', 'building_type_GC', 'building_type_IF',
'building_type_IH', 'parking_BOTH', 'parking_FOUR_WHEELER',
'parking_NONE', 'parking_TWO_WHEELER', 'type_BHK1', 'type_BHK2',
'type_BHK3', 'type_BHK4', 'type_BHK4PLUS', 'type_RK1']
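# NOTE: the order of this feature list must match the column order used to train the model;
# the /api endpoint below builds its input DataFrame in exactly this order.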
# c = pd.DataFrame([77, 12, 0, 0, 0, 500, 1, 1, 2, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
# c = c.transpose()
# c.columns = features
# print(xgb_model.predict(c))
app = Flask(__name__)
CORS(app, supports_credentials=True)
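# Example request to the /api endpoint (a sketch; the values mirror the commented
# test row above and are only illustrative, and the server listens on Flask's
# default port 5000 when started with `python app.py`):
#   curl -X POST http://localhost:5000/api -H "Content-Type: application/json" \
#        -d '{"longitude": 77, "latitude": 12, "gym": 0, "lift": 0, "swimming_pool": 0,
#             "property_size": 500, "bathroom": 1, "floor": 1, "total_floor": 2,
#             "balconies": 0, "building_type_AP": 0, "building_type_GC": 0,
#             "building_type_IF": 1, "building_type_IH": 0, "parking_BOTH": 0,
#             "parking_FOUR_WHEELER": 0, "parking_NONE": 0, "parking_TWO_WHEELER": 1,
#             "type_BHK1": 1, "type_BHK2": 0, "type_BHK3": 0, "type_BHK4": 0,
#             "type_BHK4PLUS": 0, "type_RK1": 0}'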
@app.route('/api', methods=['POST'])
def predict():
data = request.get_json(force=True)
f = open('inputs.txt', 'a+')
f.write(str(data))
f.write('\n')
pred = [data[x] for x in features]
pred = pd.DataFrame(pred)
pred = pred.transpose()
pred.columns = features
    # make a prediction with the loaded model
y = xgb_model.predict(pred)
f.write(str(y))
f.write('\n')
f.close()
return pd.Series(y).to_json(orient='values')
@app.route('/hello', methods=['POST'])
def hello():
data = request.get_json()
return jsonify({'you sent': data})
if __name__ == '__main__':
app.run(host= '0.0.0.0', debug=True) | 26.741379 | 96 | 0.638298 | 0 | 0 | 0 | 0 | 570 | 0.367505 | 0 | 0 | 656 | 0.422953 |