max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
compare.py | andrewyatz/refget-application-note | 0 | 12770251 | #!/usr/bin/env python3
import sys
import os.path
import gzip
from os import path
def process_file(file, output_file):
    """Scan a gzipped, checksum-sorted CSV for checksum collisions.

    Each input row has the format ``checksum,sequence,identifier``. The
    input must be sorted by checksum so duplicate checksums are adjacent;
    only consecutive rows are compared. Whenever two consecutive rows
    share a checksum but carry different sequences, a clash row of the
    form ``clashed_checksum,identifier_one,seq_one,identifier_two,seq_two``
    is written to *output_file*.

    Exits the process with status 1 if *file* does not exist. Any
    pre-existing *output_file* is removed before writing.
    """
    if not path.exists(file):
        print("Cannot continue because {} does not exist".format(file), file=sys.stderr)
        sys.exit(1)
    if path.exists(output_file):
        os.remove(output_file)
    with gzip.open(file, "rt", encoding="utf-8") as f:
        with open(output_file, "w", encoding="utf-8") as output:
            previous = []
            for line in f:
                current = line.rstrip().split(",")
                # Flattened the original three nested ifs into one guard:
                # emit a clash only when the checksums match but the
                # sequences differ.
                if previous and current[0] == previous[0] and current[1] != previous[1]:
                    print(
                        "{},{},{},{},{}".format(
                            previous[0],
                            previous[2],
                            previous[1],
                            current[2],
                            current[1],
                        ),
                        file=output,
                    )
                previous = current
def main():
    """Parse command-line arguments and run the comparison.

    Usage: ./compare.py input.csv.gz report.csv
    """
    if len(sys.argv) != 3:
        # Fixed typo in the usage message: "commpressed" -> "compressed".
        print(
            "Please provide the compressed sorted comma separated file to process and output file"
        )
        print("./compare.py input.csv.gz report.csv")
        sys.exit(0)
    process_file(sys.argv[1], sys.argv[2])
# Script entry point: ./compare.py input.csv.gz report.csv
if __name__ == "__main__":
    main()
| 3.296875 | 3 |
tests/clients/test_users.py | unparalleled-js/py42 | 0 | 12770252 | # -*- coding: utf-8 -*-
import json
import pytest
from requests import Response
import py42.settings
from py42.clients.users import UserClient
from py42.response import Py42Response
# Endpoint under test.
USER_URI = "/api/User"
# Query params UserClient.get_all() is expected to send for its first page.
DEFAULT_GET_ALL_PARAMS = {
    "active": None,
    "email": None,
    "orgUid": None,
    "roleId": None,
    "pgNum": 1,
    "pgSize": 500,
    "q": None,
}
# totalCount (3000) deliberately exceeds the page size so pagination continues.
MOCK_GET_USER_RESPONSE = """{"totalCount": 3000, "users": ["foo"]}"""
# Same shape with an empty "users" list; signals the paginator to stop.
MOCK_EMPTY_GET_USER_RESPONSE = """{"totalCount": 3000, "users": []}"""
# Generic JSON body used for POST responses.
MOCK_text = '{"item_list_key": [{"foo": "foo_val"}, {"bar": "bar_val"}]}'
class TestUserClient(object):
    """Unit tests for py42.clients.users.UserClient.

    Every test drives the client against a mocked session and asserts on
    the URI, query parameters, or JSON body the client hands to the
    session's HTTP verb methods.
    """
    # Fixture: a user page whose totalCount (3000) exceeds the page size,
    # so get_all() keeps requesting further pages.
    @pytest.fixture
    def mock_get_all_response(self, mocker):
        response = mocker.MagicMock(spec=Response)
        response.status_code = 200
        response.encoding = "utf-8"
        response.text = MOCK_GET_USER_RESPONSE
        return Py42Response(response)
    # Fixture: same shape but with no users, used to terminate pagination.
    @pytest.fixture
    def mock_get_all_empty_response(self, mocker):
        response = mocker.MagicMock(spec=Response)
        response.status_code = 200
        response.encoding = "utf-8"
        response.text = MOCK_EMPTY_GET_USER_RESPONSE
        return Py42Response(response)
    # Fixture: generic successful POST response body.
    @pytest.fixture
    def post_api_mock_response(self, mocker):
        response = mocker.MagicMock(spec=Response)
        response.status_code = 200
        response.encoding = "utf-8"
        response.text = MOCK_text
        return Py42Response(response)
    def test_post_create_user_is_successful(self, mock_session, post_api_mock_response):
        user_client = UserClient(mock_session)
        mock_session.post.return_value = post_api_mock_response
        org_uid = "TEST_ORG_ID"
        username = "<EMAIL>"
        password = "password"
        name = "TESTNAME"
        note = "Test Note"
        # username doubles as the email and name doubles as first/last name,
        # matching the expected_params mapping below.
        user_client.create_user(org_uid, username, username, password, name, name, note)
        expected_params = {
            u"orgUid": org_uid,
            u"username": username,
            u"email": username,
            u"password": password,
            u"firstName": name,
            u"lastName": name,
            u"notes": note,
        }
        mock_session.post.assert_called_once_with(
            USER_URI, data=json.dumps(expected_params)
        )
    def test_get_all_calls_get_with_uri_and_params(
        self, mock_session, mock_get_all_response
    ):
        mock_session.get.side_effect = [mock_get_all_response]
        client = UserClient(mock_session)
        # get_all() is a generator; consuming one item triggers the first GET.
        for _ in client.get_all():
            break
        first_call = mock_session.get.call_args_list[0]
        assert first_call[0][0] == USER_URI
        assert first_call[1]["params"] == DEFAULT_GET_ALL_PARAMS
    def test_unicode_username_get_user_by_username_calls_get_with_username(
        self, mock_session, successful_response
    ):
        # Non-ASCII username must pass through to the query params unchanged.
        username = u"您已经发现了秘密信息"
        mock_session.get.return_value = successful_response
        client = UserClient(mock_session)
        client.get_by_username(username)
        expected_params = {u"username": username}
        mock_session.get.assert_called_once_with(USER_URI, params=expected_params)
    def test_get_user_by_id_calls_get_with_uri_and_params(
        self, mock_session, successful_response
    ):
        mock_session.get.return_value = successful_response
        client = UserClient(mock_session)
        client.get_by_id(123456)
        uri = "{}/{}".format(USER_URI, 123456)
        mock_session.get.assert_called_once_with(uri, params={})
    def test_get_all_calls_get_expected_number_of_times(
        self, mock_session, mock_get_all_response, mock_get_all_empty_response
    ):
        # Shrink the page size so two non-empty pages plus the empty
        # terminator yield exactly three GETs; restore it afterwards.
        py42.settings.items_per_page = 1
        client = UserClient(mock_session)
        mock_session.get.side_effect = [
            mock_get_all_response,
            mock_get_all_response,
            mock_get_all_empty_response,
        ]
        for _ in client.get_all():
            pass
        py42.settings.items_per_page = 500
        assert mock_session.get.call_count == 3
    def test_get_scim_data_by_uid_calls_get_with_expected_uri_and_params(
        self, mock_session
    ):
        client = UserClient(mock_session)
        client.get_scim_data_by_uid("USER_ID")
        uri = "/api/v7/scim-user-data/collated-view"
        mock_session.get.assert_called_once_with(uri, params={"userId": "USER_ID"})
    def test_get_available_roles_calls_get_with_expected_uri(self, mock_session):
        client = UserClient(mock_session)
        client.get_available_roles()
        uri = "/api/v4/role/view"
        mock_session.get.assert_called_once_with(uri)
    def test_get_roles_calls_get_with_expected_uri(self, mock_session):
        client = UserClient(mock_session)
        client.get_roles(12345)
        uri = "/api/UserRole/12345"
        mock_session.get.assert_called_once_with(uri)
    def test_add_role_calls_post_with_expected_uri_and_data(self, mock_session):
        client = UserClient(mock_session)
        client.add_role(12345, "Test Role Name")
        uri = "/api/UserRole"
        # JSON key order is not guaranteed, so assert on substrings of the
        # serialized body rather than the whole payload.
        assert mock_session.post.call_args[0][0] == uri
        assert '"roleName": "Test Role Name"' in mock_session.post.call_args[1]["data"]
        assert '"userId": 12345' in mock_session.post.call_args[1]["data"]
    def test_delete_role_calls_delete_with_expected_uri_and_params(self, mock_session):
        client = UserClient(mock_session)
        client.remove_role(12345, "Test Role Name")
        # Role name must be URL-encoded into the query string.
        uri = "/api/UserRole?userId=12345&roleName=Test%20Role%20Name"
        mock_session.delete.assert_called_once_with(uri)
    def test_get_page_calls_get_with_expected_url_and_params(self, mock_session):
        client = UserClient(mock_session)
        client.get_page(10, True, "email", "org", "role", 100, "q")
        mock_session.get.assert_called_once_with(
            "/api/User",
            params={
                "active": True,
                "email": "email",
                "orgUid": "org",
                "roleId": "role",
                "pgNum": 10,
                "pgSize": 100,
                "q": "q",
            },
        )
| 2.328125 | 2 |
VFD_MDM166.py | pluschris/VFD_MDM166 | 1 | 12770253 | #!/usr/bin/python3
# #####################################
# info: This class can connect to VFD MDM166
#
# date: 2017-06-13
# version: 0.1.1
#
# Dependencies:
# $ sudo apt-get install python3-dev libusb-1.0-0-dev libudev-dev python3-pip
# $ sudo pip3 install --upgrade setuptools
# $ sudo pip3 install hidapi
# place a file 99-hidraw-vfd-permissions.rules with this line to /etc/udev/rules.d:
# SUBSYSTEM=="usb", ATTR{idVendor}=="19c2", ATTR{idProduct}=="6a11", MODE="0666"
#
# history:
#
# #####################################
# Import solution :-)
import hid
import dot_matrix_font
class usbVFD:
    """Driver for the VFD MDM166 vacuum-fluorescent display over USB HID.

    Every payload is written through ``send_command``, which prefixes it
    with a single length byte as the device protocol requires.
    """
    def __init__(self, vid=0x19c2, pid=0x6a11):
        """Open the USB-HID connection to the display and load the font."""
        self.dev = hid.device()
        self.dev.open(vendor_id=vid, product_id=pid)
        self.font = dot_matrix_font.dot_matrix_font()
    def send_command(self, command):
        """Send *command* (bytes) to the device, length byte first."""
        length = bytes([len(command)])
        self.dev.write(length + command)
    ########################################################################################
    # general commands:
    def dimming(self, luminance=100):
        """Set display brightness from a 0..100 percentage (3 HW levels)."""
        command = b'\x1b\x40'
        if luminance >= 75:
            command += b'\x02'
        elif luminance >= 25:
            command += b'\x01'
        else:
            command += b'\x00'
        self.send_command(command)
    def clear_display(self):
        """Blank the whole display."""
        self.send_command(command=b'\x1b\x50')
    def all_on(self):
        """Turn every pixel/segment on (lamp test)."""
        self.send_command(command=b'\x1b\x55')
    def reset(self):
        """Reset the display controller.

        Bug fix: this previously sent ``b'\\1F'``, which is the two bytes
        0x01 'F' (``\\1`` is an octal escape), not a valid command; the
        intended single reset byte is 0x1F.
        """
        self.send_command(command=b'\x1f')
    def set_addr_counter(self, add):
        """Position the write address counter at *add*."""
        self.send_command(command=b'\x1b\x60' + bytes([add]))
    def write_grafic(self, data):
        """Write raw column data at the current address counter."""
        self.send_command(command=b'\x1b\x70' + bytes([len(data)]) + bytes(data))
    ########################################################################################
    # clock:
    def calc_BCD(self, n):
        """Encode *n* as packed BCD (tens in the high nibble).

        NOTE(review): values are clamped to 0xFF first, but a tens digit
        above 15 would still overflow the high nibble — fine for the
        hour/minute range this is used with.
        """
        if n > 0xFF:
            n = 0xFF
        higher_nibble, lower_nibble = divmod(n, 10)
        return higher_nibble << 4 | lower_nibble
    def set_clock_data(self, hour, minute):
        """Load the current time into the display's clock (BCD-encoded)."""
        self.send_command(
            command=b'\x1B\x00' + bytes([self.calc_BCD(minute)]) + bytes([self.calc_BCD(hour)]))
    def set_clock_format(self, clock_format='24h', row='1row'):
        """Show the hardware clock; 12h/24h format, one- or two-row layout.

        NOTE(review): the default ``row='1row'`` never matches the
        ``'upper'`` check, so the default takes the ``\\x02`` branch —
        behaviour kept as-is, confirm against the device datasheet.
        """
        command = b'\x1b'
        if row == 'upper':
            command += b'\x01'
        else:
            command += b'\x02'
        if clock_format == '24h':
            command += b'\x01'
        else:
            command += b'\x00'
        self.send_command(command)
    ########################################################################################
    # symbols: symbol=address of symbol, grayscale from 0...100%
    def set_symbol(self, symbol, grayscale=100):
        """Set the symbol at address *symbol* to one of 3 brightness levels."""
        command = b'\x1B\x30' + symbol
        if grayscale >= 75:
            command += b'\x02'
        elif grayscale >= 25:
            command += b'\x01'
        else:
            command += b'\x00'
        self.send_command(command)
    ######
    # named access to symbols for convenience
    #
    def set_play(self, grayscale=100):
        self.set_symbol(symbol=b'\x00', grayscale=grayscale)
    def set_pause(self, grayscale=100):
        self.set_symbol(symbol=b'\x01', grayscale=grayscale)
    def set_rec(self, grayscale=100):
        self.set_symbol(symbol=b'\x02', grayscale=grayscale)
    def set_envelope(self, grayscale=100):
        self.set_symbol(symbol=b'\x03', grayscale=grayscale)
    def set_envelope_at(self, grayscale=100):
        self.set_symbol(symbol=b'\x04', grayscale=grayscale)
    def set_mute(self, grayscale=100):
        self.set_symbol(symbol=b'\x05', grayscale=grayscale)
    def set_i(self, grayscale=100, segment=1):
        """Set one of the four 'i' segments (0x06..0x09)."""
        if segment <= 1:
            segment = 1
        elif segment >= 4:
            segment = 4
        # Bug fix: grayscale was previously dropped, so the segment was
        # always set at the default brightness.
        self.set_symbol(symbol=bytes([0x05 + segment]), grayscale=grayscale)
    def set_vol_logo(self, grayscale=100):
        self.set_symbol(symbol=b'\x0A', grayscale=grayscale)
    def set_vol_bar(self, grayscale=100, segment=1):
        """Set one of the 14 volume bar segments (0x0B..0x18)."""
        if segment <= 1:
            segment = 1
        elif segment >= 14:
            segment = 14
        # Bug fix: grayscale was previously dropped here as well.
        self.set_symbol(symbol=bytes([0x0A + segment]), grayscale=grayscale)
    ########################################################################################
    # write text: line is 0 for upper row, 1 for lower row
    def write_str(self, text, row=0):
        """Render *text* column by column using the dot-matrix font.

        Each character occupies 6 columns; every column spans two
        addresses (upper and lower half), hence the step of 2.
        """
        char_count = 0
        for char in text:
            addr_count = 0
            for i in range(0, 6):
                # send column after column:
                self.set_addr_counter(addr_count + char_count * 12 + row)
                # Renamed the comprehension variable (was `row`, shadowing
                # the parameter) for clarity; behaviour is unchanged.
                col = [str(bits[i]) for bits in self.font.str_to_dot_matrix(char)]
                col = [int(''.join(col), 2)]
                self.write_grafic(col)
                addr_count += 2  # each column has two addresses: upper and lower
            char_count += 1
| 2.640625 | 3 |
lib/cogs/ping.py | weibolu-rm/weibolu-bot | 0 | 12770254 | <filename>lib/cogs/ping.py
from discord.ext.commands import Cog
from discord.ext.commands import command
class Ping(Cog):
    """Cog exposing a single latency-check command."""
    def __init__(self, bot):
        self.bot = bot
    # ctx is a context ctx.send == ctx.channel.send
    @command(name="ping")
    async def ping(self, ctx):
        # bot.latency is in seconds; report it in milliseconds.
        await ctx.send("pong! {0:.2f}ms".format(self.bot.latency * 1000))
def setup(bot):
bot.add_cog(Ping(bot)) | 2.78125 | 3 |
SRC/Check.py | vscv/HuWeiSPClassification | 0 | 12770255 | # ==============================================================================
# 2017_04_15 LSW@NCHC.
#
# Change 3 code to use new in, out dir name for fit the needs.
# cp new.image to /out/ so the code of classify.py does not need to change.
#
# USAGE: time py Check.py /home/TF_io/
# ==============================================================================
"""Daemon function with Popen call.
Glue code to check image dir then call next function.
NOTE: pyinstaller this Check.py to Check.exe before you use it.
"""
import os, time
import sys
from shutil import copyfile
import subprocess
# Command-line arguments: this script's own name and the IO root directory.
this_n = sys.argv[0]
io_dir = sys.argv[1]
# New images arrive in <io_dir>/in/ and are copied to <io_dir>/out/.
path_to_watch = io_dir + "/in/"
path_to_check = io_dir + "/out/"
# Snapshot of the .jpg files currently present in the watch directory.
before = dict ([(f, None) for f in os.listdir(path_to_watch) if f.endswith('.jpg')])
# Poll forever, once per second, for newly added .jpg files.
while 1:
    time.sleep (1)
    after = dict ([(f, None) for f in os.listdir(path_to_watch) if f.endswith('.jpg')])
    for f in after:
        if not f in before:
            #print("Added: ", ", ".join (f))
            # check if cfg exist, else exit this loop
            # The ROI config is named after the camera id, i.e. the part of
            # the image filename before the first underscore.
            if os.path.isfile(io_dir + "/" + f.split('_')[0] + ".cfg"):
                print("roi_cfg,", io_dir + "/" + f.split('_')[0] + ".cfg", "exist:", os.path.isfile(io_dir + "/" + f.split('_')[0] + ".cfg"))
                print("New Image Found:", f)
                print("cp",path_to_watch + f, "to", path_to_check + f)
                copyfile(path_to_watch + "/" + f, path_to_check + "/" + f)
                print("roi_cfg:", f.split('_')[0] + ".cfg")
                # Call classify.exe
                path_out_img = path_to_check + f
                path_cam_cfg = io_dir + "/" + f.split('_')[0] + ".cfg"
                # Blocks until classification finishes, then echoes its output.
                p = subprocess.Popen(['./classify.exe', "--image_file", path_out_img, path_cam_cfg, "--model_dir", "hw_model"], stdout = subprocess.PIPE, stderr=subprocess.PIPE)
                stdout, stderr = p.communicate()
                print(stdout, stderr)
            else:
                print("roi_cfg,", io_dir + "/" + f.split('_')[0] + ".cfg", "exist:", os.path.isfile(io_dir + "/" + f.split('_')[0] + ".cfg"))
    # Report files that disappeared since the last snapshot.
    removed = [f for f in before if not f in after]
    #if added:
    #    for a in added:
    #        print("Added: ", ", ".join (a))
    #        print(roi_cfg = a.split('_'))
    if removed: print("Removed: ", ", ".join (removed))
    before = after
| 2.453125 | 2 |
src/config.py | Viewly/alpha-2 | 0 | 12770256 | <reponame>Viewly/alpha-2
import json
from os import getenv, environ, getcwd, path
from dotenv import load_dotenv, find_dotenv
from funcy import rpartial
from toolz import pipe
def load_json_file(path_):
    """Read the file at *path_* and return its parsed JSON contents.

    Replaces the previous funcy/toolz pipeline
    (``rpartial(pipe, open, lambda x: x.read(), json.loads)``), which never
    closed the file handle it opened; this version closes it
    deterministically.
    """
    with open(path_, encoding='utf-8') as fp:
        return json.load(fp)
def config_folder_prefix():
    # TODO: quick hack, tightly coupled to the 'alpha-2' folder name.
    # Works out the config path whether we run from src/, tests/ or a
    # Docker-based working directory, by cutting the cwd at 'alpha-2'
    # and re-attaching 'alpha-2/config'.
    repo_root = getcwd().split('alpha-2')[0]
    return path.join(repo_root, 'alpha-2/config')
def load_json_config(name):
    """Load `<name>.<env>.json` from the config folder (env = prod/dev)."""
    # IS_PRODUCTION is a module-level global defined further down; it is
    # bound by the time this function is first called.
    env = 'prod' if IS_PRODUCTION else 'dev'
    return load_json_file(f'{config_folder_prefix()}/{name}.{env}.json')
# load default config
# NOTE: `environ['X']` (vs getenv) is deliberate for required settings —
# a missing variable fails fast with KeyError at import time.
IS_PRODUCTION = bool(getenv('PRODUCTION', False))
if not IS_PRODUCTION:
    load_dotenv(find_dotenv())
FLASK_ENV = environ['FLASK_ENV'].lower()
# base config
SECRET_KEY = getenv('SECRET_KEY', 'not_a_good_secret')
# needed for Disqus plugin, shared /w nginx reverse proxy
VIRTUAL_HOST = getenv('VIRTUAL_HOST', 'http://localhost:5000')
# amazon manager credentials
AWS_MANAGER_PUBLIC_KEY = environ['AWS_MANAGER_PUBLIC_KEY']
AWS_MANAGER_PRIVATE_KEY = environ['AWS_MANAGER_PRIVATE_KEY']
# amazon s3 upload signatures
S3_UPLOADER_PUBLIC_KEY = environ['S3_UPLOADER_PUBLIC_KEY']
S3_UPLOADER_PRIVATE_KEY = environ['S3_UPLOADER_PRIVATE_KEY']
# amazon s3 upload bucket
S3_UPLOADS_BUCKET = environ['S3_UPLOADS_BUCKET']
S3_UPLOADS_REGION = environ['S3_UPLOADS_REGION']
# amazon s3 processed assets (videos, thumbnails, etc.) location
S3_VIDEOS_BUCKET = environ['S3_VIDEOS_BUCKET']
S3_VIDEOS_REGION = environ['S3_VIDEOS_REGION']
# amazon Cloud Formation distribution ID
CDN_DISTRIBUTION_ID = environ['CDN_DISTRIBUTION_ID']
# videos and thumbnails CDN
CDN_URL = getenv('CDN_URL', 'https://cdn.view.ly')
# player url
PLAYER_URL = getenv('PLAYER_URL', 'https://player.view.ly')
# PostgreSQL
SQLALCHEMY_DATABASE_URI = getenv('POSTGRES_URL', 'postgres://localhost/alpha')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
SQLALCHEMY_POOL_SIZE = 50
SQLALCHEMY_MAX_OVERFLOW = 200
# Email
MAIL_SERVER = getenv('MAIL_SERVER', 'smtp.mandrillapp.com')
MAIL_USERNAME = getenv('MAIL_USERNAME', 'viewly')
MAIL_PASSWORD = getenv('MAIL_PASSWORD', '')
MAIL_PORT = int(getenv('MAIL_PORT', 587))
MAIL_DEFAULT_SENDER = ('Viewly Alpha', '<EMAIL>')
# Flask-Security
SECURITY_TOKEN_MAX_AGE = 3600
SECURITY_PASSWORD_SALT = ""
# email confirmation / captcha are enforced only in production
SECURITY_CONFIRMABLE = IS_PRODUCTION
SECURITY_REGISTERABLE = True
SECURITY_RECOVERABLE = True
SECURITY_TRACKABLE = True
SECURITY_PASSWORDLESS = False
SECURITY_CHANGEABLE = True
SECURITY_EMAIL_SUBJECT_REGISTER = "Welcome to Viewly Alpha 2. Please confirm your email."
RECAPTCHA_ENABLED = IS_PRODUCTION
RECAPTCHA_SITE_KEY = environ['RECAPTCHA_SITE_KEY']
RECAPTCHA_SECRET_KEY = environ['RECAPTCHA_SECRET_KEY']
# Celery
CELERY_BACKEND_URL = getenv('CELERY_BACKEND_URL', 'redis://localhost:6379/0')
CELERY_BROKER_URL = getenv('CELERY_BROKER_URL', 'redis://localhost:6379/0')
# Logging
SENTRY_DSN = getenv('SENTRY_DSN')
# Disqus
DISQUS_PUBLIC_KEY = getenv('DISQUS_PUBLIC_KEY')
DISQUS_SECRET_KEY = getenv('DISQUS_SECRET_KEY')
# Ethereum chain
ETH_CHAIN = environ['ETH_CHAIN']
INFURA_KEY = environ['INFURA_KEY']
INFURA_KEY_FE = environ['INFURA_KEY_FE']
# Ethereum Contracts
VIEW_TOKEN_ADDRESS = environ['VIEW_TOKEN_ADDRESS']
VIDEO_PUBLISHER_ADDRESS = environ['VIDEO_PUBLISHER_ADDRESS']
VOTING_POWER_DELEGATOR_ADDRESS = environ['VOTING_POWER_DELEGATOR_ADDRESS']
# Contract ABIs are loaded from disk at import time.
VIEW_TOKEN_ABI = load_json_file(f'{config_folder_prefix()}/ViewToken.abi.json')
VIDEO_PUBLISHER_ABI = load_json_file(f'{config_folder_prefix()}/VideoPublisher.abi.json')
VOTING_POWER_DELEGATOR_ABI = load_json_file(
    f'{config_folder_prefix()}/VotingPowerDelegator.abi.json')
# Ethereum contract configuration / Governance
DISTRIBUTION_GAME_DAYS = getenv('DISTRIBUTION_GAME_DAYS', 7)
GAS_PRICE = int(getenv('GAS_PRICE', 20))  # in gwei
# Elastic Transcoder
elastic_transcoder = load_json_config('elastic_transcoder')
# potentially separate into classes
# then load with app.config.from_obj('config.Development')
#
# class Development:
#     SECRET_KEY = getenv(...)
#
# class Production:
#     SECRET_KEY = getenv(...)
| 2.3125 | 2 |
dependencies/src/4Suite-XML-1.0.2/Ft/Lib/DistExt/Version.py | aleasims/Peach | 0 | 12770257 | <filename>dependencies/src/4Suite-XML-1.0.2/Ft/Lib/DistExt/Version.py
import re
from distutils.version import Version, StrictVersion
__all__ = ['CommonVersion', 'VersionPredicate', 'SplitProvision',
'SplitComparison',
]
class CommonVersion(Version):
    """
    Version numbering that handles most version numbering schemes.
    Implements the standard interface for version number classes as
    described by distutils.version.Version.
    A version consists of an alternating series of release numbers followed
    by an optional series of pre-release or post-release tags. A release
    number is a series of dot-separated numeric components. Release tags are
    a series of letters optionally followed by a release number. The
    pre-release tag name is alphabetically before "final". The post-release
    tag name is alphabetically greater than or equal to "final".
    For example, "1.0b2.dev-r41475" could denote Subversion revision 41475 of
    the in-development version of the second beta of release 1.0. Notice that
    "dev" is a pre-release tag, so this version is a lower version number
    than 1.0b2, which would be the actual second beta of release 1.0. But
    the "-r41475" is a post-release tag, so this version is newer than
    "1.0b2.dev".

    NOTE: this is Python 2 code — __cmp__ calls the cmp() builtin, and
    parse() relies on map() returning a list.
    """
    version_re = re.compile(r'\d+(\.\d+)*')
    tag_re = re.compile(r'[_.-]?([a-zA-Z]+)?(\d+(?:\.\d)*)?')
    # 'tag_aliases' maps release tags to the tag that should be used for
    # comparison purposes.
    tag_aliases = {'pr' : 'c',
                   'pre' : 'c',
                   'preview' : 'c',
                   'rc' : 'c',
                   }
    def parse(self, vstring):
        # save the original string for use by __str__
        self._original = vstring
        def versiontuple(vstring):
            """
            Converts a dot-separated version number into a tuple of ints
            with any trailing zeros removed.
            """
            # Python 2 semantics: map() returns a list, which supports
            # the `del version[-1]` below.
            version = map(int, vstring.split('.'))
            while version and not version[-1]:
                del version[-1]
            return tuple(version)
        # Get the version number
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError("invalid version number: %r" % vstring)
        self.version = versiontuple(match.group())
        # Check for pre- and post-release tags
        tags = []
        start = match.end()
        end = len(vstring)
        while start < end:
            match = self.tag_re.match(vstring, start)
            if not match:
                raise ValueError("invalid release tag: %r" % vstring[start:])
            tag, version = match.groups()
            # Tags compare case-insensitively; aliases normalize e.g. 'rc'
            # to 'c' so "1.0rc1" and "1.0c1" sort identically.
            tag = tag and tag.lower()
            if tag in self.tag_aliases:
                tag = self.tag_aliases[tag]
            if version:
                version = versiontuple(version)
            else:
                version = None
            tags.append((tag, version))
            start = match.end()
        self.tags = tuple(tags)
        return
    def __str__(self):
        return self._original
    def __cmp__(self, other):
        # Python 2 comparison hook: compare release numbers first, then
        # the tag sequences. Uses the cmp() builtin (absent in Python 3).
        if isinstance(other, str):
            other = self.__class__(other)
        compare = cmp(self.version, other.version)
        if compare == 0:
            compare = cmp(self.tags, other.tags)
        return compare
# Prefer the stdlib implementations; fall back to local equivalents with
# the same public names when distutils.versionpredicate is unavailable.
try:
    from distutils.versionpredicate import VersionPredicate, \
        split_provision as SplitProvision, \
        splitUp as SplitComparison
except ImportError:
    import operator
    re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)")
    re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses
    re_provision = re.compile(
        "([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$")
    re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
    # Maps a comparison-operator string to the corresponding operator func.
    compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
               ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
    class VersionPredicate:
        """
        Parse and test package version predicates.
        """
        def __init__(self, versionPredicateStr):
            """Parse a version predicate string."""
            # Fields:
            #    name: package name
            #    pred: list of (comparison string, StrictVersion)
            versionPredicateStr = versionPredicateStr.strip()
            if not versionPredicateStr:
                raise ValueError("empty package restriction")
            match = re_validPackage.match(versionPredicateStr)
            if not match:
                raise ValueError("bad package name in %r" % versionPredicateStr)
            self.name, paren = match.groups()
            paren = paren.strip()
            if paren:
                match = re_paren.match(paren)
                if not match:
                    raise ValueError("expected parenthesized list: %r" % paren)
                # NOTE: `str` shadows the builtin within this scope only.
                str = match.groups()[0]
                self.pred = [ SplitComparison(p) for p in str.split(",") ]
                if not self.pred:
                    raise ValueError("empty parenthesized list in %r"
                                     % versionPredicateStr)
            else:
                self.pred = []
        def __str__(self):
            if self.pred:
                seq = [cond + " " + str(ver) for cond, ver in self.pred]
                return self.name + " (" + ", ".join(seq) + ")"
            else:
                return self.name
        def satisfied_by(self, version):
            """True if version is compatible with all the predicates in self.
            The parameter version must be acceptable to the StrictVersion
            constructor. It may be either a string or StrictVersion.
            """
            for cond, ver in self.pred:
                if not compmap[cond](version, ver):
                    return False
            return True
    # originally distutils.versionpredicate.split_provision()
    def SplitProvision(value):
        """Return the name and optional version number of a provision.
        The version number, if given, will be returned as a `StrictVersion`
        instance, otherwise it will be `None`.
        """
        value = value.strip()
        m = re_provision.match(value)
        if not m:
            raise ValueError("illegal provides specification: %r" % value)
        ver = m.group(2) or None
        if ver:
            ver = StrictVersion(ver)
        return m.group(1), ver
    # originally distutils.versionpredicate.splitUp()
    def SplitComparison(pred):
        """Parse a single version comparison.
        Return (comparison string, StrictVersion)
        """
        res = re_splitComparison.match(pred)
        if not res:
            raise ValueError("bad package restriction syntax: %r" % pred)
        comp, verStr = res.groups()
        return (comp, StrictVersion(verStr))
| 2.359375 | 2 |
test/integration/test_main.py | RedHatOfficial/receptor | 6 | 12770258 | <filename>test/integration/test_main.py
import asyncio
import socket
from unittest.mock import patch
import pytest
import receptor
from receptor.config import ReceptorConfig
from receptor.receptor import Receptor
# Fixture: minimal node configuration listening on an ephemeral local port.
@pytest.fixture
def receptor_config(unused_tcp_port, tmpdir, type="node"):
    return ReceptorConfig(
        ["--data-dir", tmpdir.strpath, type, "--listen", "127.0.0.1:" + str(unused_tcp_port)]
    )
# Fixture: a single Receptor service named "A" built from receptor_config.
@pytest.fixture
def receptor_service(receptor_config):
    return Receptor(config=receptor_config, node_id="A")
@pytest.fixture
def receptor_service_factory(unused_tcp_port_factory, tmpdir):
    """Factory fixture producing named Receptor services with optional peers.

    Returns a callable taking (node_name, peer_ports, type) and yielding a
    Receptor wired to listen on an ephemeral local port, peered to
    127.0.0.1:<port> for each given peer port.
    """
    def _receptor_service(node_name, peer_ports=None, type="node"):
        if peer_ports is None:
            peer_ports = []
        peers = {"127.0.0.1:{}".format(p): "" for p in peer_ports}
        peer_config = []
        for peer in peers:
            peer_config.extend(["--peer", peer])
        base_config = [
            "--node-id",
            node_name,
            "--data-dir",
            tmpdir.strpath,
            type,
            "--listen",
            # Bug fix: the port was previously concatenated without the
            # ":" separator ("127.0.0.1" + port), yielding an invalid
            # listen address like "127.0.0.158927" (compare the
            # receptor_config fixture above).
            "127.0.0.1:" + str(unused_tcp_port_factory()),
        ]
        base_config.extend(peer_config)
        receptor_config = ReceptorConfig(base_config)
        return Receptor(receptor_config)
    return _receptor_service
async def connect_port(receptor_obj):
    # Try up to five times, one second apart, to open a TCP connection to
    # the service's listen address; once it connects (or attempts are
    # exhausted), signal the service to stop.
    n = 5
    while n:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        node, port = receptor_obj.config.node_listen[0].split(":")
        result = sock.connect_ex((node, int(port)))
        if result != 0:
            # Not accepting connections yet; back off and retry.
            await asyncio.sleep(1)
            n = n - 1
            continue
        break
    receptor_obj.stop = True
async def wait_for_time(seconds):
    """Suspend the current task for *seconds* without blocking the loop."""
    delay = seconds
    await asyncio.sleep(delay)
# Patch the low-level socket server so no real listener coroutine runs;
# connect_port() stops the controller once the port accepts connections.
@patch("receptor.connection.sock.serve")
def test_main_node(mock_sock, event_loop, receptor_config):
    c = receptor.Controller(receptor_config, loop=event_loop)
    event_loop.call_soon(event_loop.create_task, connect_port(c.receptor))
    c.enable_server(receptor_config.node_listen)
    c.run()
    mock_sock.assert_called_once()
| 2.296875 | 2 |
rest_framework_docs/compat.py | harwee/django-rest-framework-docs | 4 | 12770259 | try:
from django.urls import (
URLPattern,
URLResolver,
)
except ImportError:
# Will be removed in Django 2.0
from django.urls import (
RegexURLPattern as URLPattern,
RegexURLResolver as URLResolver,
)
# This is from the similarly named compat.py file of django-rest-framework 3.7
def get_regex_pattern(urlpattern):
    """Return the raw regex string behind a URL pattern.

    Works for both Django 2.0+ (where the pattern object lives on
    ``urlpattern.pattern``) and earlier releases (where ``regex`` hangs
    directly off the urlpattern). The result is always a regular
    expression string.
    """
    if hasattr(urlpattern, 'pattern'):
        # Django 2.0+: RegexPattern/RoutePattern owns the compiled regex.
        owner = urlpattern.pattern
    else:
        # Django < 2.0: the urlpattern itself owns it.
        owner = urlpattern
    return owner.regex.pattern
def is_url_resolver(instance):
    """True if *instance* is a URL resolver under any supported Django."""
    return isinstance(instance, URLResolver)
def is_url_pattern(instance):
    """True if *instance* is a URL pattern under any supported Django."""
    return isinstance(instance, URLPattern)
| 2.296875 | 2 |
mmdet/models/utils/token_transformer_block.py | sota-joson/KE-RCNN | 0 | 12770260 | from timm.models.layers.weight_init import trunc_normal_
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from einops import rearrange
from mmcv.cnn import build_conv_layer, kaiming_init
class FeatEmbed(nn.Module):
    """Feature map to patch embedding.

    Splits the input feature map into non-overlapping patches and embeds
    each patch with a strided convolution.

    Args:
        img_size (int | tuple): Size of input feature map.
        patch_size (int): Size of one patch.
        in_channels (int): Channel num of input features. Defaults to 256.
        embed_dims (int): Dimensions of embedding. Defaults to 256.
        conv_cfg (dict | None): Config dict for convolution layer. Defaults to
            `dict(type='Conv2d')`.
    """
    def __init__(self,
                 img_size,
                 patch_size,
                 in_channels=256,
                 embed_dims=256,
                 conv_cfg=dict(type='Conv2d')):
        super().__init__()
        self.img_size = _pair(img_size)
        self.patch_size = _pair(patch_size)
        num_patches = (self.img_size[1] // self.patch_size[1]) * (
            self.img_size[0] // self.patch_size[0])
        # Reject patch sizes that do not tile the input exactly.
        assert num_patches * self.patch_size[0] * self.patch_size[1] == \
            self.img_size[0] * self.img_size[1], \
            'The image size H*W must be divisible by patch size'
        self.num_patches = num_patches
        # Use conv layer to embed: kernel == stride == patch_size gives
        # non-overlapping patches.
        self.projection = build_conv_layer(
            conv_cfg,
            in_channels,
            embed_dims,
            kernel_size=patch_size,
            stride=patch_size)
        self.init_weights()
    def init_weights(self):
        # Lecun norm from ClassyVision
        kaiming_init(self.projection, mode='fan_in', nonlinearity='linear')
    def forward(self, x):
        # Project and flatten spatial dims, then move the embedding axis
        # last: (b, d, n) -> (b, n, d).
        x = self.projection(x).flatten(2)
        x = rearrange(x, 'b d n -> b n d')
        return x
| 2.265625 | 2 |
src/models.py | luisgc93/stock_reminder_bot | 26 | 12770261 | from datetime import datetime, timedelta
from os import environ
from peewee import (
BigIntegerField,
DateField,
DateTimeField,
CharField,
FloatField,
Model,
BooleanField,
InternalError,
)
from playhouse.db_url import connect
# Use default sqlite db in tests
db = connect(environ.get("DATABASE_URL") or "sqlite:///default.db")
class BaseModel(Model):
    """Base peewee model binding every subclass to the shared database."""
    class Meta:
        database = db
class Reminder(BaseModel):
    """A stock-price reminder created from a tweet."""
    user_name = CharField()
    tweet_id = BigIntegerField()
    created_on = DateField()
    remind_on = DateTimeField()
    stock_symbol = CharField()
    stock_price = FloatField()
    short = BooleanField(default=False)
    is_finished = BooleanField(default=False)
    class Meta:
        table_name = "reminders"
    def finish(self):
        # Mark the reminder as handled and persist immediately.
        self.is_finished = True
        self.save()
    def refresh_from_db(self):
        # NOTE: returns a *new* instance fetched by primary key; it does
        # not mutate self in place.
        return Reminder.get_by_id(self.id)
    @classmethod
    def create_instance(cls, values):
        # Insert inside a transaction so a failed INSERT is rolled back
        # rather than leaving partial state.
        with db.atomic() as transaction:
            try:
                Reminder.create(
                    user_name=values["user_name"],
                    tweet_id=values["tweet_id"],
                    created_on=values["created_on"],
                    remind_on=values["remind_on"],
                    stock_symbol=values["stock_symbol"],
                    stock_price=values["stock_price"],
                    short=values["short"],
                )
            except InternalError:
                transaction.rollback()
    @classmethod
    def due_now(cls):
        # Unfinished reminders whose remind_on falls within +/- 3 minutes
        # of the current time.
        return cls.select().where(
            cls.remind_on.between(
                # TODO: I think this should rather fetch all reminders for today's date.
                # If the job fails, upon retry, the reminder might not be fetched if
                # it's outside of the 6 min window
                datetime.now() - timedelta(minutes=3),
                datetime.now() + timedelta(minutes=3),
            ),
            cls.is_finished == False,  # noqa
        )
def migrate():
    """Create the reminders table if it does not exist yet.

    Bug fix: ``db.get_tables()`` returns table *name strings*, so the old
    guard ``if [Reminder] not in tables`` could never match and the table
    creation ran unconditionally; compare against the model's configured
    table name instead.
    """
    tables = db.get_tables()
    if Reminder._meta.table_name not in tables:
        db.create_tables([Reminder])
# Allow running this module directly to create any missing tables.
if __name__ == "__main__":
    migrate()
| 2.640625 | 3 |
scripts/002_activity_to_land_use_mapping.py | UrbanIntelligenceLab/Exposure-Density-and-Neighborhood-Disparities-in-COVID-19-Infection-Risk | 0 | 12770262 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Description:
This PySPark scripts maps geolocated mobility data for valid users
to specific land use type where the activity occured and counts
number of unique users within each land use type aggregated to 250m x 250m
neighborhoods in New York City.
"""
# imports
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper
from pyspark.sql.types import * # import types
import numpy as np
from math import sin, cos, sqrt, atan2, radians
# One shared SparkSession for the whole job.
spark = SparkSession.builder.getOrCreate()
def distance_km(x1, y1, x2, y2, radius=6373.0):
    """Great-circle (haversine) distance in kilometres between two points.

    Args:
        x1, y1: longitude and latitude of the first point, decimal degrees.
        x2, y2: longitude and latitude of the second point, decimal degrees.
        radius: sphere radius in km; defaults to an approximate Earth
            radius (generalized from the previously hard-coded constant,
            backward compatible).

    Returns:
        Surface distance between the two points in kilometres.
    """
    lat1 = radians(y1)
    lon1 = radians(x1)
    lat2 = radians(y2)
    lon2 = radians(x2)
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    # Haversine formula.
    a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return radius * c
# Load grid data
land_use = spark.read.parquet('/raster/grid_classification/parquet_grid_data/')
# Raster cell size, in degrees, of the land-use grid.
x_raster_step = 0.000009
y_raster_step = 0.000012
# Load venpath data activity
df = spark.read.parquet('<directory-to-mobility-data-on-HDFS>')
# Normalize device ids to upper case for consistent grouping/joins.
df = df.withColumn('ad_id_upper', upper(col('ad_id')))
# define boundaries extent (lower-left / upper-right corners of NYC bbox)
llc_lon = -74.2555954656
llc_lat = 40.4961100684
urc_lon = -73.7000071112
urc_lat = 40.9155259862
# subset data based on bounding box; drop the all-zero placeholder ad_id
nyc = df.filter((col('ad_id')!='00000000-0000-0000-0000-000000000000') \
    & (col('lon')>=llc_lon) \
    & (col('lon')<=urc_lon) \
    & (col('lat')>=llc_lat) \
    & (col('lat')<=urc_lat) )
# create date column
nyc = nyc.withColumn("date", to_date(col("timestamp")))
# find valid users based on number of days active (> 14 distinct days)
ad_id_count = nyc.groupby("ad_id_upper").agg(countDistinct("date").alias('day_count')).withColumnRenamed("ad_id_upper", "id")
ad_id_count_filtered = ad_id_count.filter((col("day_count")>14))
nyc = nyc.join(ad_id_count_filtered, nyc.ad_id_upper == ad_id_count_filtered.id, how='inner')
# cast raster cell indices
nyc = nyc.withColumn("x_raster_cell", ((nyc["lon"]-llc_lon) / x_raster_step).cast('integer'))
nyc = nyc.withColumn("y_raster_cell", ((nyc["lat"]-llc_lat) / y_raster_step).cast('integer'))
# join with land use raster (left join keeps pings with no land-use match)
nyc = nyc.join(land_use, (nyc.x_raster_cell == land_use.x_cell) & (nyc.y_raster_cell == land_use.y_cell), how='left')
# calculate the extent of the bounding box in kilometers
xx = distance_km(llc_lon, np.mean([llc_lat, urc_lat]), urc_lon, np.mean([llc_lat, urc_lat]))
yy = distance_km(np.mean([llc_lon, urc_lon]), llc_lat, np.mean([llc_lon, urc_lon]), urc_lat)
# find number of 250 m cells in x and y dimension
x_grid = xx / 0.25
y_grid = yy / 0.25
# define the x and y step size in geographic coordinates
x_grid_step = (urc_lon - llc_lon)/x_grid
y_grid_step = (urc_lat - llc_lat)/y_grid
# assign cell x, y, coordinates (cell centers) and index for each ping
nyc = nyc.withColumn("x_250m_cell", ((nyc["lon"]-llc_lon) / x_grid_step).cast('integer'))
nyc = nyc.withColumn("cell_250m_lon", llc_lon+nyc["x_250m_cell"]*x_grid_step+0.5*x_grid_step)
nyc = nyc.withColumn("y_250m_cell", ((nyc["lat"]-llc_lat) / y_grid_step).cast('integer'))
nyc = nyc.withColumn("cell_250m_lat", llc_lat+nyc["y_250m_cell"]*y_grid_step+0.5*y_grid_step)
nyc = nyc.withColumn('cell_index', concat(col("x_250m_cell"), lit(";"), col("y_250m_cell")))
# create hour column (truncate timestamps to the hour)
nyc = nyc.withColumn("hour", date_format(col("timestamp").cast("timestamp"), "yyyy-MM-dd HH:00"))
# count unique devices per hour, grid cell and land-use class; save to file
hourly_counts = nyc.groupby("hour", "cell_index", "class").agg(countDistinct("ad_id_upper"))
hourly_counts.write \
    .format("com.databricks.spark.csv") \
    .mode("overwrite") \
    .save("/user/bjb417/covid/output/nyc/nyc_land_use/nyc_250mGrid_landUse_uniqueDev_hourlyCounts_active14days.csv")
# save 250m x 250m grid information (one row per distinct cell)
grid = nyc.select("cell_index", "x_250m_cell", "y_250m_cell", "cell_250m_lon", "cell_250m_lat") \
    .drop_duplicates(subset=['cell_index'])
grid.write \
    .format("com.databricks.spark.csv") \
    .mode("overwrite") \
.save("/user/bjb417/covid/output/nyc/nyc_land_use/nyc_250mGrid_landUse_active14days.csv") | 2.859375 | 3 |
stevesie/resources/task_dependency.py | Stevesie/stevesie-py | 1 | 12770263 | <reponame>Stevesie/stevesie-py<filename>stevesie/resources/task_dependency.py
from typing import NamedTuple, Sequence
from datetime import datetime
from stevesie.remote_resource import RemoteResource
from stevesie.resources.task_collection_field import TaskCollectionField
class TaskDependencyTuple(NamedTuple):
    """Immutable field layout for a task-dependency record."""
    id: str
    variable_name: str
    name: str
    sample_value: str
    created_at: datetime

# Make every field optional by defaulting all constructor arguments to None.
TaskDependencyTuple.__new__.__defaults__ = (None,) * len(TaskDependencyTuple._fields)
class TaskDependency(TaskDependencyTuple, RemoteResource):
    """Task-dependency resource: tuple fields combined with remote-resource behaviour."""
    pass
| 2.09375 | 2 |
fat_checker_utils/image_and_manifest_preparation.py | dr-darryl-wright/fat_checker_utils | 0 | 12770264 | import os
import random
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def make_blank_placeholder(image_file, out_file):
    """Write a white placeholder image of the same size as *image_file*,
    labelled "No Data" near its centre with a small random position jitter.
    """
    #print(out_file)
    image = np.asarray(Image.open(image_file))
    blank = np.ones(image.shape)*255
    blank = blank.astype(np.uint8)
    #print(blank.shape)
    im = Image.fromarray(blank)
    draw = ImageDraw.Draw(im)
    # NOTE(review): image.shape[0] is the row count (height) and shape[1] the
    # width, while draw.text takes (x, y) = (width, height) -- for non-square
    # images the label would be offset; confirm whether this is intended.
    (x, y) = ((image.shape[0]//2)-50+random.randint(-10,10), (image.shape[1]//2)-50+random.randint(-10,10))
    # NOTE(review): macOS-specific font path; this fails on other platforms.
    font = ImageFont.truetype('/Library/Fonts/Arial Bold.ttf', 45)
    message = "No Data"
    color = 'rgb(0, 0, 0)' # black color
    draw.text((x, y), message, fill=color, font=font)
    #im.convert('L')
    im.save(out_file)
def reduce_quality(image_file):
    """Re-save the image in place at JPEG quality 90 to shrink its file size."""
    Image.open(image_file).save(image_file, quality=90)
def main():
    """Walk ``--path`` for numbered slice images, pad each series with blank
    placeholder frames at both ends, and write ``manifest.csv`` describing
    5-frame sliding windows (one row per real slice).
    """
    parser = argparse.ArgumentParser(description='Process some images.')
    parser.add_argument('--path', metavar='path', type=str,
                        help='path to images')
    parser.add_argument('--volume_identifier', metavar='vol_id', type=str,
                        help='unique volume identifier e.g. 3R_ROI1')
    args = parser.parse_args()
    path = args.path
    vol_id = args.volume_identifier
    #old_dirpath=None
    # NOTE(review): images/id_nums are never reset between directories (the
    # reset below is commented out), so later directories re-include earlier
    # files -- confirm whether that is intended.
    images = []
    id_nums = []
    '''
    metadata = {'Raw Z resolution (nm)': 50,
                'Raw XY resolution (nm)': 10,
                'Volume ID': vol_id,
                'default_frame': 3,
                '#set': None}
    '''
    # NOTE(review): manifest is never closed; rely on interpreter exit flush.
    manifest = open(os.path.join(path+'manifest.csv'),'w')
    manifest.write((',').join(['image1', 'image2', 'image3', 'image4', 'image5', 'Raw Z resolution (nm)', 'Raw XY resolution (nm)', 'Volume ID', 'default_frame', '#set\n']))
    for (dirpath, dirnames, filenames) in os.walk(path):
        #if dirpath != old_dirpath:
        #    images = []
        #    id_nums = []
        for f in filenames:
            '''
            metadata = {'Raw Z resolution (nm)': 50,
                        'Raw XY resolution (nm)': 10,
                        'default_frame': 3}
            '''
            image_file = os.path.join(dirpath, f)
            if '.DS_Store' in image_file:
                continue
            if '.csv' in image_file:
                continue
            #print(image_file)
            #if 'ROI1' in image_file:
            #    reduce_quality(image_file)
            # NOTE: str.strip('.jpg') removes any of the characters ., j, p, g
            # from BOTH ends of the string; it is NOT a suffix removal.  The
            # 'ank'/'opy' special cases below compensate: a name ending in
            # '_blank.jpg' strips to '..._blank' (last three chars 'ank'),
            # and one ending in ' copy.jpg' yields 'opy'.
            file_stub = image_file.strip('.jpg')[:-3] + '%03d_blank.jpg'
            id_num = image_file.strip('.jpg')[-3:]
            if id_num == 'ank':
                # Already-generated blank placeholder image: skip it.
                continue
            if id_num == 'opy':
                id_num = image_file.strip(' copy.jpg')[-3:]
                file_stub = image_file.strip(' copy.jpg')[:-3] + '%03d_blank.jpg'
            id_nums.append(int(id_num))
            images.append(image_file)
        if images == []:
            continue
        # Order the images by their numeric slice id.
        sorted_images = [x for _,x in sorted(zip(id_nums,images))]
        id_nums.sort()
        # Two blank frames before the first slice and two after the last so
        # every real slice can sit at the centre of a 5-frame window.
        make_blank_placeholder(images[0], file_stub%(id_nums[0]-2))
        make_blank_placeholder(images[0], file_stub%(id_nums[0]-1))
        make_blank_placeholder(images[0], file_stub%(id_nums[-1]+1))
        make_blank_placeholder(images[0], file_stub%(id_nums[-1]+2))
        images = [file_stub%(id_nums[0]-2), file_stub%(id_nums[0]-1)] + \
                 sorted_images + \
                 [file_stub%(id_nums[-1]+1), file_stub%(id_nums[-1]+2)]
        #print(len(images))
        for i in range(2,len(images)-2, 1):
            #print(len(images[i-2:i+3]))
            print(images[i-2:i+3])
            #print(','.join(images[i-2:i+2]))
            manifest.write((',').join([im.split('/')[-1] for im in images[i-2:i+3]]+['50', '10', vol_id, '3', dirpath.split('/')[-1]])+'\n')
main()
| 3.078125 | 3 |
sentiment/app.py | sethah/sentiment-explorer | 0 | 12770265 | import pickle
import logging
import hashlib
import numpy as np
import os
from pathlib import Path
import spacy
import shutil
import sys
import tarfile
import tempfile
import torch
from typing import Dict, List
sys.path.append("nbsvm")
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from lime.lime_text import LimeTextExplainer
from allennlp.models.archival import load_archive
from allennlp.data import Vocabulary
from allennlp.data.dataset_readers import DatasetReader
from flask import Flask, request, Response, jsonify, render_template, send_from_directory
logging.basicConfig(level=logging.INFO)

# Shared NLTK resources used by LemmaTokenizer below.
stemmer = SnowballStemmer('english')
stopWords = set(stopwords.words('english'))
class LemmaTokenizer(object):
    """Callable tokenizer: word-tokenize, drop stopwords, lemmatize then stem."""

    def __init__(self):
        self.wnl = WordNetLemmatizer()

    def __call__(self, articles):
        tokens = []
        for token in word_tokenize(articles):
            if token in stopWords:
                continue
            tokens.append(stemmer.stem(self.wnl.lemmatize(token)))
        return tokens
# this was done to make sure the model unpickles correctly (may not actually be necessary)
# (pickled pipelines look the tokenizer up as __main__.LemmaTokenizer, so it must exist there)
setattr(sys.modules["__main__"], LemmaTokenizer.__name__, LemmaTokenizer)
class LimePredictor(object):
    """Base class adapting a sentiment model to the LIME explainer API.

    Args:
        idx2label: mapping from class index (0..n-1) to its label string.
    """

    def __init__(self, idx2label: Dict[int, str]):
        self.idx2label = idx2label
        self.label2idx = {label: index for index, label in idx2label.items()}
        # Labels ordered by class index, as LIME expects.
        self.class_names = [idx2label[index] for index in range(len(idx2label))]

    def predict(self, text: str) -> Dict[str, np.ndarray]:
        """Return a dict with at least 'label' and 'probs' for one text."""
        raise NotImplementedError

    def predict_batch(self, texts: List[str]) -> np.ndarray:
        """Return an (n_texts, n_classes) array of class probabilities."""
        raise NotImplementedError
class NBSVMLimePredictor(LimePredictor):
    """LimePredictor backed by a pickled scikit-learn NB-SVM pipeline."""

    def __init__(self, model_path: str):
        """Load the pickled pipeline and derive the index->label mapping."""
        model_path = Path(model_path)
        with open(str(model_path), "rb") as f:
            self.model = pickle.load(f)
        # The second pipeline step is the linear classifier; give it a
        # predict_proba so LIME can query class probabilities.
        nbsvm = self.model.steps[1][1]
        nbsvm.predict_proba = nbsvm._predict_proba_lr
        self.idx2label = {i: l for i, l in enumerate(nbsvm.classes_.tolist())}
        super(NBSVMLimePredictor, self).__init__(self.idx2label)

    def predict(self, text: str) -> Dict[str, np.ndarray]:
        """Return {'label', 'logits', 'probs'} for a single text."""
        out = {}
        out['label'] = self.model.predict([text])[0]
        logits = self.model.predict_proba([text])[0]
        out['logits'] = logits
        # NOTE: 'probs' and 'logits' are the same array here (linear model
        # probabilities); kept distinct keys for API parity with other models.
        out['probs'] = logits
        return out

    def predict_batch(self, texts: List[str]) -> np.ndarray:
        """Return an (n_texts, n_classes) array of class probabilities."""
        return self.model.predict_proba(texts)
class AllenNLPLimePredictor(LimePredictor):
    """LimePredictor backed by an archived AllenNLP text classifier."""

    def __init__(self, archive_path: str, device: int = -1, batch_size: int = 32):
        """Load the model archive; device -1 means CPU, otherwise a CUDA index."""
        archive_path = Path(archive_path)
        archive = load_archive(archive_path)
        self.params = archive.config
        self.model = archive.model.eval()
        self.batch_size = batch_size
        self.reader = DatasetReader.from_params(self.params.get("dataset_reader"))
        self.vocab = self._load_vocab(archive_path)
        self.idx2label = self.vocab.get_index_to_token_vocabulary('labels')
        if device != -1:
            self.model.to(f"cuda:{device}")
        super(AllenNLPLimePredictor, self).__init__(self.idx2label)

    @staticmethod
    def _load_vocab(archive_path: Path) -> Vocabulary:
        # an annoying hack to load the vocab file: extract the archive to a
        # temp dir, read the vocabulary directory, then clean up.
        tempdir = tempfile.mkdtemp()
        with tarfile.open(archive_path, 'r:gz') as _archive:
            _archive.extractall(tempdir)
        vocab_path = Path(tempdir) / "vocabulary"
        vocab = Vocabulary.from_files(vocab_path)
        shutil.rmtree(tempdir)
        return vocab

    def predict(self, text: str) -> Dict[str, np.ndarray]:
        """Run the model on a single text and return its output dict."""
        return self.model.forward_on_instance(self.reader.text_to_instance(text))

    def predict_batch(self, texts: List[str]) -> np.ndarray:
        """Return an (n_texts, n_classes) array of class probabilities."""
        with torch.no_grad():
            instances = [self.reader.text_to_instance(t) for t in texts]
            # Chunk into mini-batches to bound memory use.
            instance_chunks = [instances[x: x + self.batch_size] for x in
                               range(0, len(instances), self.batch_size)]
            preds = []
            for batch in instance_chunks:
                pred = self.model.forward_on_instances(batch)
                preds.extend(pred)
            probs = [p['probs'] for p in preds]
            return np.stack(probs, axis=0)
class ServerError(Exception):
    """Application error carrying an HTTP status code and optional payload."""

    status_code = 400  # default when no explicit code is given

    def __init__(self, message, status_code=None, payload=None):
        super().__init__()
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return the payload (if any) merged with the error message."""
        result = dict(self.payload) if self.payload else {}
        result['message'] = self.message
        return result
app = Flask(__name__)  # pylint: disable=invalid-name

# We hash the javascript file and use it as a cache breaker.
# Bug fix: use a context manager so the file handle is closed promptly
# (the original left it open for the lifetime of the process).
hasher = hashlib.md5()
with open("static/app.js") as app_js:
    hasher.update(app_js.read().encode('utf-8'))
js_hash = hasher.hexdigest()

# Sentence splitter; heavy spaCy components we don't need are disabled.
nlp = spacy.load('en_core_web_sm', disable=['vectors', 'textcat', 'tagger', 'ner'])
# nlp.add_pipe(nlp.create_pipe('sentencizer'))
split_expr = lambda text: [sent.string.strip() for sent in nlp(text).sents]

# Model artifacts live under ~/.models; fall back to the current directory.
home_path = Path(os.environ.get("HOME", "."))
nbsvm_predictor = NBSVMLimePredictor(home_path / ".models/nbsvm_imdb_sent_500.pkl")
device = 0 if torch.cuda.is_available() else -1
bert_predictor = AllenNLPLimePredictor(home_path / ".models/bert_base_1000.tar.gz", device=device)

# NBSVM treats the sentences as a bag (bow=True); BERT is order-sensitive.
nbsvm_explainer = LimeTextExplainer(class_names=nbsvm_predictor.class_names,
                                    bow=True, split_expression=split_expr)
bert_explainer = LimeTextExplainer(class_names=bert_predictor.class_names,
                                   bow=False, split_expression=split_expr)

models = {
    'bert': {'explainer': bert_explainer, 'predictor': bert_predictor},
    'nbsvm': {'explainer': nbsvm_explainer, 'predictor': nbsvm_predictor}
}
@app.errorhandler(ServerError)
def handle_invalid_usage(error: ServerError) -> Response:  # pylint: disable=unused-variable
    """Translate a ServerError into a JSON response carrying its status code."""
    body = error.to_dict()
    resp = jsonify(body)
    resp.status_code = error.status_code
    return resp
@app.route('/')
def index() -> Response:  # pylint: disable=unused-variable
    """Serve the single-page application shell."""
    context = {
        "google_analytics_ua": "UA-120916510-5",  # TODO:don't hardcode this!
        "js_hash": js_hash,
    }
    return render_template('app.html', **context)
@app.route('/static/<path:path>')
def static_proxy(path: str) -> Response: # pylint: disable=unused-variable
    """Serve files from the local ./static directory."""
    return send_from_directory('static', path)
@app.route('/predict', methods=['POST', 'OPTIONS'])
def predict() -> Response: # pylint: disable=unused-variable
    """Classify the posted text and return sentence-level LIME attributions."""
    # CORS preflight: reply immediately with an empty 200.
    if request.method == "OPTIONS":
        return Response(response="", status=200)
    data = request.get_json()
    previous_str = data["previous"]
    # Log the query
    app.logger.info(f"<{previous_str}>")
    # LIME attributes scores per sentence, so split on sentence boundaries.
    lime_tokens = split_expr(previous_str)
    model_name = data.get("model_name", "BERT").lower()
    predictor = models[model_name]['predictor']
    explainer = models[model_name]['explainer']
    app.logger.info(f"Using model {model_name}")
    out = predictor.predict(previous_str)
    class_probabilities = out['probs'].tolist()
    label = out['label']
    # labels=[1] asks LIME to explain only the class at index 1;
    # num_samples=100 keeps latency low at the cost of explanation stability.
    explanation = explainer.explain_instance(previous_str, predictor.predict_batch,
                                             num_features=10, labels=[1], num_samples=100)
    score_dict = dict(explanation.as_list(1))
    # Sentences LIME did not score get a neutral 0.0 weight.
    lime_scores = [score_dict.get(tok, 0.) for tok in lime_tokens]
    if predictor.label2idx['neg'] != 0:
        # we need to reverse the lime scores
        # (LIME explained class index 1; when 'neg' is not index 0 the sign
        #  convention flips, so negate to keep positive == positive sentiment)
        lime_scores = [-1 * score for score in lime_scores]
    # make sure class probabilities are always consistently ordered
    class_probabilities = [class_probabilities[predictor.label2idx[lbl]] for lbl in ['neg', 'pos']]
    app.logger.info(label)
    app.logger.info(lime_scores)
    app.logger.info(lime_tokens)
    app.logger.info(class_probabilities)
    return jsonify({
        "lime_scores": lime_scores,
        "lime_tokens": lime_tokens,
        "label": label,
        "class_probabilities": class_probabilities,
        "words": lime_tokens,
        "output": previous_str,
        "sentiment": label
    })
if __name__ == "__main__":
    # NOTE(review): threaded=False -- presumably the predictors/LIME state
    # are not thread-safe; confirm before enabling threading.
    app.run(host='0.0.0.0', threaded=False)
| 2.1875 | 2 |
listas/media_alunos.py | fernando-datageo/Python | 0 | 12770266 | <filename>listas/media_alunos.py
# -*- coding: utf-8 -*-
"""
Programa que recebe como entrada dois arquivos:
O primeiro arquivo contém nomes de alunos
O segundo arquivo contém as notas dos alunos
E será gerado um terceiro arquivo contendo as médias.
"""
def acertarNotas(aluno, nota):
    """Compute each student's average grade and write the 'listamedias' report.

    ``aluno`` is a file with one student name per line; ``nota`` holds the
    matching whitespace-separated grades, one line per student in the same
    order.  Each output line has the form ``<name> <grades-list> <average>``.

    Args:
        aluno: path to the names file.
        nota: path to the grades file.
    """
    listanota = []
    # Bug fix: context managers guarantee both files are closed even when a
    # grade line is malformed (the original leaked the handles on error).
    with open(aluno, "r") as f1, open(nota, "r") as f2:
        for linha in f1:
            # Bug fix: strip the trailing newline from the name; the original
            # kept it, splitting every record across two lines in the output.
            nome = linha.rstrip("\n")
            notas = f2.readline().split()           # grades as list of strings
            valores = [float(val) for val in notas]
            # Average of the grades; grade lines are expected to be non-empty.
            media = sum(valores) / len(valores)
            listanota.append("{} {} {}\n".format(nome, notas, media))
    with open('listamedias', 'w') as arquivo:
        arquivo.writelines(listanota)
    return
# NOTE(review): this runs at import time with hard-coded file names; consider
# moving it under an ``if __name__ == '__main__':`` guard.
acertarNotas("aluno.csv","nota.csv") # Run the function on the two external input files
src/masonite/managers/QueueManager.py | Abeautifulsnow/masonite | 1 | 12770267 | <filename>src/masonite/managers/QueueManager.py<gh_stars>1-10
"""Queue Manager Module."""
from ..contracts import QueueManagerContract
from .Manager import Manager
class QueueManager(Manager, QueueManagerContract):
    """Manages all queue drivers.

    Arguments:
        Manager {from .managers.Manager} -- The base Manager class.
    """

    # Configuration key the base Manager reads driver settings from.
    config = "queue"
    # Prefix prepended to driver names when resolving driver classes.
    driver_prefix = "Queue"
class Queue:
    """Dummy class that will be used to swap out the manager in the container."""
    # Intentionally empty: serves only as a container binding key.
    pass
| 2.40625 | 2 |
maestrowf/abstracts/containers/__init__.py | kennyweiss/maestrowf | 90 | 12770268 | ###############################################################################
# Copyright (c) 2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by <NAME>, <EMAIL>.
#
# LLNL-CODE-734340
# All rights reserved.
# This file is part of MaestroWF, Version: 1.0.0.
#
# For details, see https://github.com/LLNL/maestrowf.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Module that defines containers for storing various types of information."""
class Record(object):
    """A container class for holding general information keyed by name."""

    def __init__(self):
        """Create a Record with no stored information."""
        self._info = {}

    def get(self, key, default=None):
        """Look up *key* in the stored information.

        :param key: The key to look up.
        :param default: Value returned when the key is absent (Default: None).
        :returns: The stored value for *key*, or *default* when missing.
        """
        if key in self._info:
            return self._info[key]
        return default
| 1.203125 | 1 |
victim/models.py | knowapi/DeveloperPortalExamples | 2 | 12770269 | from django.db import models
from operation.models import Operation
from processor.utils import push_record_to_sqs_queue
import logging
# Django ``choices`` for Victim.safety_level: (stored int, display label).
SAFETY_LEVELS = (
    (0, 'SAFE'),
    (1, 'NOT CONFIRMED'),
    (2, 'UNREACHABLE'),
    (3, 'NEED_HELP'),
    (4, 'NOT IN ZONE')
)
class Victim(models.Model):
    """
    Used to store refugee information
    """
    name = models.CharField(max_length=64)
    phone_number = models.CharField(max_length=20, unique=True)
    notification_contact_number = models.CharField(max_length=20, blank=True)
    safety_level = models.IntegerField(choices=SAFETY_LEVELS, default=1)
    retry_count = models.IntegerField(default=0)
    location = models.TextField(null=True)
    additional_information = models.TextField(null=True)
    status_updated_by = models.TextField(null=True)
    # Bug fix: on_delete is required on Django >= 2.0 (omitting it raises a
    # TypeError).  CASCADE matches the implicit default older Django applied.
    # NOTE(review): default=None without null=True will fail DB validation on
    # save -- confirm whether this field should also be nullable.
    operation = models.ForeignKey(Operation, blank=True, default=None,
                                  on_delete=models.CASCADE)

    def save(self, *args, **kwargs):
        """Persist the record, then push its id to the SQS processing queue."""
        super(Victim, self).save(*args, **kwargs)
        # Lazy %-style args so the message is only formatted when emitted.
        logging.info('Added a new refugee with ID = %d', self.id)
        push_record_to_sqs_queue(self.id)
| 2.078125 | 2 |
app/routes/actor.py | jabertuhin/dvdrental-backend | 1 | 12770270 | from fastapi import APIRouter, Depends
from app.dtos.responses.actor import ActorsDto, ActorDto
from app.services.actor_service import ActorService
from app.services.implementations.actor_service_implementation import (
ActorServiceImplementation,
)
# Router grouping all actor endpoints under the "Actor Resource" tag.
router = APIRouter(tags=["Actor Resource"])


@router.get(path="/actors", response_model=ActorsDto)
async def get_actors(
    actor_service: ActorService = Depends(ActorServiceImplementation),
) -> ActorsDto:
    """Return every actor; the service is injected via FastAPI's Depends."""
    return await actor_service.get_all_actors()
@router.get(path="/actors/{actor_id}", response_model=ActorDto)
async def get_actor(
    actor_id: int, actor_service: ActorService = Depends(ActorServiceImplementation)
) -> ActorDto:
    """Return a single actor looked up by its integer id."""
    return await actor_service.get_actor(actor_id=actor_id)
| 2.328125 | 2 |
abduct/stream.py | movermeyer/python-abduct | 4 | 12770271 | <filename>abduct/stream.py
import sys
from contextlib2 import contextmanager
from abduct.compat import StringIO
def stdout(release_on_exception=False, tee=False):
    """Context manager capturing sys.stdout; flags as in make_stream_context."""
    return make_stream_context('stdout', release_on_exception, tee)


def stderr(release_on_exception=False, tee=False):
    """Context manager capturing sys.stderr; flags as in make_stream_context."""
    return make_stream_context('stderr', release_on_exception, tee)
def make_stream_context(stream_name, release_on_exception, tee):
    """Build a context manager that swaps ``sys.<stream_name>`` for a capture buffer.

    While active, writes go to a StringIO -- or, when ``tee`` is true, to a
    TeeStream that also forwards to the real stream.  If an exception escapes
    the body and ``release_on_exception`` is set (and we are not teeing, since
    teed output already reached the real stream), the captured text is written
    to the real stream before re-raising.
    """
    real_stream = getattr(sys, stream_name)
    # Note: the capture buffer is created here, at call time, not on __enter__.
    fake_stream = TeeStream((real_stream,)) if tee else StringIO()

    @contextmanager
    def context():
        try:
            setattr(sys, stream_name, fake_stream)
            yield fake_stream
        except Exception:
            if release_on_exception and not tee:
                real_stream.write(fake_stream.getvalue())
            raise
        finally:
            # Always restore the real stream, even on error.
            setattr(sys, stream_name, real_stream)

    return context()
class TeeStream(object):
    """File-like object that duplicates writes to several target streams.

    Everything written is also captured in an internal StringIO; its other
    methods (e.g. ``getvalue``) are exposed through ``__getattr__``.
    """

    def __init__(self, target_streams):
        self.__impl = StringIO()
        # The internal buffer is itself one of the write targets.
        self.__target_streams = tuple(target_streams) + (self.__impl,)

    def __getattr__(self, name):
        # Delegate anything not defined here (getvalue, etc.) to the buffer.
        return getattr(self.__impl, name)

    def __for_each_target(self, method, *args, **kwargs):
        # Invoke *method* on every target stream, buffer included.
        for t in self.__target_streams:
            getattr(t, method)(*args, **kwargs)

    def flush(self):
        self.__for_each_target('flush')

    def write(self, s): # pylint: disable=invalid-name
        self.__for_each_target('write', s)

    def writelines(self, iterable):
        for i in iterable:
            self.write(i)
| 2.5 | 2 |
src/thex/app.py | harris-2374/THEx | 0 | 12770272 | <gh_stars>0
import dash
import dash_bootstrap_components as dbc
from flask import cli
cli.show_server_banner = lambda *_: None  # silence the Flask startup banner
# NOTE(review): external_stylesheets is defined but not passed to Dash below
# (the app uses dbc.themes.DARKLY) -- confirm whether it is still needed.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(
    __name__,
    external_stylesheets = [dbc.themes.DARKLY],
    suppress_callback_exceptions = True,
    serve_locally = True,
)
app.title = "THEx"
# Underlying Flask server, exposed for WSGI deployment.
server = app.server
| 1.75 | 2 |
codility/countingElements/permCheck.py | j-dags/Algos | 0 | 12770273 | # A non-empty array A consisting of N integers is given.
# A permutation is a sequence containing each element from 1 to N once, and only once.
# For example, array A such that:
# A[0] = 4
# A[1] = 1
# A[2] = 3
# A[3] = 2
# is a permutation, but array A such that:
# A[0] = 4
# A[1] = 1
# A[2] = 3
# is not a permutation, because value 2 is missing.
# The goal is to check whether array A is a permutation.
# Write a function that, given an array A, returns 1 if array A is a permutation and 0 if it is not.
# Write an efficient algorithm for the following assumptions:
# N is an integer within the range [1..100,000];
# each element of array A is an integer within the range [1..1,000,000,000].
# O(n)t | O(n)s
def permCheck(A):
    """Return 1 when A is a permutation of 1..len(A), otherwise 0."""
    seen = set()
    n = len(A)
    for value in A:
        # Out of range or duplicated: cannot be a permutation.
        if value < 1 or value > n or value in seen:
            return 0
        seen.add(value)
    return 1
| 3.890625 | 4 |
src/model/domain.py | ajaykumarsampath/microgrid-model | 1 | 12770274 | <gh_stars>1-10
from dataclasses import dataclass
from functools import cached_property
from typing import List
import numpy as np
from model.component_interface import IComponent, IGridNetwork
from model.generator_interface import IGeneratorComponent
from shared.component import BUS_ID
# NOTE(review): this looks like it was meant to be a type alias (e.g.
# Tuple[str, BUS_ID]); a list of classes is unusual -- confirm intent.
UNIT_BUS_ID_MAP = [str, BUS_ID]
# Conversion factor from seconds to hours.
SEC_TO_HOUR_FACTOR = 1 / 3600
@dataclass(frozen=True)
class MicrogridModelData:
    """Immutable description of a microgrid: components, grid and bus wiring."""
    name: str
    generators: List[IGeneratorComponent]
    loads: List[IComponent]
    grid_model: IGridNetwork
    # Bus id of each generator / load, positionally aligned with the lists above.
    generator_bus_ids: List[BUS_ID]
    load_bus_ids: List[BUS_ID]

    @cached_property
    def model_bus_ids(self) -> List[BUS_ID]:
        """All distinct bus ids, in first-appearance order (generators first)."""
        unique_bus = []
        for i, e in enumerate(self.generator_bus_ids + self.load_bus_ids):
            if e not in unique_bus:
                unique_bus.append(e)
        return unique_bus

    @cached_property
    def valid_data(self):
        """True when component/bus lists are consistent and the grid validates.

        NOTE(review): validation relies on ``assert`` statements, which are
        stripped under ``python -O`` -- consider explicit checks.
        """
        grid_buses = self.grid_model.buses
        try:
            self._check_non_unique_ids(self.generators)
            self._check_non_unique_ids(self.loads)
            assert len(self.generators) == len(self.generator_bus_ids)
            assert len(self.loads) == len(self.load_bus_ids)
            assert len(grid_buses) == len(self.model_bus_ids)
            assert all([bus in self.model_bus_ids for bus in grid_buses])
            return self.grid_model.validate_grid_model()
        except AssertionError:
            return False

    def _check_non_unique_ids(self, components: List[IComponent]):
        # Raises AssertionError when any two components share a name.
        component_names = [c.name for c in components]
        assert len(component_names) == len(set(component_names))

    def unit_bus_matrix(self):
        """Return the (n_units, n_buses) incidence matrix: 1 where a unit sits
        on a bus; generators occupy the first rows, loads the remaining ones.
        """
        num_generators = len(self.generator_bus_ids)
        num_loads = len(self.load_bus_ids)
        cols_unit_bus_mat = len(self.model_bus_ids)
        _unit_bus_mat = np.zeros((num_generators + num_loads, cols_unit_bus_mat))
        for count, bus_id in enumerate(self.generator_bus_ids):
            bus_id_index = self.model_bus_ids.index(bus_id)
            _unit_bus_mat[count, bus_id_index] = 1
        for count, bus_id in enumerate(self.load_bus_ids):
            bus_id_index = self.model_bus_ids.index(bus_id)
            _unit_bus_mat[count + num_generators, bus_id_index] = 1
        return _unit_bus_mat
| 2.265625 | 2 |
Server/PythonCrawl/src/main.py | yodebu/Interview-Street | 0 | 12770275 | <gh_stars>0
#!/usr/bin/env python
'''
--------------------------------------------------
--------------------------------------------------
@ Module : Main Module
@ Name: GeeksForGeeks Article Extractor
@ Purpose: To download and save articles filed under each and every tag mentioned in www.geeksforgeeks.org
@ Author: <NAME>
Dept of CSE, NIT Durgapur
V1.0 - 06.02.2015 - basic implementation
# MIT License - used for non-commercial purposes, used for college project work
# Special thanks to - GeeksForGeeks.org
--------------------------------------------------
--------------------------------------------------
'''
import os
from bs4 import BeautifulSoup
from optparse import OptionParser
import crawler
from crawler import *
import dbconn
from dbconn import *
#parser to parse and pass arguments into the Program
def parse_options():
    """Parse the command line and return ``(options, positional_args)``."""
    usage = "usage: prog [options] (arg1, arg2, ... argn)"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--tag", \
                      type="string", \
                      action="store", \
                      dest="inp_tag", \
                      default = "", \
                      help="input search tags for downloading from the website")
    parser.add_option("-n", "--name", \
                      type="string", \
                      action="store", \
                      dest="inp_name", \
                      default = "", \
                      help="Enter your name to be stored in the database")
    parser.add_option("-e", "--email", \
                      type="string", \
                      action="store", \
                      dest="inp_email", \
                      default = "", \
                      help="Enter your email to be stored in the database")
    parser.add_option("-l", "--location", \
                      type= "string", \
                      action= "store", \
                      dest= "inp_location", \
                      default = "/home/yodebu/Desktop/Project/Interview-Street/Server/Files/", \
                      help= "location where downloaded files willl be stored, update this according to your directory")
    opts, args = parser.parse_args()
    return opts, args
##-----------------------------------------------------
## MAIN PROGRAM
# main function
def main():
    """Entry point: read CLI options, persist the user record, then crawl."""
    options, _args = parse_options()
    dbSave(options.inp_name, options.inp_email, options.inp_tag)
    ExtractMainLinks(options.inp_tag, options.inp_location)
# Run the crawler when executed as a script.
if __name__ == "__main__": main()
| 2.75 | 3 |
ucb/raw_ucb.py | Kylin824/federated-learning | 0 | 12770276 | import numpy as np
import matplotlib.pyplot as plt
# 计算delta
def calculate_delta(t, chosen_count, item):
    """UCB1 exploration bonus for arm *item* at time *t*.

    Unpulled arms get a bonus of 1 so they are explored first; otherwise the
    bonus is sqrt(2 * ln(t) / pulls).
    """
    pulls = chosen_count[item]
    if pulls == 0:
        return 1
    return np.sqrt(2 * np.log(t) / pulls)
def choose_arm(upper_bound_probs):
    """Return the index of the largest value, breaking ties uniformly at random."""
    probs = np.asarray(upper_bound_probs)
    best = np.flatnonzero(probs == probs.max())
    if best.size == 1:
        return best[0]
    # Several arms share the maximum: pick one of them at random.
    return np.random.choice(best, 1)[0]
def train():
    """Run a UCB1 simulation over 10 Bernoulli arms and plot cumulative regret.

    (Inline comments translated from the original Chinese.)
    """
    # Time steps visited so far (x-axis of the plot).
    T = []
    # Number of selectable arms.
    num_arms = 10
    # Cumulative reward actually earned.
    total_reward = 0
    total_best_reward = 0
    total_reward_with_T = []
    total_regret_with_T = []
    np.random.seed(23)
    true_rewards_prop = np.random.uniform(low=0, high=1, size=num_arms)  # each arm's true payout probability
    true_max_prop_arm = np.argmax(true_rewards_prop)
    print("true reward prop: \n", true_rewards_prop)
    print("\ntrue_max_prop_arm: ", true_max_prop_arm)
    estimated_rewards = np.zeros(num_arms)  # observed payout estimate per arm; all start at 0
    chosen_count = np.zeros(num_arms)  # times each arm has been pulled; all start at 0
    # for i in range(10):
    #     choosen_arm = i % 10
    #     reward = np.random.binomial(n=1, p=true_rewards_prop[choosen_arm])
    #     best_reward = np.random.binomial(n=1, p=true_rewards_prop[true_max_prop_arm])
    #
    #     total_reward += reward
    #     total_best_reward += best_reward
    #     T.append(i)
    #     total_reward_with_T.append(total_reward)
    #     total_regret_with_T.append(total_best_reward - total_reward)
    #
    #     if i < 10:
    #         estimated_rewards[choosen_arm] = reward
    #     else:
    #         # estimated_rewards[choosen_arm] = ((i - 1) * estimated_rewards[choosen_arm] + reward) / i
    #         estimated_rewards[choosen_arm] = (chosen_count[choosen_arm] * estimated_rewards[choosen_arm] + reward) / (
    #             chosen_count[choosen_arm] + 1)
    #     chosen_count[choosen_arm] += 1
    print("\ninit estimated reward: ")
    print(estimated_rewards)
    # Main simulation loop.
    for t in range(0, 20000):
        upper_bound_probs = [estimated_rewards[item] + calculate_delta(t, chosen_count, item) for item in
                             range(num_arms)]
        # Pick the arm with the largest upper confidence bound.
        # choosen_arm = np.argmax(upper_bound_probs)
        choosen_arm = choose_arm(upper_bound_probs)
        reward = np.random.binomial(n=1, p=true_rewards_prop[choosen_arm])
        best_reward = np.random.binomial(n=1, p=true_rewards_prop[true_max_prop_arm])

        total_reward += reward
        total_best_reward += best_reward
        T.append(t)
        total_reward_with_T.append(total_reward)
        total_regret_with_T.append(total_best_reward - total_reward)

        # Incrementally update the chosen arm's estimated payout probability.
        # estimated_rewards[choosen_arm] = ((t - 1) * estimated_rewards[choosen_arm] + reward) / t
        estimated_rewards[choosen_arm] = (chosen_count[choosen_arm] * estimated_rewards[choosen_arm] + reward) / (
                chosen_count[choosen_arm] + 1)
        chosen_count[choosen_arm] += 1

        # if t % 200 == 0:
        #     print("estimated reward: ")
        #     print(estimated_rewards)

    print("\ntotal reward: ", total_reward)
    print("\nbest reward: ", total_best_reward)
    print("\nestimated reward: ")
    print(estimated_rewards)
    print("\nchoosen arm: ", chosen_count)

    # Plot cumulative regret over time.
    plt.xlabel("T")
    plt.ylabel("Total regret")
    plt.plot(T, total_regret_with_T)
    # Output path for the figure.
    plt.savefig('./regret1.png')
if __name__ == "__main__":
    # Run the UCB training simulation.
    train()
| 3.203125 | 3 |
get_free_proxy/self/SelfEnum.py | zwzw911/get-free-proxy | 0 | 12770277 | <gh_stars>0
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'zwzw911'
from enum import Enum, unique
# # 使用url作为value,以便检测是否需要代理才能连接
# @unique
# class SupportedWeb(Enum):
# Xici = 'https://www.xicidaili.com'
# Kuai = 'https://www.kuaidaili.com/free'
# Hidemy = 'https://hidemy.name/en/proxy-list/#list'
# Proxylist = 'https://proxy-list.org/english'
# All = 4
# The site layer converts this enum into a list; proxy-connectivity checks
# then use the list elements directly (translated from the Chinese comment).
@unique
class SupportedWeb(Enum):
    """Proxy-list websites supported by the crawler."""
    Xici = 0
    Kuai = 1
    Hidemy = 2
    Proxylist = 3
    All = 4
@unique
class StorageType(Enum):
    """Back-ends a scraped proxy can be persisted to."""
    Redis = 0
    Mysql = 1
    File = 2
    All = 3
@unique
class ProxyType(Enum):
    """Anonymity level of a proxy (comments translated from Chinese)."""
    # Transparent: the target server knows a proxy is used AND sees your real IP.
    # REMOTE_ADDR = ProxyIP, HTTP_VIA = ProxyIP, HTTP_X_FORWARDED_FOR = YourIP
    TRANS = 0
    # Anonymous: the target server knows a proxy is used but not your real IP.
    # REMOTE_ADDR = ProxyIP, HTTP_VIA = ProxyIP, HTTP_X_FORWARDED_FOR = ProxyIP
    ANON = 1
    # High anonymity: the target server sees neither the proxy use nor your real IP.
    # REMOTE_ADDR = ProxyIP, HTTP_VIA = NULL, HTTP_X_FORWARDED_FOR = NULL
    HIGH_ANON = 2
    All = 3
@unique
class ProtocolType(Enum):
    """Protocols a proxy may support."""
    HTTP = 0
    HTTPS = 1
    SOCKS4 = 2
    SOCKS5 = 3
    # SOCKS = 4
    # NOTE(review): value 4 is skipped (SOCKS commented out); All stays at 5.
    All = 5
@unique
# Values were generated from a sorted country list with:
# sort -u a | awk '{print $1 " = " NR}'
class Country(Enum):
    """Countries a proxy can be located in (values are arbitrary ordinals)."""
    Argentina = 1
    Australia = 2
    Bangladesh = 3
    Botswana = 4
    Brazil = 5
    Cambodia = 6
    Cameroon = 7
    China = 8
    Colombia = 9
    Czech = 10
    Denmark = 11
    Ecuador = 12
    Germany = 13
    Greece = 14
    Hong = 15
    Hungary = 16
    India = 17
    Indonesia = 18
    Iraq = 19
    Italy = 20
    Japan = 21
    Kazakhstan = 22
    Latvia = 23
    Malaysia = 24
    Mexico = 25
    Mongolia = 26
    Nepal = 27
    Pakistan = 28
    Peru = 29
    Philippines = 30
    Russia = 31
    Sweden = 32
    Syrian = 33
    Thailand = 34
    Turkey = 35
    Ukrain = 36
    United = 37
    All = 38
if __name__ == '__main__':
    # Smoke check: confirm the enum member resolves and show its type.
    # (Removed the unused local ``a = 'Xici'`` from the original.)
    print(type(SupportedWeb.Xici))
source/39-Sequência_de_Collatz_mais_longa.py | FelixLuciano/DesSoft-2020.2 | 0 | 12770278 | # Sequência de Collatz mais longa
# Considere a seguinte sequência iterativa definida para os números inteiros positivos:
# \begin{align}
# n &\rightarrow n/2 (n\text{ é par}) \\
# n & \rightarrow 3n+1 (n\text{ é ímpar})
# \end{align}
# Usando a regra acima e começando com o número 13, geramos a seguinte sequência:
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# Percebe-se que essa sequência (começando em 13 e terminando em 1) contém 10 termos. Apesar de ainda não ter sido provado (Problema de Collatz), acredita-se que a sequência sempre termina em 1, independentemente do número inicial.
# Faça um programa que determina qual número positivo inicial menor que 1000 gera a sequência de Collatz mais longa. Seu programa deve imprimir esse número.
# Nota: Uma vez que a sequência começa os números podem passar de 1000.
# Adaptado de https://projecteuler.net/problem=14
def is_odd(number):
    """Return True when *number* is odd, False when it is even."""
    return number % 2 == 1
# Find the starting value below 1000 with the longest Collatz sequence.
max_n = 0    # best starting value found so far
count = 0    # number of steps in the best sequence
# Bug fix: the original while-loop also tested n0 == 1000, contradicting the
# "less than 1000" specification; range(1, 1000) covers exactly 1..999.
for n0 in range(1, 1000):
    counting = 0
    n = n0
    while n > 1:
        if n % 2:                # odd: 3n + 1
            n = 3 * n + 1
        else:
            n //= 2              # even: exact integer division (no floats)
        counting += 1
    if counting > count:
        max_n = n0
        count = counting
print(max_n)
| 4.03125 | 4 |
itch_dl/consts.py | DragoonAethis/ItchJamDownloader | 3 | 12770279 | <filename>itch_dl/consts.py
ITCH_BASE = "itch.io"
ITCH_URL = f"https://{ITCH_BASE}"
ITCH_API = f"https://api.{ITCH_BASE}"

# Extracts https://user.itch.io/name to {'author': 'user', 'game': 'name'}
# Bug fix: the dots in ".itch.io" are now escaped; previously '.' matched any
# character, so hosts such as "userXitchYio" would incorrectly match.
ITCH_GAME_URL_REGEX = r"^https:\/\/(?P<author>[\w\d\-_]+)\.itch\.io\/(?P<game>[\w\d\-_]+)$"

# Catalogue sections that itch.io exposes for browsing.
ITCH_BROWSER_TYPES = [
    "games",
    "tools",
    "game-assets",
    "comics",
    "books",
    "physical-games",
    "soundtracks",
    "game-mods",
    "misc",
]
| 1.984375 | 2 |
Django 3 By Example-Book/My Shop/shop/models.py | ibnshayed/Python-Programming | 0 | 12770280 | <gh_stars>0
from django.db import models
from django.urls import reverse
# Create your models here.
class Category(models.Model):
    """Product category; ``slug`` is used to build category listing URLs."""
    name = models.CharField(max_length=200, db_index=True)
    slug = models.SlugField(max_length=200, unique=True)

    class Meta:
        ordering = ('name',)
        verbose_name = 'category' # django automatically do from class name
        verbose_name_plural = 'categories' # django automatically do from class name + s

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # reverse("app_name: inside urls.py path(name)",args=[list of args inside path() in urls.py])
        return reverse('shop:product_list_by_category',args=[self.slug])
class Product(models.Model):
    """Catalogue product belonging to one Category."""
    category = models.ForeignKey(Category,
                                 related_name='products',
                                 on_delete=models.CASCADE)
    name = models.CharField(max_length=200, db_index=True)
    slug = models.SlugField(max_length=200, db_index=True)
    image = models.ImageField(upload_to='products/%Y/%m/%d',blank=True)
    description = models.TextField(blank=True)
    price = models.DecimalField(max_digits=10, decimal_places=2) # use DecimalField instead of FloatField to avoid rounding issues.
    available = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('name',)
        # NOTE(review): index_together is deprecated in recent Django
        # versions in favour of Meta.indexes; kept as-is to avoid a schema
        # migration here.
        index_together = (('id', 'slug'),)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('shop:product_detail',args=[self.id, self.slug])
Class_buildSample.py | ms-saleh/SampleGenerators | 0 | 12770281 | <filename>Class_buildSample.py
# -*- coding: utf-8 -*-
"""
Created on Fr Mar 12 13:36:25 2021
@author: <NAME> @ Cornell.<EMAIL>
Get the design of experiment for micro channe arrays, build the geometery in
nTopology Platform and create buildfiles in Nanoscribe
"""
import os
import subprocess
import json
import shutil
from PIL import Image, ImageDraw, ImageFont
from getpass import getpass
class buildSample(object):
    """Workflow driver for micro-channel array samples.

    Reads a design-of-experiments CSV, generates STL geometry through the
    nTopology command line (ntopCL), and prepares Nanoscribe build files
    (recipes and GWL job/data files) under ``<path>\\BuildFiles``.

    Fixes relative to the original:
      * the recipe writers no longer truncate a CWD-relative
        ``./BuildFiles/...`` file while appending to ``self.buildPath`` --
        they now write the single correct target once;
      * ``readDOE`` opens the CSV inside ``self.path`` instead of relying on
        the current working directory;
      * duplicated uChannel/Bottom/MeshMerge method bodies are factored into
        private helpers (public interface unchanged).
    """

    # nTopology command-line executable.
    # NOTE(review): the slice* methods invoke this same executable with
    # "-p <recipe>"; confirm that is the intended slicer binary.
    exePath = r"C:/Program Files/nTopology/nTopology/ntopCL.exe"
    # When True, intermediate input_*.json files are deleted after each run.
    cleanUp = True
    # Running total of GWL writing blocks, used for progress reporting.
    blockNumbers = 0

    def __init__(self, path):
        """Initialise the sample rooted at *path* (also used as output root)."""
        self.path = path
        self.setOutputPath(path)
        self.setSTLPath(self.outputPath + "\\STL")
        self.setBuildPath(self.outputPath + "\\BuildFiles")

    def setOutputPath(self, outputPath):
        """Set the root directory for all generated output."""
        self.outputPath = outputPath

    def setSTLPath(self, STLPath):
        """Set the directory that receives generated STL meshes."""
        self.STLPath = STLPath

    def setBuildPath(self, buildPath):
        """Set the directory that receives Nanoscribe build files."""
        self.buildPath = buildPath

    def setSampleName(self, sampleName):
        """Record the sample name (derived from the DOE CSV file name)."""
        self.sampleName = sampleName

    def summary(self):
        """Print a short overview of the configured names and paths."""
        print('{:>20} {}'.format('Sample Name:', self.sampleName))
        print('{:>20} {}'.format('Custom nTop Block:', self.customBlock))
        print('{:>20} {}'.format('Output Path:', self.outputPath))
        print('{:>20} {}'.format('STL Path:', self.STLPath))
        print('{:>20} {}'.format('Build Files Path:', self.buildPath))

    def readDOE(self):
        """Find Sample*.csv inside self.path and load its rows into self.DOE."""
        for fname in os.listdir(self.path):
            if fname.endswith(".csv") and fname[:6] == "Sample":
                self.setSampleName(fname[:-4])
        # Fix: open the CSV from self.path instead of relying on the current
        # working directory happening to be self.path.
        with open(self.path + "\\" + self.sampleName + ".csv", mode='r') as f:
            self.setDOE(f.readlines())

    def setDOE(self, DOE):
        """Store the raw DOE rows (list of comma-separated strings)."""
        self.DOE = DOE

    def readCustomBlock(self):
        """Locate the CB_*.ntop custom-block notebook inside self.path."""
        for fname in os.listdir(self.path):
            if fname.endswith(".ntop") and fname[:3] == "CB_":
                self.setCustomBlock(fname)

    def setCustomBlock(self, nTop):
        """Point self.customBlock at an .ntop notebook inside self.path."""
        self.customBlock = self.path + '\\' + nTop

    def setMeshMergeBlock(self):
        """Use the MeshMerge.ntop notebook as the active custom block."""
        self.customBlock = self.path + '\\' + "MeshMerge.ntop"

    def setRecipe(self, recipe):
        """Point self.recipe at a recipe template inside self.path."""
        self.recipe = self.path + '\\' + recipe

    def nTopTemplate(self):
        """Ask ntopCL to emit input_template.json for the active block.

        Credentials are read from the nTop_user / nTop_pass environment
        variables.
        """
        Arguments = [self.exePath]                      # ntopCL path
        Arguments.append("-u")                          # username argument
        Arguments.append(os.environ.get('nTop_user'))
        Arguments.append("-w")                          # password argument
        Arguments.append(os.environ.get('nTop_pass'))
        Arguments.append("-t")                          # json template argument
        Arguments.append(self.customBlock)              # .ntop notebook file path
        output, error = subprocess.Popen(Arguments, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE).communicate()
        print(output.decode("utf-8"))

    def nTopRun(self, jsonFile, nTopFile):
        """Run one ntopCL job: *jsonFile* as input, *nTopFile* as notebook."""
        Arguments = [self.exePath]                      # ntopCL path
        Arguments.append("-u")                          # username argument
        Arguments.append(os.environ.get('nTop_user'))
        Arguments.append("-w")                          # password argument
        Arguments.append(os.environ.get('nTop_pass'))
        Arguments.append("-j")                          # json input argument
        Arguments.append(jsonFile)
        Arguments.append("-o")                          # output argument
        Arguments.append(self.path + "\\" + "out.json")
        Arguments.append(nTopFile)                      # .ntop notebook file path
        output, error = subprocess.Popen(Arguments, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE).communicate()
        print(output.decode("utf-8"))

    def _loadInputTemplate(self):
        """Load input_template.json, generating it via ntopCL if missing/broken."""
        try:
            with open(self.path + "\\input_template.json") as f:
                return json.load(f)
        except (OSError, ValueError):  # was a bare except; ValueError covers JSONDecodeError
            self.nTopTemplate()
            with open(self.path + "\\input_template.json") as f:
                return json.load(f)

    def createuChannelInputJSON(self):
        """Create one input_<i>.json per DOE row for the micro-channel block."""
        Inputs_JSON = self._loadInputTemplate()
        self.json = []
        for index1, Line in enumerate(self.DOE):
            Dim = Line.strip().split(",")
            # Input 0 is the output STL path; the remaining inputs are the
            # DOE dimensions, in column order.
            Inputs_JSON['inputs'][0]['value'] = self.STLPath + '\\' + 'uChannel_' + str(index1) + '.stl'
            self.json.append(self.path + "\\" + "input_" + str(index1) + ".json")
            for index2, item in enumerate(Inputs_JSON['inputs'][1:]):
                item['value'] = float(Dim[index2])
            with open(self.json[index1], 'w') as outfile:
                json.dump(Inputs_JSON, outfile, indent=4)

    def createMeshMergeInputJSON(self):
        """Create the MeshMerge input.json plus a PNG label of the sample number."""
        Inputs_JSON = self._loadInputTemplate()
        self.json = []
        for index, item in enumerate(sorted(os.listdir(self.STLPath))):
            Inputs_JSON['inputs'][4]['value'][index] = self.STLPath + "\\" + item
        # Render the two-digit sample number into a label image.
        # NOTE(review): the font path is macOS-style while the rest of the code
        # uses Windows paths -- confirm it resolves on the target machine.
        fnt = ImageFont.truetype(r'/Library/Fonts/arial.ttf', 900)
        img = Image.new('RGB', (1000, 1000), color="black")
        d = ImageDraw.Draw(img)
        d.text((10, 10), self.sampleName[-2:], font=fnt, fill="blue")
        img.save(self.path + "\\" + self.sampleName[-2:] + ".png")
        Inputs_JSON['inputs'][0]['value'] = self.STLPath + "\\" + self.sampleName + "Bottom.stl"
        Inputs_JSON['inputs'][1]['value'] = self.STLPath + "\\" + self.sampleName + "uChannel.stl"
        Inputs_JSON['inputs'][2]['value'] = self.STLPath + "\\" + self.sampleName + "Top.stl"
        Inputs_JSON['inputs'][3]['value'] = self.path + "\\" + self.sampleName[-2:] + ".png"
        with open(self.path + "\\input.json", 'w') as outfile:
            json.dump(Inputs_JSON, outfile, indent=4)
        self.json.append(self.path + "\\input.json")

    def createTree(self):
        """Recreate empty STL and BuildFiles directories under the output path."""
        if os.path.isdir(self.STLPath):
            shutil.rmtree(self.STLPath)
        os.mkdir(self.STLPath)
        if os.path.isdir(self.buildPath):
            shutil.rmtree(self.buildPath)
        os.mkdir(self.buildPath)

    def _runAllInputJSON(self):
        """Run ntopCL once per prepared input JSON; optionally delete each file."""
        for JSON in self.json:
            self.nTopRun(JSON, self.customBlock)
            if self.cleanUp and os.path.isfile(JSON):
                os.remove(JSON)

    def createuChannelSTL(self):
        """Generate one STL per DOE row via the active custom block."""
        self._runAllInputJSON()

    def createMeshMergeSTL(self):
        """Generate the merged sample STLs via the MeshMerge block."""
        self._runAllInputJSON()

    def _writeRecipe(self, recipe, part):
        """Copy the recipe template, redirecting Model.FilePath to *part*'s STL.

        Fix: the original truncated a CWD-relative "./BuildFiles/..." file and
        then appended to the self.buildPath copy, so with a different CWD the
        wrong file was cleared and the real recipe grew on every call.  The
        target under self.buildPath is now written once, in "w" mode.
        """
        self.setRecipe(recipe)
        with open(self.recipe, mode='r') as f:
            Lines = f.readlines()
        with open(self.buildPath + "\\" + part + "_job.recipe", mode='w') as out:
            for line in Lines:
                if line[:14] == "Model.FilePath":
                    out.write('Model.FilePath = ' + self.STLPath + "\\" + self.sampleName + part + ".stl\n")
                else:
                    out.write(line)

    def createBottomRecipe(self, recipe):
        """Write BuildFiles\\Bottom_job.recipe pointing at the bottom STL."""
        self._writeRecipe(recipe, "Bottom")

    def createuChannelRecipe(self, recipe):
        """Write BuildFiles\\uChannel_job.recipe pointing at the channel STL."""
        self._writeRecipe(recipe, "uChannel")

    def _slice(self, part):
        """Invoke the slicer on *part*'s recipe file."""
        Arguments = [self.exePath, "-p", self.buildPath + "\\" + part + "_job.recipe"]
        print(" ".join(Arguments))
        subprocess.call(Arguments)

    def sliceBottomSTL(self):
        """Slice the bottom layer from its recipe."""
        self._slice("Bottom")

    def sliceuChannelSTL(self):
        """Slice the micro-channel layer from its recipe."""
        self._slice("uChannel")

    def _moveOutput(self, part):
        """Flatten *part*_job_output into buildPath and count its GWL blocks."""
        out_dir = self.buildPath + "\\" + part + "_job_output"
        if os.path.isdir(out_dir):
            for file in os.listdir(out_dir):
                src = os.path.join(out_dir, file)
                dst = os.path.join(self.buildPath, file)
                if os.path.isfile(src):
                    shutil.copy(src, dst)
                elif os.path.isdir(src):
                    # Replace any stale directory from a previous run.
                    if os.path.exists(dst) and os.path.isdir(dst):
                        shutil.rmtree(dst)
                    shutil.copytree(src, dst)
            shutil.rmtree(out_dir)
        # Keep a pristine copy of the generated data file before it is edited.
        shutil.copy(os.path.join(self.buildPath, self.sampleName + part + '_data.gwl'),
                    os.path.join(self.buildPath, self.sampleName + part + '_data.orig'))
        self.blockNumbers = self.blockNumbers + len(os.listdir(self.buildPath + "\\" + self.sampleName + part + "_files"))

    def moveuChannelOutput(self):
        """Move the uChannel slicer output into buildPath and tally its blocks."""
        self._moveOutput("uChannel")

    def moveBottomOutput(self):
        """Move the Bottom slicer output into buildPath and tally its blocks."""
        self._moveOutput("Bottom")

    def createCombinedJob(self):
        """Assemble <sample>_job.gwl from the _job.gwl template.

        Injects the total block count and a progress counter after the
        parameter section, and redirects included *_data.gwl files to the
        sample-prefixed versions.
        """
        with open(self.path + "\\_job.gwl", mode='r') as jobFile:
            Lines = jobFile.readlines()
        with open(self.buildPath + "\\" + self.sampleName + "_job.gwl", mode='w') as job:
            for line in Lines:
                words = line.strip().split(" ")
                if line == "%%% Last Line in Parameter Settings\n":
                    job.write(line)
                    job.write("\nvar $BlockNumbers = %s\n" % self.blockNumbers)
                    job.write("var $count = 0\n\n")
                elif len(words) > 1 and words[0] == "include" and words[1][-9:] == "_data.gwl":
                    job.write(" ".join([words[0], self.sampleName + words[1][8:]]) + "\n")
                else:
                    job.write(line)

    def _modifyData(self, part):
        """Rewrite *part*'s _data.gwl from its .orig copy with progress hooks.

        Drops the per-file "FindInterfaceAt" command and emits a counter
        increment plus a MessageOut progress line after every BLOCK marker.
        """
        dataFilePath = self.buildPath + "\\" + self.sampleName + part + "_data.orig"
        with open(dataFilePath, mode='r') as dataFile:
            Lines = dataFile.readlines()
        with open(self.buildPath + "\\" + self.sampleName + part + "_data.gwl", mode='w') as data:
            for line in Lines:
                words = line.strip().split(" ")
                if line == "FindInterfaceAt $interfacePos\n":
                    pass  # intentionally dropped from the combined job
                elif len(words) > 1 and words[0] == '%' and words[1] == 'BLOCK':
                    data.write(line)
                    data.write("set $count = $count +1\n")
                    data.write(r'MessageOut "Print Progress = %.1f." #($count/$BlockNumbers*100)' + "\n")
                else:
                    data.write(line)

    def modifyBottomData(self):
        """Rewrite the Bottom data file with progress reporting."""
        self._modifyData("Bottom")

    def modifyuChannelData(self):
        """Rewrite the uChannel data file with progress reporting."""
        self._modifyData("uChannel")
| 2.4375 | 2 |
contextily/_providers.py | jpn--/contextily | 163 | 12770282 | """
Tile providers.
This file is autogenerated! It is a python representation of the leaflet
providers defined by the leaflet-providers.js extension to Leaflet
(https://github.com/leaflet-extras/leaflet-providers).
Credit to the leaflet-providers.js project (BSD 2-Clause "Simplified" License)
and the Leaflet Providers contributors.
Generated by parse_leaflet_providers.py at 2019-08-01 from leaflet-providers
at commit 9eb968f8442ea492626c9c8f0dac8ede484e6905 (Bumped version to 1.8.0).
"""
class Bunch(dict):
    """A dictionary whose entries can also be read as attributes."""

    def __getattr__(self, key):
        # Only reached when normal attribute lookup fails; fall back to the
        # mapping, translating a missing key into the expected AttributeError.
        if key in self:
            return self[key]
        raise AttributeError(key)

    def __dir__(self):
        # Expose the stored keys to dir() and interactive tab-completion.
        return self.keys()
class TileProvider(Bunch):
    """Attribute-accessible dict; calling it yields a copy with keys overridden."""

    def __call__(self, **kwargs):
        # Build the updated copy without mutating the original provider.
        updated = TileProvider(self)
        for key, value in kwargs.items():
            updated[key] = value
        return updated
providers = Bunch(
OpenStreetMap = Bunch(
Mapnik = TileProvider(
url = 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors',
name = 'OpenStreetMap.Mapnik'
),
DE = TileProvider(
url = 'https://{s}.tile.openstreetmap.de/tiles/osmde/{z}/{x}/{y}.png',
max_zoom = 18,
attribution = '(C) OpenStreetMap contributors',
name = 'OpenStreetMap.DE'
),
CH = TileProvider(
url = 'https://tile.osm.ch/switzerland/{z}/{x}/{y}.png',
max_zoom = 18,
attribution = '(C) OpenStreetMap contributors',
bounds = [[45, 5], [48, 11]],
name = 'OpenStreetMap.CH'
),
France = TileProvider(
url = 'https://{s}.tile.openstreetmap.fr/osmfr/{z}/{x}/{y}.png',
max_zoom = 20,
attribution = '(C) Openstreetmap France | (C) OpenStreetMap contributors',
name = 'OpenStreetMap.France'
),
HOT = TileProvider(
url = 'https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors, Tiles style by Humanitarian OpenStreetMap Team hosted by OpenStreetMap France',
name = 'OpenStreetMap.HOT'
),
BZH = TileProvider(
url = 'https://tile.openstreetmap.bzh/br/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors, Tiles courtesy of Breton OpenStreetMap Team',
bounds = [[46.2, -5.5], [50, 0.7]],
name = 'OpenStreetMap.BZH'
)
),
OpenSeaMap = TileProvider(
url = 'https://tiles.openseamap.org/seamark/{z}/{x}/{y}.png',
attribution = 'Map data: (C) OpenSeaMap contributors',
name = 'OpenSeaMap'
),
OpenPtMap = TileProvider(
url = 'http://openptmap.org/tiles/{z}/{x}/{y}.png',
max_zoom = 17,
attribution = 'Map data: (C) OpenPtMap contributors',
name = 'OpenPtMap'
),
OpenTopoMap = TileProvider(
url = 'https://{s}.tile.opentopomap.org/{z}/{x}/{y}.png',
max_zoom = 17,
attribution = 'Map data: (C) OpenStreetMap contributors, SRTM | Map style: (C) OpenTopoMap (CC-BY-SA)',
name = 'OpenTopoMap'
),
OpenRailwayMap = TileProvider(
url = 'https://{s}.tiles.openrailwaymap.org/standard/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = 'Map data: (C) OpenStreetMap contributors | Map style: (C) OpenRailwayMap (CC-BY-SA)',
name = 'OpenRailwayMap'
),
OpenFireMap = TileProvider(
url = 'http://openfiremap.org/hytiles/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = 'Map data: (C) OpenStreetMap contributors | Map style: (C) OpenFireMap (CC-BY-SA)',
name = 'OpenFireMap'
),
SafeCast = TileProvider(
url = 'https://s3.amazonaws.com/te512.safecast.org/{z}/{x}/{y}.png',
max_zoom = 16,
attribution = 'Map data: (C) OpenStreetMap contributors | Map style: (C) SafeCast (CC-BY-SA)',
name = 'SafeCast'
),
Thunderforest = Bunch(
OpenCycleMap = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'cycle',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.OpenCycleMap'
),
Transport = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'transport',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Transport'
),
TransportDark = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'transport-dark',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.TransportDark'
),
SpinalMap = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'spinal-map',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.SpinalMap'
),
Landscape = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'landscape',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Landscape'
),
Outdoors = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'outdoors',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Outdoors'
),
Pioneer = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'pioneer',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Pioneer'
),
MobileAtlas = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'mobile-atlas',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.MobileAtlas'
),
Neighbourhood = TileProvider(
url = 'https://{s}.tile.thunderforest.com/{variant}/{z}/{x}/{y}.png?apikey={apikey}',
attribution = '(C) Thunderforest, (C) OpenStreetMap contributors',
variant = 'neighbourhood',
apikey = '<insert your api key here>',
max_zoom = 22,
name = 'Thunderforest.Neighbourhood'
)
),
OpenMapSurfer = Bunch(
Roads = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 19,
variant = 'roads',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.Roads'
),
Hybrid = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 19,
variant = 'hybrid',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.Hybrid'
),
AdminBounds = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'adminb',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.AdminBounds'
),
ContourLines = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'asterc',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data ASTER GDEM',
min_zoom = 13,
name = 'OpenMapSurfer.ContourLines'
),
Hillshade = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'asterh',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data ASTER GDEM, SRTM',
name = 'OpenMapSurfer.Hillshade'
),
ElementsAtRisk = TileProvider(
url = 'https://maps.heigit.org/openmapsurfer/tiles/{variant}/webmercator/{z}/{x}/{y}.png',
max_zoom = 19,
variant = 'elements_at_risk',
attribution = 'Imagery from GIScience Research Group @ University of Heidelberg | Map data (C) OpenStreetMap contributors',
name = 'OpenMapSurfer.ElementsAtRisk'
)
),
Hydda = Bunch(
Full = TileProvider(
url = 'https://{s}.tile.openstreetmap.se/hydda/{variant}/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'full',
attribution = 'Tiles courtesy of OpenStreetMap Sweden -- Map data (C) OpenStreetMap contributors',
name = 'Hydda.Full'
),
Base = TileProvider(
url = 'https://{s}.tile.openstreetmap.se/hydda/{variant}/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'base',
attribution = 'Tiles courtesy of OpenStreetMap Sweden -- Map data (C) OpenStreetMap contributors',
name = 'Hydda.Base'
),
RoadsAndLabels = TileProvider(
url = 'https://{s}.tile.openstreetmap.se/hydda/{variant}/{z}/{x}/{y}.png',
max_zoom = 18,
variant = 'roads_and_labels',
attribution = 'Tiles courtesy of OpenStreetMap Sweden -- Map data (C) OpenStreetMap contributors',
name = 'Hydda.RoadsAndLabels'
)
),
MapBox = TileProvider(
url = 'https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}{r}.png?access_token={accessToken}',
attribution = '(C) Mapbox (C) OpenStreetMap contributors Improve this map',
subdomains = 'abcd',
id = 'mapbox.streets',
accessToken = '<insert your access token here>',
name = 'MapBox'
),
Stamen = Bunch(
Toner = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner',
ext = 'png',
name = 'Stamen.Toner'
),
TonerBackground = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-background',
ext = 'png',
name = 'Stamen.TonerBackground'
),
TonerHybrid = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-hybrid',
ext = 'png',
name = 'Stamen.TonerHybrid'
),
TonerLines = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-lines',
ext = 'png',
name = 'Stamen.TonerLines'
),
TonerLabels = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-labels',
ext = 'png',
name = 'Stamen.TonerLabels'
),
TonerLite = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toner-lite',
ext = 'png',
name = 'Stamen.TonerLite'
),
Watercolor = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 1,
max_zoom = 16,
variant = 'watercolor',
ext = 'jpg',
name = 'Stamen.Watercolor'
),
Terrain = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 18,
variant = 'terrain',
ext = 'png',
name = 'Stamen.Terrain'
),
TerrainBackground = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 18,
variant = 'terrain-background',
ext = 'png',
name = 'Stamen.TerrainBackground'
),
TopOSMRelief = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toposm-color-relief',
ext = 'jpg',
bounds = [[22, -132], [51, -56]],
name = 'Stamen.TopOSMRelief'
),
TopOSMFeatures = TileProvider(
url = 'https://stamen-tiles-{s}.a.ssl.fastly.net/{variant}/{z}/{x}/{y}{r}.{ext}',
attribution = 'Map tiles by Stamen Design, CC BY 3.0 -- Map data (C) OpenStreetMap contributors',
subdomains = 'abcd',
min_zoom = 0,
max_zoom = 20,
variant = 'toposm-features',
ext = 'png',
bounds = [[22, -132], [51, -56]],
opacity = 0.9,
name = 'Stamen.TopOSMFeatures'
)
),
Esri = Bunch(
WorldStreetMap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Street_Map',
attribution = 'Tiles (C) Esri -- Source: Esri, DeLorme, NAVTEQ, USGS, Intermap, iPC, NRCAN, Esri Japan, METI, Esri China (Hong Kong), Esri (Thailand), TomTom, 2012',
name = 'Esri.WorldStreetMap'
),
DeLorme = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'Specialty/DeLorme_World_Base_Map',
attribution = 'Tiles (C) Esri -- Copyright: (C)2012 DeLorme',
min_zoom = 1,
max_zoom = 11,
name = 'Esri.DeLorme'
),
WorldTopoMap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Topo_Map',
attribution = 'Tiles (C) Esri -- Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC, USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), and the GIS User Community',
name = 'Esri.WorldTopoMap'
),
WorldImagery = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Imagery',
attribution = 'Tiles (C) Esri -- Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community',
name = 'Esri.WorldImagery'
),
WorldTerrain = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Terrain_Base',
attribution = 'Tiles (C) Esri -- Source: USGS, Esri, TANA, DeLorme, and NPS',
max_zoom = 13,
name = 'Esri.WorldTerrain'
),
WorldShadedRelief = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Shaded_Relief',
attribution = 'Tiles (C) Esri -- Source: Esri',
max_zoom = 13,
name = 'Esri.WorldShadedRelief'
),
WorldPhysical = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'World_Physical_Map',
attribution = 'Tiles (C) Esri -- Source: US National Park Service',
max_zoom = 8,
name = 'Esri.WorldPhysical'
),
OceanBasemap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'Ocean_Basemap',
attribution = 'Tiles (C) Esri -- Sources: GEBCO, NOAA, CHS, OSU, UNH, CSUMB, National Geographic, DeLorme, NAVTEQ, and Esri',
max_zoom = 13,
name = 'Esri.OceanBasemap'
),
NatGeoWorldMap = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'NatGeo_World_Map',
attribution = 'Tiles (C) Esri -- National Geographic, Esri, DeLorme, NAVTEQ, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, GEBCO, NOAA, iPC',
max_zoom = 16,
name = 'Esri.NatGeoWorldMap'
),
WorldGrayCanvas = TileProvider(
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/{variant}/MapServer/tile/{z}/{y}/{x}',
variant = 'Canvas/World_Light_Gray_Base',
attribution = 'Tiles (C) Esri -- Esri, DeLorme, NAVTEQ',
max_zoom = 16,
name = 'Esri.WorldGrayCanvas'
)
),
OpenWeatherMap = Bunch(
Clouds = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'clouds',
name = 'OpenWeatherMap.Clouds'
),
CloudsClassic = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'clouds_cls',
name = 'OpenWeatherMap.CloudsClassic'
),
Precipitation = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'precipitation',
name = 'OpenWeatherMap.Precipitation'
),
PrecipitationClassic = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'precipitation_cls',
name = 'OpenWeatherMap.PrecipitationClassic'
),
Rain = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'rain',
name = 'OpenWeatherMap.Rain'
),
RainClassic = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'rain_cls',
name = 'OpenWeatherMap.RainClassic'
),
Pressure = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'pressure',
name = 'OpenWeatherMap.Pressure'
),
PressureContour = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'pressure_cntr',
name = 'OpenWeatherMap.PressureContour'
),
Wind = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'wind',
name = 'OpenWeatherMap.Wind'
),
Temperature = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'temp',
name = 'OpenWeatherMap.Temperature'
),
Snow = TileProvider(
url = 'http://{s}.tile.openweathermap.org/map/{variant}/{z}/{x}/{y}.png?appid={apiKey}',
max_zoom = 19,
attribution = 'Map data (C) OpenWeatherMap',
apiKey = '<insert your api key here>',
opacity = 0.5,
variant = 'snow',
name = 'OpenWeatherMap.Snow'
)
),
HERE = Bunch(
normalDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDay'
),
normalDayCustom = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.custom',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayCustom'
),
normalDayGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.grey',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayGrey'
),
normalDayMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayMobile'
),
normalDayGreyMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.grey.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayGreyMobile'
),
normalDayTransit = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.transit',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayTransit'
),
normalDayTransitMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day.transit.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalDayTransitMobile'
),
normalNight = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNight'
),
normalNightMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightMobile'
),
normalNightGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.grey',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightGrey'
),
normalNightGreyMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.grey.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightGreyMobile'
),
normalNightTransit = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.transit',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightTransit'
),
normalNightTransitMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.night.transit.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.normalNightTransitMobile'
),
reducedDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'reduced.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.reducedDay'
),
reducedNight = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'reduced.night',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.reducedNight'
),
basicMap = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day',
max_zoom = 20,
type = 'basetile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.basicMap'
),
mapLabels = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'normal.day',
max_zoom = 20,
type = 'labeltile',
language = 'eng',
format = 'png',
size = '256',
name = 'HERE.mapLabels'
),
trafficFlow = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'traffic',
variant = 'normal.day',
max_zoom = 20,
type = 'flowtile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.trafficFlow'
),
carnavDayGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'carnav.day.grey',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.carnavDayGrey'
),
hybridDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDay'
),
hybridDayMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.day.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDayMobile'
),
hybridDayTransit = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.day.transit',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDayTransit'
),
hybridDayGrey = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'hybrid.grey.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.hybridDayGrey'
),
pedestrianDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'pedestrian.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.pedestrianDay'
),
pedestrianNight = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'base',
variant = 'pedestrian.night',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.pedestrianNight'
),
satelliteDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'satellite.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.satelliteDay'
),
terrainDay = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'terrain.day',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.terrainDay'
),
terrainDayMobile = TileProvider(
url = 'https://{s}.{base}.maps.api.here.com/maptile/2.1/{type}/{mapID}/{variant}/{z}/{x}/{y}/{size}/{format}?app_id={app_id}&app_code={app_code}&lg={language}',
attribution = 'Map (C) 1987-2019 HERE',
subdomains = '1234',
mapID = 'newest',
app_id = '<insert your app_id here>',
app_code = '<insert your app_code here>',
base = 'aerial',
variant = 'terrain.day.mobile',
max_zoom = 20,
type = 'maptile',
language = 'eng',
format = 'png8',
size = '256',
name = 'HERE.terrainDayMobile'
)
),
FreeMapSK = TileProvider(
url = 'http://t{s}.freemap.sk/T/{z}/{x}/{y}.jpeg',
min_zoom = 8,
max_zoom = 16,
subdomains = '1234',
bounds = [[47.204642, 15.996093], [49.830896, 22.576904]],
attribution = '(C) OpenStreetMap contributors, vizualization CC-By-SA 2.0 Freemap.sk',
name = 'FreeMapSK'
),
MtbMap = TileProvider(
url = 'http://tile.mtbmap.cz/mtbmap_tiles/{z}/{x}/{y}.png',
attribution = '(C) OpenStreetMap contributors & USGS',
name = 'MtbMap'
),
CartoDB = Bunch(
Positron = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'light_all',
name = 'CartoDB.Positron'
),
PositronNoLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'light_nolabels',
name = 'CartoDB.PositronNoLabels'
),
PositronOnlyLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'light_only_labels',
name = 'CartoDB.PositronOnlyLabels'
),
DarkMatter = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'dark_all',
name = 'CartoDB.DarkMatter'
),
DarkMatterNoLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'dark_nolabels',
name = 'CartoDB.DarkMatterNoLabels'
),
DarkMatterOnlyLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'dark_only_labels',
name = 'CartoDB.DarkMatterOnlyLabels'
),
Voyager = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager',
name = 'CartoDB.Voyager'
),
VoyagerNoLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager_nolabels',
name = 'CartoDB.VoyagerNoLabels'
),
VoyagerOnlyLabels = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager_only_labels',
name = 'CartoDB.VoyagerOnlyLabels'
),
VoyagerLabelsUnder = TileProvider(
url = 'https://{s}.basemaps.cartocdn.com/{variant}/{z}/{x}/{y}{r}.png',
attribution = '(C) OpenStreetMap contributors (C) CARTO',
subdomains = 'abcd',
max_zoom = 19,
variant = 'rastertiles/voyager_labels_under',
name = 'CartoDB.VoyagerLabelsUnder'
)
),
HikeBike = Bunch(
HikeBike = TileProvider(
url = 'https://tiles.wmflabs.org/{variant}/{z}/{x}/{y}.png',
max_zoom = 19,
attribution = '(C) OpenStreetMap contributors',
variant = 'hikebike',
name = 'HikeBike.HikeBike'
),
HillShading = TileProvider(
url = 'https://tiles.wmflabs.org/{variant}/{z}/{x}/{y}.png',
max_zoom = 15,
attribution = '(C) OpenStreetMap contributors',
variant = 'hillshading',
name = 'HikeBike.HillShading'
)
),
BasemapAT = Bunch(
basemap = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 20,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'png',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'geolandbasemap',
name = 'BasemapAT.basemap'
),
grau = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 19,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'png',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmapgrau',
name = 'BasemapAT.grau'
),
overlay = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 19,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'png',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmapoverlay',
name = 'BasemapAT.overlay'
),
highdpi = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 19,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'jpeg',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmaphidpi',
name = 'BasemapAT.highdpi'
),
orthofoto = TileProvider(
url = 'https://maps{s}.wien.gv.at/basemap/{variant}/normal/google3857/{z}/{y}/{x}.{format}',
max_zoom = 20,
attribution = 'Datenquelle: basemap.at',
subdomains = ['', '1', '2', '3', '4'],
format = 'jpeg',
bounds = [[46.35877, 8.782379], [49.037872, 17.189532]],
variant = 'bmaporthofoto30cm',
name = 'BasemapAT.orthofoto'
)
),
nlmaps = Bunch(
standaard = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/tiles/service/wmts/{variant}/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
variant = 'brtachtergrondkaart',
name = 'nlmaps.standaard'
),
pastel = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/tiles/service/wmts/{variant}/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
variant = 'brtachtergrondkaartpastel',
name = 'nlmaps.pastel'
),
grijs = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/tiles/service/wmts/{variant}/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
variant = 'brtachtergrondkaartgrijs',
name = 'nlmaps.grijs'
),
luchtfoto = TileProvider(
url = 'https://geodata.nationaalgeoregister.nl/luchtfoto/rgb/wmts/1.0.0/2016_ortho25/EPSG:3857/{z}/{x}/{y}.png',
min_zoom = 6,
max_zoom = 19,
bounds = [[50.5, 3.25], [54, 7.6]],
attribution = 'Kaartgegevens (C) Kadaster',
name = 'nlmaps.luchtfoto'
)
),
NASAGIBS = Bunch(
ModisTerraTrueColorCR = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 9,
format = 'jpg',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_CorrectedReflectance_TrueColor',
name = 'NASAGIBS.ModisTerraTrueColorCR'
),
ModisTerraBands367CR = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 9,
format = 'jpg',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_CorrectedReflectance_Bands367',
name = 'NASAGIBS.ModisTerraBands367CR'
),
ViirsEarthAtNight2012 = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 8,
format = 'jpg',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'VIIRS_CityLights_2012',
name = 'NASAGIBS.ViirsEarthAtNight2012'
),
ModisTerraLSTDay = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 7,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Land_Surface_Temp_Day',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraLSTDay'
),
ModisTerraSnowCover = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 8,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Snow_Cover',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraSnowCover'
),
ModisTerraAOD = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 6,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Aerosol',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraAOD'
),
ModisTerraChlorophyll = TileProvider(
url = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/{variant}/default/{time}/{tilematrixset}{max_zoom}/{z}/{y}/{x}.{format}',
attribution = 'Imagery provided by services from the Global Imagery Browse Services (GIBS), operated by the NASA/GSFC/Earth Science Data and Information System (ESDIS) with funding provided by NASA/HQ.',
bounds = [[-85.0511287776, -179.999999975], [85.0511287776, 179.999999975]],
min_zoom = 1,
max_zoom = 7,
format = 'png',
time = '',
tilematrixset = 'GoogleMapsCompatible_Level',
variant = 'MODIS_Terra_Chlorophyll_A',
opacity = 0.75,
name = 'NASAGIBS.ModisTerraChlorophyll'
)
),
NLS = TileProvider(
url = 'https://nls-{s}.tileserver.com/nls/{z}/{x}/{y}.jpg',
attribution = 'National Library of Scotland Historic Maps',
bounds = [[49.6, -12], [61.7, 3]],
min_zoom = 1,
max_zoom = 18,
subdomains = '0123',
name = 'NLS'
),
JusticeMap = Bunch(
income = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'income',
name = 'JusticeMap.income'
),
americanIndian = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'indian',
name = 'JusticeMap.americanIndian'
),
asian = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'asian',
name = 'JusticeMap.asian'
),
black = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'black',
name = 'JusticeMap.black'
),
hispanic = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'hispanic',
name = 'JusticeMap.hispanic'
),
multi = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'multi',
name = 'JusticeMap.multi'
),
nonWhite = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'nonwhite',
name = 'JusticeMap.nonWhite'
),
white = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'white',
name = 'JusticeMap.white'
),
plurality = TileProvider(
url = 'http://www.justicemap.org/tile/{size}/{variant}/{z}/{x}/{y}.png',
attribution = 'Justice Map',
size = 'county',
bounds = [[14, -180], [72, -56]],
variant = 'plural',
name = 'JusticeMap.plurality'
)
),
Wikimedia = TileProvider(
url = 'https://maps.wikimedia.org/osm-intl/{z}/{x}/{y}{r}.png',
attribution = 'Wikimedia',
min_zoom = 1,
max_zoom = 19,
name = 'Wikimedia'
),
GeoportailFrance = Bunch(
parcels = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 20,
apikey = 'choisirgeoportail',
format = 'image/png',
style = 'bdparcellaire',
variant = 'CADASTRALPARCELS.PARCELS',
name = 'GeoportailFrance.parcels'
),
ignMaps = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 18,
apikey = 'choisirgeoportail',
format = 'image/jpeg',
style = 'normal',
variant = 'GEOGRAPHICALGRIDSYSTEMS.MAPS',
name = 'GeoportailFrance.ignMaps'
),
maps = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 18,
apikey = 'choisirgeoportail',
format = 'image/jpeg',
style = 'normal',
variant = 'GEOGRAPHICALGRIDSYSTEMS.MAPS.SCAN-EXPRESS.STANDARD',
name = 'GeoportailFrance.maps'
),
orthos = TileProvider(
url = 'https://wxs.ign.fr/{apikey}/geoportail/wmts?REQUEST=GetTile&SERVICE=WMTS&VERSION=1.0.0&STYLE={style}&TILEMATRIXSET=PM&FORMAT={format}&LAYER={variant}&TILEMATRIX={z}&TILEROW={y}&TILECOL={x}',
attribution = 'Geoportail France',
bounds = [[-75, -180], [81, 180]],
min_zoom = 2,
max_zoom = 19,
apikey = 'choisirgeoportail',
format = 'image/jpeg',
style = 'normal',
variant = 'ORTHOIMAGERY.ORTHOPHOTOS',
name = 'GeoportailFrance.orthos'
)
),
OneMapSG = Bunch(
Default = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Default',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Default'
),
Night = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Night',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Night'
),
Original = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Original',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Original'
),
Grey = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'Grey',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.Grey'
),
LandLot = TileProvider(
url = 'https://maps-{s}.onemap.sg/v3/{variant}/{z}/{x}/{y}.png',
variant = 'LandLot',
min_zoom = 11,
max_zoom = 18,
bounds = [[1.56073, 104.11475], [1.16, 103.502]],
attribution = ' New OneMap | Map data (C) contributors, Singapore Land Authority',
name = 'OneMapSG.LandLot'
)
)
)
| 2.03125 | 2 |
api/config.py | 114000/webapp-boilerplate | 0 | 12770283 | # encoding: utf-8
from os import path, getenv
from datetime import timedelta
import ast
# Absolute directory of this config module; used to anchor on-disk paths
# (e.g. the SQLAlchemy migration repository) regardless of the CWD.
basedir = path.abspath(path.dirname(__file__))


class Config:
    """Central application configuration.

    Values are read from environment variables where available, with
    hard-coded fallbacks intended for local development only.
    """

    APP_NAME = getenv('APP_NAME', 'Python Flask Boilerplate')

    # ast.literal_eval parses the env string as a Python literal, so the
    # variable must be spelled exactly 'True' or 'False' (case-sensitive);
    # any other value raises ValueError at import time.
    DEV = ast.literal_eval(getenv('DEV', 'True'))
    DEBUG = ast.literal_eval(getenv('DEBUG', 'True'))

    # Development server bind address: all interfaces, fixed port.
    HOST = '0.0.0.0'
    PORT = 5678

    # Placeholder default password for newly created users.
    # NOTE(review): presumably overridden per deployment — confirm this is
    # never shipped as-is.
    USER_DEFAULT_PASSWORD = '<PASSWORD>'

    # Database settings (PostgreSQL by default; MySQL example at bottom).
    SQLALCHEMY_DATABASE_URI = getenv('SQLALCHEMY_DATABASE_URI', 'postgresql://postgres:654321@localhost:5432/postgres')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_MIGRATE_REPO = path.join(basedir, 'db', 'db_repository')

    # Flask-JWT settings: auth endpoint, credential keys and token lifetime.
    SECRET_KEY = 'super-secret'
    JWT_AUTH_URL_RULE = '/signin'
    JWT_AUTH_USERNAME_KEY = 'name'
    JWT_AUTH_PASSWORD_KEY = '<PASSWORD>'
    JWT_EXPIRATION_DELTA = timedelta(seconds=1800)

    # Docker-network media-service container (currently disabled).
    # MEDIA_SERVICE_RESTFUL_API_URL = getenv('MEDIA_SERVICE_RESTFUL_API_URL', 'http://localhost:8080/index/api')
    # MEDIA_SERVICE_SECRET = '<KEY>'

    # Example MySQL URI:
    # SQLALCHEMY_DATABASE_URI = 'mysql://user:pass@server_ip:server_port/db_name'
current = Config | 1.875 | 2 |
nobullshitonlyanswers/qa/forms.py | tanyadixit21/my-django-shenanigans | 0 | 12770284 | from django import forms
from django.contrib.auth import get_user_model
from qa.models import Question
from qa.models import Answer
class QuestionForm(forms.ModelForm):
    """ModelForm for posting a new Question.

    The posting user is carried as a hidden, disabled field: the view
    supplies it as initial data, and ``disabled=True`` makes Django ignore
    any user-submitted value, preventing tampering via the POST body.
    """

    user = forms.ModelChoiceField(
        widget = forms.HiddenInput,
        queryset = get_user_model().objects.all(),
        disabled = True,)

    class Meta:
        # Bind to the Question model; expose only these fields (order matters
        # for rendering).
        model = Question
        fields = ['title', 'question', 'user']
class AnswerForm(forms.ModelForm):
    """ModelForm for posting an Answer to a Question.

    Both the answering user and the target question are hidden, disabled
    fields: the view supplies them as initial data, and ``disabled=True``
    makes Django ignore any user-submitted values.
    """

    user = forms.ModelChoiceField(
        widget = forms.HiddenInput,
        queryset = get_user_model().objects.all(),
        disabled = True,)

    question = forms.ModelChoiceField(
        widget = forms.HiddenInput,
        queryset = Question.objects.all(),
        disabled = True,)

    class Meta:
        # Bind to the Answer model; expose only these fields.
        model = Answer
        fields = ['answer', 'question', 'user']
class AnswerAcceptanceForm(forms.ModelForm):
    """One-field form toggling the ``accepted`` flag on an Answer.

    The field is hidden and not required, so it can be rendered as a simple
    accept/reject button form; absence of the field submits ``False``.
    """

    accepted = forms.BooleanField(widget = forms.HiddenInput,
        required = False,
        )

    class Meta:
        model = Answer
        fields = ['accepted',]
| 2.21875 | 2 |
feedsearch_crawler/crawler/__init__.py | DBeath/feedsearch-crawler | 20 | 12770285 | <filename>feedsearch_crawler/crawler/__init__.py
from feedsearch_crawler.crawler.crawler import Crawler
from feedsearch_crawler.crawler.duplicatefilter import DuplicateFilter
from feedsearch_crawler.crawler.item import Item
from feedsearch_crawler.crawler.item_parser import ItemParser
from feedsearch_crawler.crawler.lib import (
to_string,
to_bytes,
coerce_url,
CallbackResult,
)
from feedsearch_crawler.crawler.request import Request
from feedsearch_crawler.crawler.response import Response
# Public API of the crawler package: exactly these names are re-exported
# by ``from feedsearch_crawler.crawler import *``.
__all__ = [
    "Crawler",
    "Item",
    "ItemParser",
    "DuplicateFilter",
    "Request",
    "Response",
    "to_bytes",
    "to_string",
    "coerce_url",
    "CallbackResult",
]
| 1.960938 | 2 |
tests/core/urls.py | zhd785576549/flamingo | 0 | 12770286 | from flamingo.url.conf import path
# Root URL table: mounts the ``tapp.urls`` sub-module under the "/test" prefix.
routers = [
    path(url="/test", view_func_or_module="tapp.urls", name="test")
]
| 1.75 | 2 |
04_repair_eeg_artefacts.py | JoseAlanis/supplementary_dpx_tt | 0 | 12770287 | """
===============================================
Repair EEG artefacts caused by ocular movements
===============================================
Identify "bad" components in ICA solution (e.g., components which are highly
correlated the time course of the electrooculogram).
Authors: <NAME> <<EMAIL>>
License: BSD (3-clause)
"""
import numpy as np
import matplotlib.pyplot as plt
from mne import open_report, events_from_annotations, Epochs
from mne.io import read_raw_fif
from mne.preprocessing import read_ica, corrmap
# All parameters are defined in config.py
from config import fname, parser, LoggingFormat
# Handle command line arguments
args = parser.parse_args()
subject = args.subject
print(LoggingFormat.PURPLE +
LoggingFormat.BOLD +
'Finding and removing bad components for subject %s' % subject +
LoggingFormat.END)
###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
processing_step='repair_bads',
file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)
# activate average reference
raw.apply_proj()
###############################################################################
# 2) Import ICA weights from precious processing step
ica_file = fname.output(subject=subject,
processing_step='fit_ica',
file_type='ica.fif')
ica = read_ica(ica_file)
###############################################################################
# 3) Find bad components via correlation with template ICA
temp_subjs = [2, 10]
# temp_raws = []
temp_icas = []
# import template subjects
for subj in temp_subjs:
# temp_raws.append(read_raw_fif(fname.output(subject=subj,
# processing_step='repair_bads',
# file_type='raw.fif')))
temp_icas.append(read_ica(fname.output(subject=subj,
processing_step='fit_ica',
file_type='ica.fif')))
# set thresholds for correlation
if subject in {5, 28, 32, 39, 45}:
threshold = 0.90
else:
threshold = 0.85
# compute correlations with template ocular movements up/down and left/right
corrmap(icas=[temp_icas[1], ica],
template=(0, 0), threshold=threshold, label='blink_up', plot=False)
corrmap(icas=[temp_icas[1], ica],
template=(0, 1), threshold=threshold, label='blink_side', plot=False)
# compute correlations with template ocular movements that look slightly
# different
corrmap(icas=[temp_icas[0], ica],
template=(0, 0), threshold=threshold, label='blink_misc', plot=False)
corrmap(icas=[temp_icas[0], ica],
template=(0, 1), threshold=threshold, label='blink_misc', plot=False)
###############################################################################
# 4) Create summary plots to show signal correction on main experimental
# condition
# create a-cue epochs
a_evs = events_from_annotations(raw, regexp='^(70)')[0]
a_epo = Epochs(raw, a_evs,
tmin=-2.0,
tmax=2.0,
reject_by_annotation=True,
proj=False,
preload=True)
a_epo.apply_baseline(baseline=(-0.3, -0.05))
a_evo = a_epo.average()
# loop over identified "bad" components
bad_components = []
for label in ica.labels_:
bad_components.extend(ica.labels_[label])
for bad_comp in np.unique(bad_components):
# show component frequency spectrum
fig_comp = ica.plot_properties(a_epo,
picks=bad_comp,
psd_args={'fmax': 35.},
show=False)[0]
# show how the signal is affected by component rejection
fig_evoked = ica.plot_overlay(a_evo, exclude=[bad_comp], show=False)
plt.close(fig_evoked)
# create HTML report
with open_report(fname.report(subject=subject)[0]) as report:
report.add_figs_to_section(fig_comp, 'Component %s identified '
'by correlation with template'
% bad_comp,
section='ICA',
replace=True)
report.add_figs_to_section(fig_evoked, 'Component %s rejected'
% bad_comp,
section='ICA',
replace=True)
report.save(fname.report(subject=subject)[1], overwrite=True,
open_browser=False)
# add bad components to exclusion list
ica.exclude = np.unique(bad_components)
# apply ica weights to data
ica.apply(raw)
###############################################################################
# 5) Save repaired data set
# output path
output_path = fname.output(processing_step='repaired_with_ica',
subject=subject,
file_type='raw.fif')
# save file
raw.save(output_path, overwrite=True)
| 2.375 | 2 |
molecool/tests/test_measure.py | MadelynnWatson/molecool | 0 | 12770288 | <reponame>MadelynnWatson/molecool
"""
Tests for the measure module
"""
# imports
import molecool
import numpy as np
import pytest
def test_calculate_distance():
    """calculate_distance returns the Euclidean distance between two points."""
    point_a = np.array([0, 0, 0])
    point_b = np.array([0, 1, 0])

    observed_distance = molecool.calculate_distance(point_a, point_b)

    assert observed_distance == 1
#Write a test for the calculate angle function
#Use points (0,0,-1) (0,0,0) (1,0,0)
#expected angle is 90 degrees
def test_calculate_angle():
    """Perpendicular bond vectors around the vertex give a 90 degree angle."""
    left_point = np.array([0, 0, -1])
    vertex_point = np.array([0, 0, 0])
    right_point = np.array([1, 0, 0])

    measured = molecool.calculate_angle(
        left_point, vertex_point, right_point, degrees=True)

    assert pytest.approx(90) == measured
@pytest.mark.parametrize("p1, p2,p3,expected_angle", [
    (np.array([np.sqrt(2)/2 ,np.sqrt(2)/2 , 0]) , np.array([0,0,0]), np.array([1,0,0]), 45),
    (np.array([0,0,-1]), np.array([0,1,0]), np.array([1,0,0]), 60),
])
def test_calculate_angle_many(p1, p2, p3, expected_angle):
    """Check calculate_angle against several known point geometries."""
    calculated_value = molecool.calculate_angle(p1, p2, p3, degrees=True)
    # Bug fix: the failure message previously referenced the misspelled name
    # ``caluculated_value``, which raised NameError whenever the assertion
    # actually failed, hiding the real comparison.
    assert expected_angle == pytest.approx(calculated_value), F'{calculated_value} {expected_angle}'
| 2.828125 | 3 |
Graphical.py | Sunuba/roc | 23 | 12770289 | from tkinter import *
from classes.AttackBarbarians import AttackBarbarians
from classes.ExploreFog import ExploreFog
from classes.Screenshot import Screenshot
from classes.tester import Tester
starter = Tk()
starter.winfo_toplevel().title('Rise of Kingdom - Automator')
starter.geometry('250x500')
class MainInterface:
    """Tk control panel that drives the Rise of Kingdoms automation helpers."""

    # NOTE(review): these Tk variables/widgets are created at class-definition
    # time and are shared by every instance; this only works because the
    # module-level ``starter`` root window exists before the class body runs.
    v = StringVar()
    v.set('BlueStacks')
    i = IntVar()
    i.set(26)
    q = IntVar()
    q.set(35)
    x = IntVar()
    x.set(4)
    # Bug fix: tkinter.Entry has no ``text`` option (that belongs to Label);
    # the backing variable must be attached with ``textvariable`` so that
    # get() tracks the widget contents.
    txt_process_name = Entry(starter, textvariable=v)
    txt_minbarb_level = Entry(starter, textvariable=i)
    txt_maxbarb_level = Entry(starter, textvariable=q)
    txt_troop_count = Entry(starter, textvariable=x)

    def __init__(self, barb_level, function):
        # Stored for later use; the current methods read their parameters
        # from the shared Entry widgets instead.
        self.barb_level = barb_level
        self.function = function

    def barb_allday(self):
        """Attack barbarians in an endless loop using the GUI parameters.

        Runs until the process is killed; the Tk event loop is blocked
        while this runs.
        """
        process_name = self.txt_process_name.get()
        minbarb_level = self.txt_minbarb_level.get()
        maxbarb_level = self.txt_maxbarb_level.get()
        troop_count = self.txt_troop_count.get()
        while True:
            attack = AttackBarbarians(minbarb_level, maxbarb_level,
                                      troop_count, process_name)
            attack.start()

    def test_start(self):
        """Run the tester helper (debug entry point)."""
        Tester.start()

    def start_explore(self):
        """Start the fog-exploration helper."""
        ExploreFog.start()

    def take_screenshot(self):
        """Capture a screenshot to ``default.png``."""
        Screenshot.shot('default.png')

    def start_interface(self):
        """Build and pack the widgets, then enter the Tk main loop."""
        lbl_process_name = Label(starter, text="Enter process name")
        lbl_minbarb_attack = Label(starter, text="Enter barbarian minlevel")
        lbl_maxbarb_attack = Label(starter, text="Enter barbarian maxlevel")
        lbl_barb_troop = Label(starter, text="Enter troop number and press button")
        #btn_barb_attack = Button(starter, text="Attack Barbarian", command=(lambda: self.start_attack()))
        #btn_explore = Button(starter, text="Explore Kingdom", command=(lambda: self.start_explore()))
        #btn_test = Button(starter, text="test method", command=(lambda: self.test_start()))
        btn_take_screenshot = Button(starter, text="Screenshot", command=(lambda: self.take_screenshot()))
        #lbl_barb_allday = Label(starter, text="barb_allday")
        btn_barb_allday = Button(starter, text="barb_allday", command=(lambda: self.barb_allday()))
        lbl_process_name.pack()
        self.txt_process_name.pack()
        lbl_minbarb_attack.pack()
        self.txt_minbarb_level.pack()
        lbl_maxbarb_attack.pack()
        self.txt_maxbarb_level.pack()
        lbl_barb_troop.pack()
        self.txt_troop_count.pack()
        #btn_barb_attack.pack()
        #btn_explore.pack()
        #btn_test.pack()
        btn_take_screenshot.pack()
        # lbl_barb_allday.pack()
        btn_barb_allday.pack()
        starter.mainloop()
interface = MainInterface(barb_level=12, function='start_attack()')
interface.start_interface()
| 3.203125 | 3 |
train/experiment.py | deepmind/ithaca | 389 | 12770290 | # Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ithaca: Restoring and attributing ancient texts with deep neural networks."""
import bz2
import distutils
import functools
import glob
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import dataloader
from ithaca.models.model import Model
from ithaca.util.alphabet import GreekAlphabet
from ithaca.util.loss import categorical_kl_divergence
from ithaca.util.loss import cross_entropy_label_smoothing_loss
from ithaca.util.loss import cross_entropy_loss
from ithaca.util.loss import cross_entropy_mask_loss
from ithaca.util.loss import date_loss_l1
from ithaca.util.optim import adaptive_grad_clip
from ithaca.util.optim import linear_warmup_and_sqrt_decay
from ithaca.util.optim import linear_weight
from ithaca.util.region_names import load_region_maps
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
import numpy as np
import optax
import tensorflow_datasets.public_api as tfds
FLAGS = flags.FLAGS
class Experiment(experiment.AbstractExperiment):
"""Ithaca experiment."""
# Holds a map from object properties that will be checkpointed to their name
# within a checkpoint. Currently it is assume that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_opt_state': 'opt_state',
}
  def __init__(self, mode, init_rng, config):
    """Initializes experiment.

    Args:
      mode: jaxline mode string (e.g. 'train' or 'eval').
      init_rng: PRNG key used for parameter initialization and data rngs.
      config: config with model/optimizer/dataset/loss/evaluation sections.
    """
    super(Experiment, self).__init__(mode=mode)
    self.mode = mode
    self.init_rng = init_rng
    self.config = config
    # Same random key on each device.
    self._rng_key = jl_utils.bcast_local_devices(self.init_rng)
    # Checkpointed experiment state (populated lazily or by restore).
    self._params = None
    self._opt_state = None
    # Input pipelines (built lazily on first use).
    self._train_input = None
    self._eval_input = None
    # Forward and update functions.
    self.forward = Model(**self.config.model)
    # pmap the update; donate params/opt_state buffers for in-place reuse.
    self._update_func = jax.pmap(
        self._update_func, axis_name='i', donate_argnums=(0, 1))
    self._learning_rate_fn = functools.partial(
        linear_warmup_and_sqrt_decay,
        max_lr=self.config.optimizer.kwargs.learning_rate,
        warmup_steps=self.config.optimizer.warmup)
    self._opt_init, self._opt_update = self.optimizer()
    if 'use_jit' in self.config.evaluation and self.config.evaluation.use_jit:
      self._eval_batch = jax.jit(self._eval_batch)
    # Create alphabet
    alphabet_kwargs = dict(self.config.alphabet)
    wordlist_path = alphabet_kwargs.pop('wordlist_path')
    with open(wordlist_path, 'r') as f:
      self._alphabet = GreekAlphabet(wordlist_file=f, **alphabet_kwargs)
    # Create region mapping (entries stay None when no path is configured).
    self._region_map = {'main': None, 'sub': None}
    if self.config.dataset.region_main_path:
      with open(self.config.dataset.region_main_path, 'r') as f:
        self._region_map['main'] = load_region_maps(f)
    if self.config.dataset.region_sub_path:
      with open(self.config.dataset.region_sub_path, 'r') as f:
        self._region_map['sub'] = load_region_maps(f)
def optimizer(self):
config_opt = self.config.optimizer
kwargs = config_opt.kwargs.to_dict()
kwargs['learning_rate'] = self._learning_rate_fn
opt = getattr(optax, config_opt.name)(**kwargs)
if hasattr(config_opt, 'clip_adaptive') and config_opt.clip_adaptive:
if config_opt.clip_level > 0.:
opt = optax.chain(adaptive_grad_clip(config_opt.clip_level), opt)
elif config_opt.clip_level > 0.:
opt = optax.chain(optax.clip_by_global_norm(config_opt.clip_level), opt)
return opt
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, **unused_args):
"""See base class."""
if self._train_input is None:
self._initialize_train(rng)
batch = next(self._train_input)
(self._params, self._opt_state, scalars) = (
self._update_func(self._params, self._opt_state, global_step, batch,
rng))
scalars = jl_utils.get_first(scalars)
return scalars
  def _initialize_train(self, rng):
    """Initializes parameters/optimizer state (if needed) and the train input."""
    # Check we haven't already restored params
    if self._params is None:
      logging.info(
          'Initializing parameters rather than restoring from checkpoint.')
      # One real batch is needed to trace the model's input shapes.
      batch = next(self._build_train_input())
      rng = jl_utils.get_first(rng)
      params_rng, dropout_rng = jax.random.split(rng)
      params_rng = jl_utils.bcast_local_devices(params_rng)
      dropout_rng = jl_utils.bcast_local_devices(dropout_rng)
      init_net = jax.pmap(
          functools.partial(self.forward.init, is_training=True))
      self._params = init_net({
          'params': params_rng,
          'dropout': dropout_rng
      },
                              text_char=batch['text_char'],
                              text_word=batch['text_word'])
      init_opt = jax.pmap(self._opt_init)
      self._opt_state = init_opt(self._params)
    # Prefetch on a background thread and double-buffer onto the device.
    self._train_input = jl_utils.py_prefetch(self._build_train_input)
    self._train_input = jl_utils.double_buffer_on_gpu(self._train_input)
  def _build_train_input(self):
    """See base class.

    Returns:
      A numpy iterator of training batches shaped
      [local_device_count, per_device_batch_size, ...].

    Raises:
      ValueError: if the global batch size is not divisible by the number
        of devices.
    """
    num_devices = jax.device_count()
    global_batch_size = self.config.training.batch_size
    per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
    logging.info(
        'num_devices: %d, per_device_batch_size: %d, global_batch_size: %d',
        num_devices, per_device_batch_size, global_batch_size)
    if ragged:
      raise ValueError(
          f'Global batch size {global_batch_size} must be divisible by '
          f'num devices {num_devices}')
    config_dataset = self.config.dataset
    with open(config_dataset.dataset_path) as dataset_file:
      ds = dataloader.loader_tf(
          per_device_batch_size,
          config_dataset,
          self._region_map,
          alphabet=self._alphabet,
          dataset_file=dataset_file,
          mode='train')
    # Add a leading device axis so each local device gets its own slice.
    ds = ds.batch(jax.local_device_count())
    return iter(tfds.as_numpy(ds))
def _loss_fn(self, params, batch, global_step, rng):
text_char = batch['text_char']
text_word = batch['text_word']
text_unmasked = batch['text_unmasked']
text_mask = batch['text_mask']
next_sentence_mask = batch['next_sentence_mask']
next_sentence_label = batch['next_sentence_label']
subregion = batch['region_sub_id']
date_min = batch['date_min']
date_max = batch['date_max']
date_dist = batch['date_dist']
date_available = batch['date_available']
eps = 1e-6
(date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
params,
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
is_training=True,
rngs={'dropout': rng})
date_loss = 0.
subregion_loss = 0.
subregion_accuracy = 0.
mask_loss = 0.
mask_accuracy = 0.
nsp_loss = 0.
nsp_accuracy = 0.
# Date loss
if self.config.loss.date.enabled:
if self.config.loss.date.label_smoothing > 0:
date_dist_prob = jnp.exp(date_dist) # logprob to prob
date_dist_prob_smooth = date_dist_prob * jax.random.uniform(
rng,
shape=date_dist_prob.shape,
dtype=date_dist_prob.dtype,
minval=1 - self.config.loss.date.label_smoothing,
maxval=1 + self.config.loss.date.label_smoothing)
date_dist_prob_smooth /= date_dist_prob_smooth.sum(axis=-1)[:,
jnp.newaxis]
date_dist_prob_smooth = jnp.clip(date_dist_prob_smooth, 1e-6, 1)
date_dist = jnp.log(date_dist_prob_smooth)
date_loss = 0.
if 'l1' in self.config.loss.date.type.split('+'):
date_pred_x = jnp.arange(
self.config.dataset.date_min +
self.config.dataset.date_interval / 2,
self.config.dataset.date_max +
self.config.dataset.date_interval / 2,
self.config.dataset.date_interval).reshape(-1, 1)
date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
date_loss_l1_ = jax.vmap(date_loss_l1)(date_pred_val, date_min,
date_max, date_available)
jnp.nan_to_num(date_loss_l1_, copy=False)
date_loss += (
jnp.mean(date_loss_l1_, axis=0) * self.config.loss.date.weight_l1)
if 'dist' in self.config.loss.date.type.split('+'):
date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
date_loss_dist_ *= date_available
jnp.nan_to_num(date_loss_dist_, copy=False)
date_loss += (
jnp.mean(date_loss_dist_, axis=0) *
self.config.loss.date.weight_dist)
date_loss *= linear_weight(global_step, self.config.loss.date.step_start,
self.config.loss.date.step_end)
# Region and subregion loss
if self.config.loss.region.enabled:
subregion_loss = jnp.mean(
cross_entropy_label_smoothing_loss(
subregion_logits,
subregion,
label_smoothing=self.config.loss.region.label_smoothing), 0)
jnp.nan_to_num(subregion_loss, copy=False)
subregion_loss *= self.config.loss.region.weight
subregion_accuracy = jnp.mean(
jnp.argmax(subregion_logits, -1) == subregion)
w = linear_weight(global_step, self.config.loss.region.step_start,
self.config.loss.region.step_end)
subregion_loss *= w
# Mask loss
if self.config.loss.mask.enabled:
mask_loss = jnp.sum(
cross_entropy_label_smoothing_loss(
mask_logits,
text_unmasked,
text_mask,
label_smoothing=self.config.loss.mask.label_smoothing), 1) # [B]
assert mask_loss.ndim == 1
jnp.nan_to_num(mask_loss, copy=False)
mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight # []
mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
mask_logits.dtype)
mask_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(mask_all_accuracy,
text_mask.astype(mask_logits.dtype))),
jnp.sum(text_mask) + eps)
mask_loss *= linear_weight(global_step, self.config.loss.mask.step_start,
self.config.loss.mask.step_end)
# NSP loss
if self.config.loss.nsp.enabled:
nsp_loss = jnp.sum(
jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
next_sentence_label,
next_sentence_mask),
1) # [B]
assert nsp_loss.ndim == 1
jnp.nan_to_num(nsp_loss, copy=False)
nsp_loss = jnp.mean(nsp_loss, 0) * self.config.loss.nsp.weight
nsp_all_accuracy = (jnp.argmax(
nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
nsp_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(nsp_all_accuracy,
next_sentence_mask.astype(nsp_logits.dtype))),
jnp.sum(next_sentence_mask) + eps)
nsp_loss *= linear_weight(global_step, self.config.loss.nsp.step_start,
self.config.loss.nsp.step_end)
loss = date_loss + subregion_loss + mask_loss + nsp_loss
scaled_loss = loss / jax.device_count()
# NOTE: We use scaled_loss for grads and unscaled for logging.
return scaled_loss, (loss, date_loss, subregion_loss, subregion_accuracy,
mask_loss, mask_accuracy, nsp_loss, nsp_accuracy)
  def _update_func(self, params, opt_state, global_step, batch, rng):
    """Applies an update to parameters and returns new state."""
    # This function computes the gradient of the first output of loss_fn and
    # passes through the other arguments unchanged.
    grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
    scaled_grads, (loss, date_loss, subregion_loss, subregion_accuracy,
                   mask_loss, mask_accuracy, nsp_loss,
                   nsp_accuracy) = grad_loss_fn(params, batch, global_step, rng)
    # Sanitize NaN/inf gradients before summing across devices.
    scaled_grads = jax.tree_map(jnp.nan_to_num, scaled_grads)
    # Sum the per-device (pre-scaled) gradients to get the global average.
    grads = jl_utils.tree_psum(scaled_grads, axis_name='i')
    # Compute and apply updates via our optimizer.
    learning_rate = self._learning_rate_fn(global_step)
    updates, opt_state = self._opt_update(grads, opt_state, params=params)
    params = optax.apply_updates(params, updates)
    # Scalars to log (note: we log the mean across all hosts/devices).
    scalars = {
        'loss/train': loss,
        'loss/date': date_loss,
        'loss/subregion': subregion_loss,
        'loss/mask': mask_loss,
        'loss/nsp': nsp_loss,
        'accuracy/subregion': subregion_accuracy,
        'accuracy/mask': mask_accuracy,
        'accuracy/nsp': nsp_accuracy,
        'opt/learning_rate': learning_rate,
        'opt/grad_norm': optax.global_norm(grads),
        'opt/param_norm': optax.global_norm(params),
    }
    scalars = jax.lax.pmean(scalars, axis_name='i')
    return params, opt_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, **unused_kwargs):
"""See base class."""
if self._eval_input is None:
self._initialize_eval()
global_step = np.array(jl_utils.get_first(global_step))
summary, outputs = self._eval_epoch(jl_utils.get_first(rng))
for k, v in summary.items():
summary[k] = np.array(v)
score = summary['score/eval']
logging.info('[Step %d] eval_score=%.2f', global_step, score)
# Log outputs
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
outputs_path = os.path.join(checkpoint_dir, 'best_outputs.pkl.bz2')
score_path = os.path.join(checkpoint_dir, 'best_score.txt')
model_log_path = os.path.join(checkpoint_dir, 'model_log')
best_model_log_path = os.path.join(checkpoint_dir, 'best_model_log')
# Check for preexisting outputs
best_score = None
best_step = None
if os.path.exists(score_path):
with open(score_path, 'r') as f:
tok = f.read().strip().split(' ')
best_step = int(tok[0])
best_score = float(tok[1])
# Store outputs if score is better
if best_score is None or (score > best_score and global_step > best_step):
best_score = score
with open(score_path, 'w') as f:
f.write(f'{global_step} {best_score}')
with open(outputs_path, 'wb') as f:
outputs_pkl = pickle.dumps(outputs, protocol=2)
outputs_pkl_bz2 = bz2.compress(outputs_pkl)
f.write(outputs_pkl_bz2)
if self.config.evaluation.store_model_log:
if os.path.isdir(best_model_log_path):
map(os.remove, glob.glob(best_model_log_path + '/*'))
else:
os.makedirs(best_model_log_path)
distutils.dir_util.copy_tree(model_log_path, best_model_log_path)
logging.info('[Step %d] Writing eval outputs: %s.', global_step,
outputs_path)
# Log best score
summary['score/eval_best'] = best_score
return summary
def _initialize_eval(self):
self._eval_input = jl_utils.py_prefetch(self._build_eval_input)
  def _build_eval_input(self):
    """Builds the evaluation input pipeline.

    Returns:
      A numpy iterator over evaluation batches (no leading device axis,
      unlike the training input).
    """
    config_dataset = self.config.dataset
    with open(config_dataset.dataset_path) as dataset_file:
      ds = dataloader.loader_tf(
          self.config.evaluation.batch_size,
          config_dataset,
          self._region_map,
          alphabet=self._alphabet,
          dataset_file=dataset_file,
          mode=self.config.evaluation.mode)
    return iter(tfds.as_numpy(ds))
  def _eval_batch(self, params, batch, rng):
    """Evaluates a batch.

    Unlike the training loss, per-task losses/accuracies here are summed
    (not averaged) together with per-task counts so the caller can
    normalize across the whole epoch.

    Returns:
      (scalars, outputs, model_log): summed scalar metrics and counts,
      per-example model outputs, and an (empty) model log dict.
    """
    phi_id = batch['id']
    text_char = batch['text_char']
    text_word = batch['text_word']
    text_unmasked = batch['text_unmasked']
    text_mask = batch['text_mask']
    next_sentence_mask = batch['next_sentence_mask']
    next_sentence_label = batch['next_sentence_label']
    subregion = batch['region_sub_id']
    date_min = batch['date_min']
    date_max = batch['date_max']
    date_dist = batch['date_dist']
    date_available = batch['date_available']
    # with hlogging.context() as log:
    (date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
        params,
        text_char=text_char,
        text_word=text_word,
        text_char_onehot=None,
        text_word_onehot=None,
        is_training=False,
        rngs={'dropout': rng})
    # Log model weights
    model_log = {}
    subregion_loss = 0.
    subregion_accuracy = 0.
    date_loss = 0.
    date_l1_loss = 0.
    nsp_loss = 0.
    nsp_accuracy = 0.
    # eps = 1e-6
    date_count = 0
    mask_count = 0
    nsp_count = 0
    # Date loss
    if self.config.loss.date.enabled:
      # Bin centers of the date histogram.
      date_pred_x = jnp.arange(
          self.config.dataset.date_min + self.config.dataset.date_interval / 2,
          self.config.dataset.date_max + self.config.dataset.date_interval / 2,
          self.config.dataset.date_interval).reshape(-1, 1)
      date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
      date_l1_loss = jnp.sum(
          jax.vmap(date_loss_l1)(date_pred_val, date_min, date_max,
                                 date_available),
          axis=0)
      if 'l1' in self.config.loss.date.type.split('+'):
        date_loss += date_l1_loss * self.config.loss.date.weight_l1
      if 'dist' in self.config.loss.date.type.split('+'):
        date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
        date_loss_dist_ *= date_available
        date_loss += (
            jnp.sum(date_loss_dist_, axis=0) *
            self.config.loss.date.weight_dist)
      date_count = jnp.sum(date_available)
    # Region and subregion loss
    if self.config.loss.region.enabled:
      subregion_loss = jnp.sum(
          cross_entropy_loss(subregion_logits, subregion), 0)
      subregion_loss *= self.config.loss.region.weight
      subregion_accuracy = jnp.mean(
          jnp.argmax(subregion_logits, -1) == subregion)
    # Mask loss
    if self.config.loss.mask.enabled:
      mask_loss = jnp.sum(
          cross_entropy_label_smoothing_loss(
              mask_logits, text_unmasked, text_mask, label_smoothing=0),
          1)  # [B]
      # mask_loss /= jnp.sum(text_mask, axis=1) + eps  # [B]
      assert mask_loss.ndim == 1
      mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight  # []
      mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
          mask_logits.dtype)
      mask_accuracy = jnp.sum(
          jnp.multiply(mask_all_accuracy, text_mask.astype(mask_logits.dtype)))
      mask_count = jnp.sum(text_mask)
    # NSP loss
    if self.config.loss.nsp.enabled:
      nsp_loss = jnp.sum(
          jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
                                                      next_sentence_label,
                                                      next_sentence_mask),
          1)  # [B]
      assert nsp_loss.ndim == 1
      nsp_loss = jnp.sum(nsp_loss, 0) * self.config.loss.nsp.weight
      nsp_all_accuracy = (jnp.argmax(
          nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
      nsp_accuracy = jnp.sum(
          jnp.multiply(nsp_all_accuracy,
                       next_sentence_mask.astype(nsp_logits.dtype)))
      nsp_count = jnp.sum(next_sentence_mask)
    # Outputs
    scalars = {
        'score/eval':
            (mask_accuracy + subregion_accuracy - date_l1_loss * 0.01),
        'loss/eval': mask_loss + date_loss + subregion_loss,
        'loss/date': date_loss,
        'loss/date_l1': date_l1_loss,
        'loss/subregion': subregion_loss,
        'loss/mask': mask_loss,
        'loss/nsp': nsp_loss,
        'count/date': date_count,
        'count/nsp': nsp_count,
        'count/mask': mask_count,
        'accuracy/subregion': subregion_accuracy,
        'accuracy/mask': mask_accuracy,
        'accuracy/nsp': nsp_accuracy,
    }
    # float16 keeps the pickled best_outputs file small.
    outputs = {
        'outputs/id': phi_id,
        'outputs/date_pred': date_pred.astype('float16'),
        'outputs/date_min': date_min,
        'outputs/date_max': date_max,
        'outputs/date_dist': date_dist.astype('float16'),
        'outputs/date_available': date_available,
        'outputs/subregion_logits': subregion_logits.astype('float16'),
        'outputs/subregion': subregion,
    }
    return scalars, outputs, model_log
def _eval_epoch(self, rng):
"""Evaluates an epoch."""
summary = {}
outputs = {}
total_num_sequences = 0
# Prepare directories for storing model log
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
model_log_path = os.path.join(checkpoint_dir, 'model_log')
if self.config.evaluation.store_model_log:
if os.path.isdir(model_log_path):
map(os.remove, glob.glob(model_log_path + '/*'))
else:
os.makedirs(model_log_path)
# Checkpoints broadcast for each local device
params = jl_utils.get_first(self._params)
# Model log buffer initialisation
model_log_buffer = []
def _flush_model_log_buffer(model_log_buffer):
"""Writes model log to bz2 pickle files."""
while model_log_buffer:
model_log_batch_path, model_log_pkl_bz2 = model_log_buffer.pop(0)
with open(model_log_batch_path, 'wb') as f:
f.write(model_log_pkl_bz2)
# Converting to numpy here allows us to reset the generator
for batch in self._eval_input():
# Make sure that the input has batch_dim=1
assert batch['text_char'].shape[0] == 1
summary_batch, outputs_batch, model_log_batch = self._eval_batch(
params, batch, rng)
# Append batch values to dictionary
for k, v in summary_batch.items():
summary[k] = summary.get(k, 0) + v
for k, v in outputs_batch.items():
outputs.setdefault(k, []).append(v)
total_num_sequences += self.config.evaluation.batch_size
# Store model log per batch
if self.config.evaluation.store_model_log:
# Append to buffer
model_log_batch_path = os.path.join(
model_log_path,
str(outputs_batch['outputs/id'][0]) + '.pkl.bz2')
model_log_pkl = pickle.dumps(model_log_batch, protocol=2)
model_log_pkl_bz2 = bz2.compress(model_log_pkl)
model_log_buffer += [(model_log_batch_path, model_log_pkl_bz2)]
# Flush model log buffer
if (len(model_log_buffer) %
self.config.evaluation.store_model_log_steps == 0):
_flush_model_log_buffer(model_log_buffer)
# Flush remaining model log buffer
if self.config.evaluation.store_model_log:
_flush_model_log_buffer(model_log_buffer)
# Normalise and concatenate
summary['loss/date'] /= summary['count/date']
summary['loss/date_l1'] /= summary['count/date']
summary['loss/mask'] /= summary['count/mask']
summary['accuracy/mask'] /= summary['count/mask']
summary['loss/nsp'] /= summary['count/nsp']
summary['accuracy/nsp'] /= summary['count/nsp']
summary['loss/subregion'] /= total_num_sequences
summary['accuracy/subregion'] /= total_num_sequences
summary['score/eval'] = (
summary['accuracy/mask'] + summary['accuracy/subregion'] -
summary['loss/date_l1'] * 0.01)
summary['loss/eval'] = (
summary['loss/mask'] + summary['loss/date'] + summary['loss/subregion'])
for k, v in outputs.items():
outputs[k] = np.concatenate(v, axis=0)
return summary, outputs
if __name__ == '__main__':
  # --config is mandatory: jaxline reads the experiment ConfigDict from it.
  flags.mark_flag_as_required('config')
  app.run(functools.partial(platform.main, Experiment))
| 1.53125 | 2 |
firmware/set_wind_speed.py | mastensg/windportal | 1 | 12770291 | #!/usr/bin/env python3
import sys
import tkinter as tk
import time
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
import pandas as pd
import wp_ipc
import wp_gust
##############################################################################
def plot():
    """Render a pandas line plot of ``data`` onto the Tk canvas.

    Returns:
        The tk.PhotoImage backing the canvas image; the caller must keep a
        reference to it or Tk garbage-collects the bitmap.

    NOTE(review): this function reads the module globals ``data`` and
    ``the_canvas``, neither of which is defined at module scope in this
    file -- confirm how callers are expected to provide them.
    """
    fig = mpl.figure.Figure(figsize=(3, 2))
    ax = fig.add_subplot(111)
    figure_canvas_agg = FigureCanvasAgg(fig)
    # Bug fix: pandas is imported as ``pd``; the bare name ``pandas`` raised
    # NameError here.
    series = pd.Series(data)
    series.plot(ax=ax)
    figure_canvas_agg.draw()
    figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds
    figure_w, figure_h = int(figure_w), int(figure_h)
    global the_fig_photo
    the_fig_photo = tk.PhotoImage(
        master=the_canvas, width=figure_w, height=figure_h)
    # Position: convert from top-left anchor to center anchor
    loc = (0, 0)
    the_canvas.delete("all")
    the_canvas.create_image(
        loc[0] + figure_w / 2, loc[1] + figure_h / 2, image=the_fig_photo)
    tkagg.blit(
        the_fig_photo, figure_canvas_agg.get_renderer()._renderer, colormode=2)
    return the_fig_photo  # XXX: has to be held
def setup_gui():
    """Build the main Tk window with its canvas and parameter sliders.

    Returns:
        (window, inputs, outputs) where ``inputs`` maps names of user-set
        parameters to their Scale widgets and ``outputs`` maps names of
        simulation results to the widgets that display them.
    """

    def _make_scale(parent, label, lo, hi, step, initial=None):
        """Create, pack and optionally initialise a horizontal Scale."""
        scale = tk.Scale(
            parent,
            from_=lo,
            to=hi,
            resolution=step,
            orient=tk.HORIZONTAL,
            label=label,
            length=300,
            command=None)
        scale.pack()
        if initial is not None:
            scale.set(initial)
        return scale

    the_window = tk.Tk()
    the_window.title("A figure in a canvas")
    the_window.bind('<Escape>', sys.exit)

    # NOTE(review): plot() references a module-level ``the_canvas``; this one
    # is local to setup_gui -- confirm how the two are meant to connect.
    the_canvas = tk.Canvas(the_window, width=300, height=200)
    the_canvas.pack()

    # The six sliders, packed in the original top-to-bottom order.
    the_wind_speed = _make_scale(
        the_window, "wind speed", 0.0, 40.0, 0.1, initial=20.0)
    the_potentiometer = _make_scale(
        the_window, "potentiometer", 0.0, 1.0, 0.01, initial=0.5)
    the_perturbation_period = _make_scale(
        the_window, "perturbation period (ms)", 10, 10000, 1, initial=1000)
    the_perturbation_amplitude = _make_scale(
        the_window, "perturbation amplitude", 0.0, 1.0, 0.001, initial=0.1)
    the_perturbation = _make_scale(
        the_window, "perturbation", -1.0, 1.0, 0.01)
    the_fan_duty = _make_scale(
        the_window, "fan duty", 0.0, 1.0, 0.01)

    tk.Label(the_window, text="\nsimulation\n").pack()
    #the_fan_duty
    inputs = {
        'perturbation_period': the_perturbation_period,
        'perturbation_amplitude': the_perturbation_amplitude,
        'wind_speed': the_wind_speed,
        'scale': the_potentiometer,
    }
    outputs = {
        'fan_duty': the_fan_duty,
        'perturbation': the_perturbation,
    }
    return the_window, inputs, outputs
##############################################################################
def update_inputs_gui(inputs, widgets):
    """Copy each widget's current value, coerced to float, onto *inputs*.

    ``widgets`` maps attribute names to objects with a ``get()`` method;
    the values land directly in ``inputs.__dict__``.
    """
    inputs.__dict__.update(
        (name, float(widget.get())) for name, widget in widgets.items())
def set_outputs_gui(widgets, state):
    """Push each matching attribute of *state* into its output widget."""
    for name in widgets:
        widgets[name].set(state.__dict__[name])
def main():
    """Run the simulation control loop: read widgets/IPC, step the model,
    write the results back to the GUI and the IPC bus."""
    # Period of the Tk ``after`` loop, in milliseconds.
    loop_interval = 100
    state = wp_gust.State()
    inputs = wp_gust.Inputs()
    ipc_session = wp_ipc.Session()
    window, input_widgets, output_widgets = setup_gui()
    def loop():
        # ``state`` is rebound each iteration, hence the nonlocal.
        nonlocal state
        inputs.__dict__['time'] = time.time()
        wp_gust.update_inputs_ipc(inputs, ipc_session)
        update_inputs_gui(inputs, input_widgets)
        state = wp_gust.next_state(state, inputs)
        set_outputs_gui(output_widgets, state)
        ipc_session.send("wind_speed", inputs.wind_speed)
        # Re-schedule itself; Tk drives the periodic loop.
        window.after(loop_interval, loop)
    loop()
    tk.mainloop()
if __name__ == '__main__':
    # Script entry point: start the GUI control loop.
    main()
| 2.5 | 2 |
passl/modeling/heads/simclr_contrastive_head.py | juneweng/PASSL | 136 | 12770292 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from .builder import HEADS
import paddle.nn.functional as F
import paddle.fluid.layers as layers
LARGE_NUM = 1e9  # subtracted from masked logits so softmax effectively ignores self-pairs
@HEADS.register()
class SimCLRContrastiveHead(nn.Layer):
    """Head for contrastive learning.
    Computes the SimCLR-style contrastive (NT-Xent) loss between two
    augmented views, plus a symmetric-KL consistency term (weighted 3x
    in the total loss below).
    Args:
        temperature (float): The temperature hyper-parameter that
            controls the concentration level of the distribution.
            Default: 0.5.
        return_accuracy (bool): stored on the instance; not read within
            this class.
        multi_rank (bool): when True, gather features from all trainers
            before computing the loss.
    """
    def __init__(self, temperature=0.5, return_accuracy=True, multi_rank=False):
        super(SimCLRContrastiveHead, self).__init__()
        # NOTE(review): ``criterion`` is never used below — the loss is
        # computed directly with softmax_with_cross_entropy.
        self.criterion = nn.CrossEntropyLoss()
        self.temperature = temperature
        self.return_accuracy = return_accuracy
        self.multi_rank = multi_rank
    def forward(self, pos, neg):
        """Forward head.
        Args:
            pos (Tensor): features of the first augmented view.
            neg (Tensor): features of the second augmented view.
        Returns:
            dict[str, Tensor]: 'loss' and top-1 contrastive accuracy 'acc1'.
        """
        # Despite the parameter names, ``pos``/``neg`` are treated below as
        # the two views (hidden1/hidden2), not positive/negative similarities.
        hidden1, hidden2 = pos, neg
        batch_size = pos.shape[0]
        # Gather hidden1/hidden2 across replicas and create local labels.
        if self.multi_rank is True:
            # NOTE(review): ``self.co2`` is never assigned in this class, so
            # this branch raises AttributeError as written — confirm before
            # enabling multi_rank.
            hidden1_large = self.add_allgather(hidden1, "hidden1"+str(self.co2))
            hidden2_large = self.add_allgather(hidden2, "hidden2"+str(self.co2))
            hidden1_large = paddle.reshape(hidden1_large,
                                           [-1, hidden1_large.shape[-1]])
            hidden2_large = paddle.reshape(hidden2_large,
                                           [-1, hidden2_large.shape[-1]])
            enlarged_batch_size = paddle.shape(hidden1_large)[0]
            # NOTE(review): ``self.args`` is also not set anywhere visible here.
            trainer_id = self.args.trainer_id
            # Labels point at this trainer's slice of the gathered batch.
            labels_idx = paddle.arange(0, batch_size, 1,
                                       "int32") + trainer_id * batch_size
            labels = F.one_hot(
                paddle.reshape(labels_idx, [batch_size]),
                enlarged_batch_size * 2)
            masks = F.one_hot(
                paddle.reshape(labels_idx, [batch_size]),
                enlarged_batch_size)
        else:
            # Single-rank case: each sample's positive is its own index.
            hidden1_large = hidden1
            hidden2_large = hidden2
            labels = F.one_hot(
                paddle.reshape(
                    paddle.arange(0, batch_size, 1, "int32"), [batch_size]),
                batch_size * 2)
            masks = F.one_hot(
                paddle.reshape(
                    paddle.arange(0, batch_size, 1, "int32"), [batch_size]),
                batch_size)
        # Within-view similarities; self-similarity is masked out with
        # a large negative offset so softmax ignores it.
        logits_aa = paddle.matmul(
            hidden1, hidden1_large, transpose_y=True) / self.temperature
        logits_aa = logits_aa - masks * LARGE_NUM
        logits_bb = paddle.matmul(
            hidden2, hidden2_large, transpose_y=True) / self.temperature
        logits_bb = logits_bb - masks * LARGE_NUM
        # Cross-view similarities (diagonal entries are the positives).
        logits_ab = paddle.matmul(
            hidden1, hidden2_large, transpose_y=True) / self.temperature
        logits_ba = paddle.matmul(
            hidden2, hidden1_large, transpose_y=True) / self.temperature
        # NT-Xent loss, symmetrised over both view orderings.
        loss_a = paddle.nn.functional.softmax_with_cross_entropy(
            paddle.concat([logits_ab, logits_aa], 1), labels, soft_label=True)
        loss_b = paddle.nn.functional.softmax_with_cross_entropy(
            paddle.concat([logits_ba, logits_bb], 1), labels, soft_label=True)
        contrast_loss = loss_a + loss_b
        # CO2 consistency term: symmetric KL between the two views'
        # softmax distributions, with the positive pair masked out.
        logits_ab_co2 = logits_ab - masks * LARGE_NUM
        logits_ba_co2 = logits_ba - masks * LARGE_NUM
        logit_a = paddle.concat([logits_aa, logits_ab_co2], 1)
        logit_b = paddle.concat([logits_ba_co2, logits_bb], 1)
        log_a = paddle.nn.functional.log_softmax(logit_a)
        log_b = paddle.nn.functional.log_softmax(logit_b)
        a = paddle.nn.functional.softmax(logit_a)
        b = paddle.nn.functional.softmax(logit_b)
        kl_1 = paddle.nn.functional.kl_div(log_a, b, reduction='batchmean')
        kl_2 = paddle.nn.functional.kl_div(log_b, a, reduction='batchmean')
        co2_loss = 1 * (kl_1 + kl_2)
        # Hard-coded weight of 3 on the consistency term.
        total_contrast_loss = contrast_loss + 3 * co2_loss
        loss = layers.reduce_mean(total_contrast_loss)
        contrastive_label = paddle.unsqueeze(
            paddle.argmax(
                labels, axis=1), 1)
        acc1 = layers.accuracy(input=logits_ab, label=contrastive_label)
        outputs = dict()
        outputs['loss'] = loss
        outputs['acc1'] = acc1
        return outputs
    # NOTE(review): missing ``self`` — calling this as an instance method
    # binds the instance to ``output``.  It appears unused within this class;
    # confirm whether it should be a @staticmethod.
    def accuracy(output, target, topk=(1, )):
        """Computes the accuracy over the k top predictions for the specified values of k"""
        with paddle.no_grad():
            maxk = max(topk)
            batch_size = target.shape[0]
            _, pred = output.topk(maxk, 1, True, True)
            pred = pred.t()
            correct = paddle.cast(pred == target.reshape([1, -1]).expand_as(pred),
                                  'float32')
            res = []
            for k in topk:
                correct_k = correct[:k].reshape([-1]).sum(0, keepdim=True)
                res.append(correct_k * 100.0 / batch_size)
            return res
    def add_allgather(self, hidden, name=""):
        """Insert a c_allgather op that collects *hidden* from every trainer.
        NOTE(review): ``self._train_program``, ``core`` and ``OpRole`` are not
        defined in this file's visible scope — this static-graph path looks
        stale; verify before use.
        """
        block = self._train_program.global_block()
        hidden_large = block.create_var(
            name=name,
            shape=[self.args.trainer_num] + list(hidden.shape),
            persistable=False,
            dtype=core.VarDesc.VarType.FP32)
        op_len = len(list(enumerate(block.ops)))
        op_maker = core.op_proto_and_checker_maker
        self.op_role_key = op_maker.kOpRoleAttrName()
        block._insert_op(
            op_len,
            type='c_allgather',
            inputs={'X': hidden},
            outputs={'Out': hidden_large},
            attrs={
                'nranks': self.args.trainer_num,
                self.op_role_key: OpRole.Forward,
                "use_calc_stream": True
            })
        return hidden_large
| 2.234375 | 2 |
PlotExamples/title_gitrev.py | scienceopen/python-examples | 5 | 12770293 | #!/usr/bin/env python
"""
example of putting git short revision in matplotlib plot, up in the corner
(rather than in title where git revision text is too large)
This is helpful for when a colleague wants a plot exactly recreated from a year ago,
to help find the exact code used to create that plot.
http://matplotlib.org/api/figure_api.html
"""
import subprocess
from matplotlib.pyplot import figure, show
try:
    # Short revision of HEAD; stamped on the figure for reproducibility.
    gitrev = subprocess.check_output(
        ["git", "rev-parse", "--short", "HEAD"], universal_newlines=True
    ).strip("\n")
except Exception:  # maybe they don't have git installed
    gitrev = ""
fg = figure()
ax = fg.gca()
ax.plot([1, 2])
ax.set_title("my cool plot")
# Put the revision vertically in the top-right corner rather than in the
# title, where it would be distractingly large (see module docstring).
fg.text(1.0, 1.0, "git: " + gitrev, ha="right", va="top", rotation="vertical")
show()
| 2.625 | 3 |
disp/cli/cmd_tools.py | zhubonan/disp | 1 | 12770294 | <filename>disp/cli/cmd_tools.py
"""
Collection of useful tools
"""
import click
from ase.io import read
from disp.tools.modcell import modify_cell
@click.group('tools')
@click.pass_context
def tools(ctx):
    """Collection of tools"""
    # Group entry point only — the click context is intentionally unused.
    _ = ctx
@tools.command('modcell')
@click.argument('base_cell')
@click.argument('other_cell')
def modcell(base_cell, other_cell):
    """Modify the structure of a CELL file using another"""
    # Read OTHER_CELL with ASE, splice its structure into BASE_CELL via
    # modify_cell, and print the resulting cell lines to stdout.
    click.echo('\n'.join(modify_cell(base_cell, read(other_cell))))
| 2.0625 | 2 |
String/784.The Longest Common Prefix II/Solution.py | Zhenye-Na/LxxxCode | 12 | 12770295 | <gh_stars>10-100
class Solution:
    """
    @param words: the n strings
    @param target: the target string
    @return: The ans
    """

    def the_longest_common_prefix(self, words, target):
        # Length of the longest prefix of ``target`` shared with any word.
        best = 0
        for word in words:
            matched = 0
            # zip stops at the shorter string, which covers the
            # out-of-range check for short words.
            for w_ch, t_ch in zip(word, target):
                if w_ch != t_ch:
                    break
                matched += 1
            best = max(best, matched)
        return best
app/base/func.py | edwarts/igenweb_supplier | 0 | 12770296 | # list to csv
def listToCSV(obj):
    """Return the items of *obj* joined into a comma-separated string.

    Each item is converted with ``str``; an empty sequence yields ''.
    The original built the string by repeated concatenation and sliced
    off the trailing comma; ``str.join`` is the linear-time idiom.
    """
    return ','.join(str(x) for x in obj)
# csv to list
def CSVToList(csv):
    """Parse a comma-separated string of integers into a list of ints.

    Note: like the original, an empty string raises ValueError, because
    ``''.split(',')`` yields ``['']``.
    """
    return [int(x) for x in csv.split(',')]
# update a database model object's attributes from a dict
def update_db(obj, d):
    """Copy every key/value pair in ``d`` onto ``obj`` as attributes.

    Raises:
        TypeError: if ``d`` is not a dict.
    """
    if not isinstance(d, dict):
        raise TypeError
    for key, value in d.items():
        setattr(obj, key, value)
| 3.34375 | 3 |
class DogeyError(Exception):
    """Base class for Dogey-specific errors such as InvalidCredentialsError."""
class DogeyCommandError(Exception):
    """Base exception for command failures; surfaced through ``on_command_error``."""

    def __init__(self, command_name: str, message: str, *args):
        """Record which command failed and why.

        Args:
            command_name (str): The name of the command.
            message (str): The message of the exception.
        """
        assert isinstance(command_name, str) and isinstance(message, str)
        self.command_name = command_name
        self.message = message
        super().__init__(command_name, message, *args)
class InvalidCredentialsError(DogeyError):
    """An invalid token/refresh token has been passed to the Dogey client.

    Fixed: this inherited plain ``Exception`` even though the module's base
    ``DogeyError`` docstring names it as a Dogey-specific error; deriving
    from ``DogeyError`` lets callers catch the documented base class.
    (Backward compatible: it is still an ``Exception``.)
    """
class InstanceAlreadyCreated(DogeyError):
    """Raised when a Dogey instance already exists, e.g. on repeated ``.start`` calls."""
class MissingRequiredArgument(DogeyCommandError):
    """Raised when a command invocation omits a required argument."""

    def __init__(self, command_name: str, argument: str):
        """
        Args:
            command_name (str): The command name.
            argument (str): The name of the missing required argument.
        """
        assert isinstance(argument, str)
        self.command_name = command_name
        self.argument = argument
        message = f'"{argument}" is a required argument that is missing.'
        super().__init__(command_name, message)
class CommandNotFound(DogeyCommandError):
    """Raised when an invoked command does not exist."""

    def __init__(self, command_name: str):
        """
        Args:
            command_name (str): The command name.
        """
        assert isinstance(command_name, str)
        self.command_name = command_name
        # Fixed: the original used an f-string with no placeholders.
        super().__init__(command_name, 'The command could not be found.')
class TooManyArguments(DogeyCommandError):
    """Raised when extra arguments are passed to a command."""

    def __init__(self, command_name: str):
        """
        Args:
            command_name (str): The command name.
        """
        assert isinstance(command_name, str)
        self.command_name = command_name
        # Fixed: the original used an f-string with no placeholders.
        super().__init__(command_name, 'Too many arguments have been passed.')
| 3.34375 | 3 |
examples/list_stable_particles.py | tianluyuan/particletools | 2 | 12770298 | from __future__ import print_function
from particletools.tables import (PYTHIAParticleData, c_speed_of_light,
print_stable, make_stable_list)
import math
pdata = PYTHIAParticleData()
# ctau('D0') is the D0 decay length in cm; dividing by c yields its proper
# lifetime in seconds, the threshold handed to print_stable.
print_stable(pdata.ctau('D0') / c_speed_of_light,
             title=('Particles with known finite lifetimes longer '
                    'than that of D0 ({0}cm)').format(pdata.ctau('D0')))
print()
# Names of particles whose proper lifetime exceeds 1e-8 s.
print('Known particles with tau > 1e-8s:', make_stable_list(1e-8))
| 2.34375 | 2 |
network-client/src/gmu/chord/FingerEntry.py | danfleck/Class-Chord | 1 | 12770299 | ''' Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Created on Feb 24, 2014
@author: dfleck
'''
import math
class FingerEntry:
    '''Represents an entry in the finger table.

    Note: Finger indexes go from 0-->m-1 which is different than the
    Chord paper which goes from 1-->m.  Identifiers live on a ring
    modulo 2**m.
    '''

    m = 128  # Number of bits in the identifier space

    def __init__(self, k, n, nodeLocation):
        '''Build finger ``k`` for the node with id ``n``.

        k is the finger table entry.
        n is the node ID of the node holding this entry.
        nodeLocation is the successor for this interval (may be None;
        rendered as -999 by __str__).
        '''
        ring_size = 2 ** FingerEntry.m
        # Chord defines finger[k].start = (n + 2^(k-1)) mod 2^m.  The
        # original code applied the modulo only to the 2^(k-1) term
        # (operator precedence: % binds tighter than +) and used float
        # math.pow, which cannot represent 128-bit ids exactly.  Plain
        # integer arithmetic fixes both.
        self.start = (n + 2 ** (k - 1)) % ring_size
        self.intervalStart = self.start
        self.intervalEnd = (n + 2 ** k) % ring_size
        self.nodeLocation = nodeLocation  # succ in the Chord paper's tables

    def __str__(self):
        if self.nodeLocation is None:
            nodeId = -999
        else:
            nodeId = self.nodeLocation.id
        return "Start:%d End:%d NodeLocation:%d" % (self.start, self.intervalEnd, nodeId)
repos/system_upgrade/el7toel8/actors/opensshconfigscanner/actor.py | sm00th/leapp-repository | 21 | 12770300 | from leapp.actors import Actor
from leapp.libraries.actor import readopensshconfig
from leapp.models import OpenSshConfig
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
class OpenSshConfigScanner(Actor):
    """
    Collect information about the OpenSSH configuration.
    Currently supporting the following options:
    * PermitRootLogin
    * UsePrivilegeSeparation
    * Protocol
    * Ciphers
    * MACs
    """
    # Actor identifier used by the leapp framework.
    name = 'read_openssh_config'
    # This actor consumes no messages and emits OpenSshConfig facts.
    consumes = ()
    produces = (OpenSshConfig, )
    tags = (FactsPhaseTag, IPUWorkflowTag, )
    def process(self):
        # Delegate the sshd_config parsing to the actor library, passing
        # self.produce as the sink for the resulting OpenSshConfig message.
        readopensshconfig.scan_sshd(self.produce)
| 2.5 | 2 |
bin/shell.py | charnley/optimize_gamess_parameters | 1 | 12770301 |
from subprocess import Popen, PIPE
def shell(cmd, shell=False):
    """Run *cmd* and return its captured stdout.

    When ``shell`` is true the command string goes to the system shell;
    otherwise it is tokenised with ``str.split`` and executed directly.
    stderr is captured but discarded.
    """
    argv = cmd if shell else cmd.split()
    process = Popen(argv, shell=shell, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, _stderr = process.communicate()
    return stdout
| 2.96875 | 3 |
app/question/migrations/0001_initial.py | PICT-ACM-Student-Chapter/OJ_API | 2 | 12770302 | # Generated by Django 3.1.4 on 2020-12-22 07:46
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import question.models
class Migration(migrations.Migration):
    """Initial migration: creates the Question and Testcase tables."""

    initial = True
    # Depends on the core app's initial migration (provides core.language).
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                # Questions use a short string primary key rather than an
                # auto-increment id.
                ('id', models.CharField(max_length=10, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=30)),
                ('description', models.TextField()),
                ('score', models.IntegerField()),
                ('input_format', models.TextField(default='')),
                ('output_format', models.TextField(default='')),
                ('constraints', models.TextField(default='')),
                ('correct_code', models.TextField(blank=True, null=True)),
                # Language is optional; deleting it nulls this reference.
                ('correct_code_lang', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.language')),
            ],
        ),
        migrations.CreateModel(
            name='Testcase',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # Input/output fixtures are stored as uploaded files with
                # renamed paths (see question.models upload helpers).
                ('input', models.FileField(upload_to=question.models.upload_input_rename)),
                ('output', models.FileField(upload_to=question.models.upload_output_rename)),
                ('is_public', models.BooleanField(default=False)),
                # Weightage is clamped to 0..10 by validators.
                ('weightage', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)])),
                # Test cases are deleted along with their question.
                ('que_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_cases', to='question.question')),
            ],
        ),
    ]
| 1.820313 | 2 |
src/delete_non_photos.py | thompsonsed/sortphotos | 0 | 12770303 | """
Deletes files in a directory that is not a photo.
"""
import logging
from PIL import Image
import pathlib
def check_image_with_pil(path: pathlib.Path) -> bool:
    """
    Report whether Pillow can open *path* as an image.

    :param pathlib.Path path: path of the file to probe
    :return: True if Pillow accepts the file, False if opening raises IOError.
    :rtype: bool
    """
    try:
        image = Image.open(path)
    except IOError:
        return False
    with image:
        return True
def remove_file_if_image(path: pathlib.Path, test: bool = False) -> None:
    """
    Recursively removes every file under *path* that is NOT a readable image.

    (Despite the name, files that are images are kept.)

    :param pathlib.Path path: the path to the file or directory
    :param bool test: if true, only logs what would be deleted.
    :return: None
    :rtype: None
    """
    if path.is_dir():
        for child in path.iterdir():
            remove_file_if_image(child, test=test)
        return
    if check_image_with_pil(path):
        return
    if test:
        logging.info("Would remove {}.".format(path))
    else:
        logging.info("Removing {}.".format(path))
        path.unlink()
def main():
    """Command-line entry point: parse arguments and clean *src_dir*."""
    import argparse
    # setup command line parsing
    parser = argparse.ArgumentParser(description="Deletes files which cannot be parsed by EXIF.")
    parser.add_argument("src_dir", type=str, help="source directory")
    parser.add_argument("-t", "--test", action="store_true", help="run a test of the removal", dest="test")
    parser.add_argument("-v", "--verbose", action="store_true", help="output logging information", dest="verbose")
    args = parser.parse_args()
    # 20 == logging.INFO, 40 == logging.ERROR.
    if args.verbose:
        logging.getLogger().setLevel(20)
    else:
        logging.getLogger().setLevel(40)
    path = pathlib.Path(args.src_dir)
    if not path.exists():
        raise IOError("Path does not exist at {}.".format(path))
    remove_file_if_image(path, test=args.test)
if __name__ == "__main__":
main()
| 3.453125 | 3 |
src/scripts/ona_service/notification_publisher.py | cvcrckt/ona | 0 | 12770304 | # Copyright 2015 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, unicode_literals
# python builtins
from os import environ as os_environ
from json import dumps
from logging import getLogger, DEBUG, Formatter
from logging.handlers import SysLogHandler
from socket import gethostname, SOCK_DGRAM, SOCK_STREAM
from time import gmtime, sleep
# local
from service import Service
from snmp_handler import SnmpHandler, SNMP_TRAP_PORT, V2 as SNMPV2
from utils import utc, utcnow, persistent_dict
logger = getLogger(__name__)
# Environment variable naming the notification types to forward, and its
# default (space-separated keys of MESSAGE_MAP).
ENV_NOTIFICATION_TYPES = 'OBSRVBL_NOTIFICATION_TYPES'
DEFAULT_NOTIFICATION_TYPES = 'alerts observations'
# Pause between published messages, and the service poll period.
POST_PUBLISH_WAIT_SECONDS = 0.020
UPDATE_INTERVAL_SECONDS = 60
# File backing the persistent "last seen time" state per data type.
STATE_FILE = '.notifications.state'
# Maps each notification type to its API endpoint and log priority.
MESSAGE_MAP = {
    'alerts': {'endpoint': 'alerts', 'priority': 'error'},
    'observations': {'endpoint': 'observations', 'priority': 'info'},
    'alerts-detail': {'endpoint': 'alert-notifications', 'priority': 'error'},
}
# Defaults merged under os.environ by _reload_config (keys get OBSRVBL_ prefix).
CONFIG_DEFAULTS = {
    'syslog_enabled': 'false',
    'syslog_facility': 'user',
    'syslog_format': ('{time} {sensor_hostname} OBSRVBL '
                      '[{facility}.{priority}] {message}'),
    'syslog_server': None,
    'syslog_server_port': 162,
    'syslog_server_protocol': 'udp',
    'snmp_enabled': 'false',
    'snmp_objectid': None,
    'snmp_server': None,
    'snmp_server_port': SNMP_TRAP_PORT,
    'snmp_user': None,
    'snmp_version': SNMPV2,
    'snmpv3_engineid': None,
    'snmpv3_passphrase': None,
}
# translate from human readable config key names to what's in the env
def cfg_format(key):
    """Return the environment-variable name for config *key* (OBSRVBL_ + upper-case)."""
    return 'OBSRVBL_' + key.upper()
# application config
_CONFIG = {}  # populated by _reload_config(); keys are OBSRVBL_* env names
# how we actually read the config
def config(key):
    """Look up the human-readable *key* in the loaded configuration."""
    return _CONFIG[cfg_format(key)]
# how we reload the config
def _reload_config():
    """Rebuild _CONFIG: defaults from CONFIG_DEFAULTS, overridden by os.environ."""
    global _CONFIG
    # .items() instead of the Python-2-only .iteritems(): identical result
    # on Python 2 and also works on Python 3.
    _CONFIG = {cfg_format(k): v for k, v in CONFIG_DEFAULTS.items()}
    _CONFIG.update(os_environ)
def create_logger():
    """Return the 'obsrvbl' logger with handlers built from current config.

    Reloads configuration first, then attaches an SNMP and/or syslog
    handler depending on the *_enabled flags.  Existing handlers are
    discarded so repeated calls don't stack duplicates.
    """
    _reload_config()
    log = getLogger('obsrvbl')
    log.setLevel(DEBUG)
    # Do not bubble records up to the root logger.
    log.propagate = False
    # set up handlers
    log.handlers = []
    if config('snmp_enabled').lower() == 'true':
        log.addHandler(_snmp_log_handler(config))
    if config('syslog_enabled').lower() == 'true':
        log.addHandler(_syslog_log_handler(config, gethostname()))
    return log
def _snmp_log_handler(config):
    """Build an SnmpHandler from the snmp_* configuration values.

    *config* is the module-level accessor function, passed in so tests
    can substitute their own.
    """
    snmp_config = {
        'host': config('snmp_server'),
        'port': int(config('snmp_server_port')),
        'objectID': config('snmp_objectid'),
        'user': config('snmp_user'),
        'version': config('snmp_version'),
        'engineID': config('snmpv3_engineid'),
        'passcode': config('snmpv3_passphrase'),
    }
    return SnmpHandler(**snmp_config)
def _syslog_log_handler(config, hostname):
    """Build a SysLogHandler from the syslog_* configuration values.

    The configured syslog_format template is expanded into a logging
    format string with this sensor's *hostname* baked in; timestamps are
    rendered in UTC with a fixed ISO-like date format.
    """
    host = config('syslog_server')
    port = int(config('syslog_server_port'))
    if config('syslog_server_protocol').lower() == 'tcp':
        socktype = SOCK_STREAM
    else:
        socktype = SOCK_DGRAM
    log_format = config('syslog_format')
    facility = config('syslog_facility')
    handler = SysLogHandler(
        (host, port),
        SysLogHandler.facility_names[facility],
        socktype=socktype,
    )
    # Fill the template placeholders; time/priority/message become
    # %-style logging fields resolved per record.
    log_format = log_format.format(
        time='%(asctime)s.%(msecs)d+00:00',
        sensor_hostname=hostname,
        facility=facility,
        priority='%(levelname)s',
        message='%(message)s'
    )
    SYSLOG_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
    handler.formatter = Formatter(log_format, datefmt=SYSLOG_DATE_FORMAT)
    handler.formatter.converter = gmtime  # UTC
    return handler
class NotificationPublisher(Service):
    """
    Routinely queries Observation infrastructure for new notification events.
    These are then forwarded to the configured syslog or snmp service.
    """
    def __init__(self, *args, **kwargs):
        kwargs.update({
            'poll_seconds': UPDATE_INTERVAL_SECONDS,
        })
        super(NotificationPublisher, self).__init__(*args, **kwargs)
        # Persistent per-data-type query state (last seen timestamp).
        self.state = persistent_dict(STATE_FILE)
        self.logger = create_logger()
        notification_types = os_environ.get(
            ENV_NOTIFICATION_TYPES, DEFAULT_NOTIFICATION_TYPES
        )
        self.notification_types = set(notification_types.split())
    def get_data(self, endpoint, params):
        """Fetch *endpoint* with *params*; return its 'objects' list, or
        None on malformed JSON or an API-reported error."""
        try:
            result = self.api.get_data(endpoint, params).json()
        except ValueError:
            return None
        if 'error' in result:
            return None
        return result['objects']
    def _publish(self, message, priority):
        """Emit one JSON-encoded *message* at *priority* ('info'/'error'),
        logging (but swallowing) handler failures."""
        log_func = getattr(self.logger, priority)
        formatted = dumps(message)
        try:
            log_func(formatted)
        except Exception as ex:
            # NOTE(review): "trying to public" looks like a typo for
            # "publish" — left as-is since it is a runtime log string.
            logger.warning(
                "Got error='%s' when trying to public "
                "priority='%s', message='%s'",
                ex,
                priority,
                message
            )
        else:
            logger.info(
                "Published message, priority='%s', message='%s'",
                priority,
                formatted
            )
    def publish(self, messages, priority):
        """Publish each message in order, pausing briefly between them."""
        for m in messages:
            self._publish(m, priority)
            # Rest a bit before sending the next message
            sleep(POST_PUBLISH_WAIT_SECONDS)
    def execute(self, now=None):
        """One poll cycle: for each configured type, fetch new events since
        the stored timestamp and forward them (no-op without handlers)."""
        if not self.logger.handlers:
            return
        for data_type in self.notification_types:
            if data_type not in MESSAGE_MAP:
                continue
            endpoint = MESSAGE_MAP[data_type]['endpoint']
            priority = MESSAGE_MAP[data_type]['priority']
            try:
                params = self.state[data_type]
            except KeyError:
                # First run for this type: only fetch events from now on.
                params = {'time__gt': utcnow().replace(tzinfo=utc).isoformat()}
                self.state[data_type] = params
            messages = self.get_data(endpoint, params)
            if not messages:
                continue
            # Advance the high-water mark before publishing.
            max_time = max(msg['time'] for msg in messages)
            self.state[data_type] = {'time__gt': max_time}
            self.publish(messages, priority)
if __name__ == '__main__':
    # Run the publisher's poll loop when executed as a script.
    watcher = NotificationPublisher()
    watcher.run()
| 1.710938 | 2 |
tests/devices/test_kogan_switch2.py | lperez31/tuya-local | 0 | 12770305 | <filename>tests/devices/test_kogan_switch2.py
"""Tests for the switch entity."""
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock, patch
from homeassistant.components.switch import DEVICE_CLASS_OUTLET
from homeassistant.const import STATE_UNAVAILABLE
from custom_components.tuya_local.generic.switch import TuyaLocalSwitch
from custom_components.tuya_local.helpers.device_config import TuyaDeviceConfig
from ..const import KOGAN_SOCKET_PAYLOAD2
from ..helpers import assert_device_properties_set
SWITCH_DPS = "1"
TIMER_DPS = "9"
CURRENT_DPS = "18"
POWER_DPS = "19"
VOLTAGE_DPS = "20"
class TestKoganSwitch(IsolatedAsyncioTestCase):
    """Unit tests for the Kogan smart-plug switch entity (kogan_switch2.yaml)."""

    def setUp(self):
        # Patch the device class so no real Tuya device is needed; the
        # entity under test reads its DPS values from self.dps.
        device_patcher = patch("custom_components.tuya_local.device.TuyaLocalDevice")
        self.addCleanup(device_patcher.stop)
        self.mock_device = device_patcher.start()
        cfg = TuyaDeviceConfig("kogan_switch2.yaml")
        switch = cfg.primary_entity
        self.switch_name = switch.name
        self.subject = TuyaLocalSwitch(self.mock_device(), switch)
        self.dps = KOGAN_SOCKET_PAYLOAD2.copy()
        self.subject._device.get_property.side_effect = lambda id: self.dps[id]
    def test_should_poll(self):
        self.assertTrue(self.subject.should_poll)
    def test_name_returns_device_name(self):
        self.assertEqual(self.subject.name, self.subject._device.name)
    def test_friendly_name_returns_config_name(self):
        self.assertEqual(self.subject.friendly_name, self.switch_name)
    def test_unique_id_returns_device_unique_id(self):
        self.assertEqual(self.subject.unique_id, self.subject._device.unique_id)
    def test_device_info_returns_device_info_from_device(self):
        self.assertEqual(self.subject.device_info, self.subject._device.device_info)
    def test_device_class_is_outlet(self):
        self.assertEqual(self.subject.device_class, DEVICE_CLASS_OUTLET)
    def test_is_on(self):
        # Fixed: the original read ``self.dps[SWITCH_DPS] - True`` — a no-op
        # subtraction whose result was discarded — so the "on" half of this
        # test only passed if the fixture payload happened to be on.
        self.dps[SWITCH_DPS] = True
        self.assertTrue(self.subject.is_on)
        self.dps[SWITCH_DPS] = False
        self.assertFalse(self.subject.is_on)
    def test_is_on_when_unavailable(self):
        self.dps[SWITCH_DPS] = None
        self.assertEqual(self.subject.is_on, STATE_UNAVAILABLE)
    async def test_turn_on(self):
        async with assert_device_properties_set(
            self.subject._device, {SWITCH_DPS: True}
        ):
            await self.subject.async_turn_on()
    async def test_turn_off(self):
        async with assert_device_properties_set(
            self.subject._device, {SWITCH_DPS: False}
        ):
            await self.subject.async_turn_off()
    async def test_toggle_turns_the_switch_on_when_it_was_off(self):
        self.dps[SWITCH_DPS] = False
        async with assert_device_properties_set(
            self.subject._device, {SWITCH_DPS: True}
        ):
            await self.subject.async_toggle()
    async def test_toggle_turns_the_switch_off_when_it_was_on(self):
        self.dps[SWITCH_DPS] = True
        async with assert_device_properties_set(
            self.subject._device, {SWITCH_DPS: False}
        ):
            await self.subject.async_toggle()
    def test_current_power_w(self):
        # Power DPS reports in tenths of a watt.
        self.dps[POWER_DPS] = 1234
        self.assertEqual(self.subject.current_power_w, 123.4)
    def test_device_state_attributes_set(self):
        self.dps[TIMER_DPS] = 1
        self.dps[VOLTAGE_DPS] = 2350
        self.dps[CURRENT_DPS] = 1234
        self.dps[POWER_DPS] = 5678
        # NOTE(review): assertCountEqual on two dicts compares only their
        # keys; if the attribute *values* should be checked too, this
        # probably wants assertEqual.  Kept as-is.
        self.assertCountEqual(
            self.subject.device_state_attributes,
            {
                "timer": 1,
                "current_a": 1.234,
                "voltage_v": 235.0,
                "current_power_w": 567.8,
            },
        )
        self.dps[TIMER_DPS] = 0
        self.dps[CURRENT_DPS] = None
        self.dps[VOLTAGE_DPS] = None
        self.dps[POWER_DPS] = None
        self.assertCountEqual(
            self.subject.device_state_attributes,
            {
                "timer": 0,
                "current_a": None,
                "voltage_v": None,
                "current_power_w": None,
            },
        )
    async def test_update(self):
        result = AsyncMock()
        self.subject._device.async_refresh.return_value = result()
        await self.subject.async_update()
        self.subject._device.async_refresh.assert_called_once()
        result.assert_awaited()
| 2.296875 | 2 |
objetto/observers.py | brunonicko/objetto | 8 | 12770306 | # -*- coding: utf-8 -*-
"""Observer mixin class."""
from ._observers import ActionObserver, ActionObserverExceptionData, ActionObserverToken
__all__ = ["ActionObserver", "ActionObserverToken", "ActionObserverExceptionData"]
| 1.28125 | 1 |
base/catalog/urls.py | daavelino/vulnerability-catalog | 12 | 12770307 | from django.urls import include, path
from catalog.admin import admin_site
from django.contrib.auth import logout
from django.conf import settings
from . import views
app_name="catalog"
urlpatterns = [
path('', views.HomeView.as_view(), name='home'),
path('accounts/login/', views.LoginView.as_view(), name='login'),
path('accounts/logout/', views.LogoutView.logout_user, name='logout'),
path('admin/', admin_site.urls),
path('panorama/', views.PanoramaView.as_view(), name='panorama'),
path('resources/upload/getfile/', views.MassiveUpload.uploadData, name='getUploadFile'),
path('resources/data/converter/', views.DataConverter.as_view(), name='converter'),
path('vulnerability/add/', views.AddVulnerability.as_view(), name='addVulnerability'),
path('vulnerability/data/deleteall/', views.RemoveAllVulnerabilities.removeData),
path('vulnerability/data/json/export/', views.JsonExportView.export_database, name='exportData'),
path('vulnerability/data/json/filter/', views.JsonFilterView.get_data, name='jsonfilter'),
path('vulnerability/data/index', views.IndexView.as_view(), name='index'),
path('vulnerability/data/json/<int:num>/', views.JsonDetailView.result),
path('vulnerability/data/json/massiveupload/', views.MassiveUpload.as_view(), name='massiveUpload'),
path('vulnerability/data/panorama/json/', views.PanoramaJsonView.result),
path('vulnerability/delete/<int:pk>/', views.DeleteVulnerability.as_view(), name='deleteVulnerability'),
path('vulnerability/data/delete/', views.DeleteByList.as_view(), name='deleteByList'),
path('vulnerability/detail/<int:pk>/', views.DetailedView.as_view(), name='detail'),
path('vulnerability/detail/json/<int:num>/', views.JsonDetailView.result, name='json_detail'),
path('vulnerability/search/', views.SearchView.search, name='search'),
path('vulnerability/update/fastupdate/<int:pk>/', views.FastUpdateVulnerability.as_view(), name='fastUpdateVulnerability'),
path('vulnerability/update/<int:pk>/', views.UpdateVulnerability.as_view(), name='updateVulnerability'),
path('vulnerability/tinymce/', include('tinymce.urls')),
]
| 1.859375 | 2 |
ynlldb.py | espider/yinuo | 7 | 12770308 | <filename>ynlldb.py
#!/usr/bin/python
# coding:utf-8
# help for debug .net core with lldb
# this work with lldb.like:command script import ~/ynlldb.py
# ref: https://lldb.llvm.org/python-reference.html
# ref: https://lldb.llvm.org/python_reference/index.html
# Copyright (c) 2017, chengliang
# All rights reserved.
import os
import imp
import lldb
import shlex
import datetime
import commands
import argparse
from util.colorstyle import *
from util.exportcontent import *
def __lldb_init_module(debugger, internal_dict):
    """Hook run by lldb on ``command script import ynlldb.py``.

    Prints a banner, dumps target/process info, and registers all
    commands found in the commandlist directory.
    """
    export_content(
        ' %s' %
        use_style_level(
            important_level['high3'],
            'welcome to use ynlldb module.'))
    show_base_info()
    register_lldb_commands()
def show_base_info():
    """Print debugger/target basics (time, OS, executable, pointer size,
    process) and, if libcoreclr.so is loaded, load the .NET SOS plugin."""
    export_content(' ')
    export_content(
        ' time: %s' %
        use_style_level(
            important_level['high3'],
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    # NOTE(review): assumes a RedHat-family system (reads /etc/redhat-release)
    # and the Python-2-only ``commands`` module.
    export_content(
        ' system: %s' %
        use_style_level(
            important_level['high3'],
            commands.getoutput('cat /etc/redhat-release')))
    export_content(
        ' lldb.debugger: %s' %
        use_style_level(
            important_level['high3'],
            lldb.debugger))
    target = lldb.debugger.GetSelectedTarget()
    if target:
        export_content(
            ' target: %s' %
            use_style_level(
                important_level['high3'],
                target))
        # Pointer width in bytes: 8 -> 64-bit, 4 -> 32-bit.
        ptr_size = target.GetAddressByteSize()
        if ptr_size == 8:
            export_content(
                ' target.GetAddressByteSize: %s' %
                use_style_level(
                    important_level['high3'],
                    '64-bit'))
        elif ptr_size == 4:
            export_content(
                ' target.GetAddressByteSize: %s' %
                use_style_level(
                    important_level['high3'],
                    '32-bit'))
        else:
            export_content(
                ' target.GetAddressByteSize: %s' %
                use_style_level(
                    important_level['high3'], '???'))
        export_content(
            ' target.executable: %s' %
            use_style_level(
                important_level['high3'],
                target.executable))
        export_content(
            ' target.executable.basename: %s' %
            use_style_level(
                important_level['high3'],
                target.executable.basename))
        export_content(
            ' target.executable.fullpath: %s' %
            use_style_level(
                important_level['high3'],
                target.executable.fullpath))
        # If the CLR runtime is loaded, look for libsosplugin.so next to it
        # and load it so SOS commands become available.
        for module in target.modules:
            if module.file.basename == 'libcoreclr.so':
                netcorelib_dir = os.path.dirname(module.file.fullpath)
                export_content(
                    ' .net core lib dir: %s' %
                    use_style_level(
                        important_level['high3'],
                        netcorelib_dir))
                libsosplugin_file = os.path.join(
                    netcorelib_dir, 'libsosplugin.so')
                if os.path.exists(libsosplugin_file):
                    lldb.debugger.HandleCommand(
                        'plugin load %s' %
                        libsosplugin_file)
                    export_content(
                        ' libsosplugin file: %s load over' %
                        use_style_level(
                            important_level['high3'],
                            libsosplugin_file))
                else:
                    export_content(
                        ' libsospluginfile no file in %s' %
                        use_style_level(
                            important_level['high3'],
                            libsosplugin_file))
        process = target.GetProcess()
        if process:
            pid = process.id
            export_content(
                ' pid: %s' %
                use_style_level(
                    important_level['high3'],
                    pid))
            export_content(
                ' process: %s' %
                use_style_level(
                    important_level['high3'],
                    process))
    else:
        export_content(
            ' %s' %
            use_style_level(
                important_level['high3'],
                'no target in current debugger, attach -p PID and reload ynlldb.py use command script import'))
def register_lldb_commands():
    """Load every .py module in ./commandlist and register its commands.

    Each module may expose its own register_lldb_commands() returning
    command objects; every command is wrapped by make_run_command, aliased
    into the module's namespace, and registered with lldb's
    ``command script add``.
    """
    target = lldb.debugger.GetSelectedTarget()
    if target:
        current_file = os.path.realpath(__file__)
        current_dir = os.path.dirname(current_file)
        commands_directory = os.path.join(current_dir, 'commandlist')
        export_content(
            ' register commands directory: %s' %
            use_style_level(
                important_level['high3'],
                commands_directory))
        for file in os.listdir(commands_directory):
            file_name, file_extension = os.path.splitext(file)
            if file_extension == '.py':
                # imp.load_source: Python-2-era dynamic import of the module.
                module = imp.load_source(
                    file_name, os.path.join(
                        commands_directory, file))
                module._loadedFunctions = {}
                if hasattr(module, 'register_lldb_commands'):
                    for command in module.register_lldb_commands():
                        # os.path.join(commands_directory, file_name + file_extension)
                        func = make_run_command(command)
                        name = command.name()
                        help_text = command.description().splitlines()[0]
                        key = file_name + '_' + name
                        module._loadedFunctions[key] = func
                        function_name = '__' + key
                        # export_content(' register command name : %s' %
                        # use_style_level(important_level['high3'], key))
                        # alias function name
                        lldb.debugger.HandleCommand(
                            'script ' +
                            function_name +
                            ' = sys.modules[\'' +
                            module.__name__ +
                            '\']._loadedFunctions[\'' +
                            key +
                            '\']')
                        # register name to lldb command
                        lldb.debugger.HandleCommand(
                            'command script add --help "{help}" --function {function} {name}'.format(
                                help=help_text.replace(
                                    '"', '\\"'), function=function_name, name=name))
                else:
                    pass
        # NOTE(review): this ``else`` binds to the ``for`` loop (for-else),
        # so with no ``break`` it always runs after the scan; it reads as if
        # it were meant for the ``if target`` above — confirm intent.
        else:
            export_content('no .py file')
def make_run_command(command):
    """Build the lldb callback for *command*.

    The returned function parses its input string with argparse using the
    command's declared options (honouring mutually-exclusive groups keyed by
    the option's ``mutually`` id) and then invokes ``command.run(parsed,
    remainder)``.
    """
    def _add_option(target_parser, option):
        # Single definition of the argparse wiring.  The original duplicated
        # this call verbatim in three branches, which invited drift.
        target_parser.add_argument(
            option.short,
            option.long,
            dest=option.dest,
            nargs=option.nargs,
            type=option.type,
            help=option.help,
            default=option.default,
            required=option.required)

    def run_command(debugger, input, result, dict):
        split_input = shlex.split(input)
        parser = argparse.ArgumentParser('')
        options = command.options()
        last_group = 0
        group = None
        for option in options:
            if option.mutually > 0:
                if last_group != option.mutually:
                    # A new `mutually` id opens a new exclusive group;
                    # consecutive options with the same id share it.
                    group = parser.add_mutually_exclusive_group()
                _add_option(group, option)
                last_group = option.mutually
            else:
                # Plain option; also resets group tracking so a later
                # option with the same `mutually` id starts a fresh group.
                _add_option(parser, option)
                last_group = 0
        # parse_known_args -> (namespace, unparsed remainder).
        args = parser.parse_known_args(split_input)
        export_content(' %s ' % use_style_level(important_level['low2'], '-------------'))
        command.run(args[0], args[1])

    run_command.__doc__ = help_for_command(command)
    return run_command
def help_for_command(command):
    """Generate the help text shown by lldb for *command*.

    Starts from ``command.description()`` and appends one line per option
    showing its flag(s), destination name, value type and help string.
    """
    help = command.description()
    if command.options():
        help += '\n\nOptions:'
        for option in command.options():
            if option.long and option.short:
                option_flag = option.long + '/' + option.short
            elif option.long:
                # BUG FIX: this branch used `option.longName`, an attribute
                # that does not exist on option objects (every other code
                # path uses `option.long`), so long-only options raised
                # AttributeError when help was generated.
                option_flag = option.long
            else:
                option_flag = option.short
            help += '\n    ' + option_flag + ' '
            if option.type.__name__ == 'str_to_bool':
                # str_to_bool converters are presented to users as bool.
                help += '<' + str(option.dest) + '>; Type: bool'
            else:
                help += '<' + str(option.dest) + '>; Type: ' + option.type.__name__
            help += '; ' + option.help
    return help
| 2.234375 | 2 |
jim/fizzbuzz_objects.py | CorySpitzer/FizzBuzz | 0 | 12770309 | """
fizzbuzz_objects.py (Python 2.7.5)
An object oriented approach to the FizzBuzz problem.
<NAME> | cs.marlboro.edu | Jan 2014 | opensource.org/licenses/MIT
"""
class FizzBuzzInt(object):
    """ An integer with a string representation per the FizzBuzz recipe.
          >>> print FizzBuzzInt(7)
          7
          >>> print FizzBuzzInt(5)
          Buzz
          >>> print FizzBuzzInt(15)
          FizzBuzz
    """
    # Divisor -> word table; extend this dict to add more rules.
    specials = {3: 'Fizz', 5: 'Buzz'}
    # Divisors in ascending order so 'Fizz' always precedes 'Buzz'.
    # FIX: computed once at class creation; the original re-assigned this
    # class attribute on every single __init__ call.
    special_keys = sorted(specials.keys())
    def __init__(self, value=1):
        self.value = value
    def __str__(self):
        result = ''
        for i in FizzBuzzInt.special_keys:
            if self.value % i == 0:
                result += FizzBuzzInt.specials[i]
        # Fall back to the plain number when no divisor matched.
        return result if result else str(self.value)
class FizzBuzzRange(object):
    """ A range of FizzBuzzInts
          >>> print FizzBuzzRange(11, 17)
          11
          Fizz
          13
          14
          FizzBuzz
          16
    """
    def __init__(self, low=1, high=101):
        self.low = low
        self.high = high
    def __str__(self):
        # Render each value in [low, high) then join with newlines, so the
        # result carries no trailing newline (avoids the fence-post bug the
        # original author noted with the append-'\n' approach).
        rendered = [str(FizzBuzzInt(n)) for n in xrange(self.low, self.high)]
        return '\n'.join(rendered)
if __name__ == '__main__':
    # Run the embedded doctests, then print the default 1..100 sequence.
    import doctest
    doctest.testmod()
    print FizzBuzzRange()  # Python 2 print statement
| 3.8125 | 4 |
srv/decorate.py | greenify/zodiacy | 1 | 12770310 | """
Copyright (c) 2011, <NAME>.
License: MIT (see http://www.opensource.org/licenses/mit-license.php for details)
URL: http://www.gtsystem.eu/blog/2011/11/bottle-decorator-for-validate-query-parameters/
"""
from bottle import request
import functools
import inspect
def checkParams(**types):
    """Decorator factory validating bottle query parameters.

    Keyword arguments map parameter names to converter callables, e.g.
    ``@checkParams(page=int)``.  A parameter is treated as *required* when it
    appears in the wrapped function's signature without a default value.

    Raises (from the wrapper, at request time):
        TypeError: a required parameter is missing/empty in the query string.
        ValueError: a supplied value cannot be converted by its callable.
    """
    def decorate(f):
        farg, _, _, def_params = inspect.getargspec(f)
        if def_params is None:
            def_params = []
        # Keep only the no-default (positional) argument names of f: these
        # are the required parameters.
        farg = farg[:len(farg) - len(def_params)]
        param_info = [(par, ptype, par in farg)
                      for par, ptype in types.items()]

        @functools.wraps(f)
        def wrapper(*args, **kargs):
            getparam = request.GET.get
            for par, ptype, required in param_info:
                value = getparam(par)
                if not value:  # None or empty str
                    if required:
                        error = "%s() requires the parameter %s" % (wrapper.__name__, par)
                        raise TypeError(error)
                    continue
                try:
                    kargs[par] = ptype(value)
                except Exception:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt / SystemExit.  Any conversion failure
                    # is reported uniformly as ValueError.
                    error = "Cannot convert parameter %s to %s" % (
                        par, ptype.__name__)
                    raise ValueError(error)
            return f(*args, **kargs)
        return wrapper
    return decorate
| 2.59375 | 3 |
vendors/rez-2.23.1-py2.7/rez/serialise.py | ColinKennedy/tk-config-default2-respawn | 4 | 12770311 | """
Read and write data from file. File caching via a memcached server is supported.
"""
from rez.package_resources_ import package_rex_keys
from rez.utils.scope import ScopeContext
from rez.utils.sourcecode import SourceCode, early, late, include
from rez.utils.logging_ import print_debug
from rez.utils.filesystem import TempDirs
from rez.utils.data_utils import ModifyList
from rez.exceptions import ResourceError, InvalidPackageError
from rez.utils.memcached import memcached
from rez.utils.system import add_sys_paths
from rez.config import config
from rez.vendor.enum import Enum
from rez.vendor import yaml
from contextlib import contextmanager
from inspect import isfunction, ismodule, getargspec
from StringIO import StringIO
import sys
import os
import os.path
tmpdir_manager = TempDirs(config.tmpdir, prefix="rez_write_")
file_cache = {}
class FileFormat(Enum):
    """Supported serialisation formats; each member's value is its extension tuple."""
    py = ("py",)
    yaml = ("yaml",)
    txt = ("txt",)
    # Required for deterministic member ordering on Python 2 enums.
    __order__ = "py,yaml,txt"
    def __init__(self, extension):
        # The single-element value tuple is unpacked into the extension str.
        self.extension = extension
@contextmanager
def open_file_for_write(filepath):
    """Yield a buffer whose contents are written to `filepath` AND a tmpdir
    copy on exit.

    The tmpdir copy is recorded in `file_cache` so later reads of the same
    path can be redirected there, working around NFS setups where a file
    read immediately after being written may be stale.
    """
    buffer = StringIO()
    yield buffer
    text = buffer.getvalue()
    real_path = os.path.realpath(filepath)
    cache_path = os.path.join(tmpdir_manager.mkdtemp(),
                              os.path.basename(real_path))
    # Write the same content to both destinations.
    for destination in (real_path, cache_path):
        with open(destination, 'w') as handle:
            handle.write(text)
    file_cache[real_path] = cache_path
def load_from_file(filepath, format_=FileFormat.py, update_data_callback=None,
                   disable_memcache=False):
    """Load data from a file.

    Note:
        Any functions from a .py file will be converted to `SourceCode` objects.

    Args:
        filepath (str): File to load.
        format_ (`FileFormat`): Format of file contents.
        update_data_callback (callable): Used to change data before it is
            returned or cached.
        disable_memcache (bool): If True, don't r/w to memcache.

    Returns:
        dict.
    """
    filepath = os.path.realpath(filepath)
    cached_copy = file_cache.get(filepath)

    if cached_copy:
        # This process wrote the file; read the /tmp copy to sidestep
        # NFS write-then-read staleness.
        target, loader = cached_copy, _load_file
    elif disable_memcache:
        target, loader = filepath, _load_file
    else:
        target, loader = filepath, _load_from_file

    return loader(filepath=target,
                  format_=format_,
                  update_data_callback=update_data_callback)
def _load_from_file__key(filepath, format_, update_data_callback):
st = os.stat(filepath)
if update_data_callback is None:
callback_key = 'None'
else:
callback_key = getattr(update_data_callback, "__name__", "None")
return str(("package_file", filepath, str(format_), callback_key,
st.st_ino, st.st_mtime))
@memcached(servers=config.memcached_uri if config.cache_package_files else None,
           min_compress_len=config.memcached_package_file_min_compress_len,
           key=_load_from_file__key,
           debug=config.debug_memcache)
def _load_from_file(filepath, format_, update_data_callback):
    # Memcached-backed wrapper around _load_file, keyed on path+inode+mtime
    # (see _load_from_file__key).  Caching is disabled entirely when
    # config.cache_package_files is false (servers=None).
    return _load_file(filepath, format_, update_data_callback)
def _load_file(filepath, format_, update_data_callback):
    """Read and parse `filepath` with the loader registered for `format_`,
    optionally post-processing the result via `update_data_callback`."""
    parse = load_functions[format_]
    if config.debug("file_loads"):
        print_debug("Loading file: %s" % filepath)

    with open(filepath) as handle:
        data = parse(handle, filepath=filepath)

    if update_data_callback:
        data = update_data_callback(format_, data)
    return data
def load_py(stream, filepath=None):
    """Load python-formatted data from a stream.

    The stream is executed (Python 2 `exec ... in globals` statement) inside
    a prepared globals dict exposing the rez DSL helpers (scope/early/late/
    include/ModifyList/InvalidPackageError).  Everything the code defines --
    minus those helpers and builtins -- becomes the result dict.

    Args:
        stream (file-like object).
    Returns:
        dict.
    Raises:
        ResourceError: the code raised; the message includes the traceback
            frames belonging to `filepath` only.
    """
    scopes = ScopeContext()
    g = dict(scope=scopes,
             early=early,
             late=late,
             include=include,
             ModifyList=ModifyList,
             InvalidPackageError=InvalidPackageError)
    try:
        exec stream in g
    except Exception as e:
        import traceback
        frames = traceback.extract_tb(sys.exc_info()[2])
        # Drop leading frames from this module so only frames originating
        # in the loaded file are reported.
        while filepath and frames and frames[0][0] != filepath:
            frames = frames[1:]
        msg = "Problem loading %s: %s" % (filepath, str(e))
        stack = ''.join(traceback.format_list(frames)).strip()
        if stack:
            msg += ":\n" + stack
        raise ResourceError(msg)
    result = {}
    excludes = set(('scope', 'InvalidPackageError', '__builtins__',
                    'early', 'late', 'include', 'ModifyList'))
    # Keep only names the executed code introduced or rebound; names that
    # still reference the builtin of the same name are skipped.
    for k, v in g.iteritems():
        if k not in excludes and \
                (k not in __builtins__ or __builtins__[k] != v):
            result[k] = v
    result.update(scopes.to_dict())
    result = process_python_objects(result, filepath=filepath)
    return result
class EarlyThis(object):
    """The 'this' object for @early bound functions.

    Exposes the raw package data dict through attribute access, while
    refusing to resolve attributes that are themselves early/late binding
    functions (they cannot be evaluated at this point).
    """
    def __init__(self, data):
        self._data = data

    def __getattr__(self, attr):
        _sentinel = object()
        value = self._data.get(attr, _sentinel)
        if value is _sentinel:
            raise AttributeError("No such package attribute '%s'" % attr)

        is_binding_fn = isfunction(value) and (hasattr(value, "_early")
                                               or hasattr(value, "_late"))
        if is_binding_fn:
            raise ValueError(
                "An early binding function cannot refer to another early or "
                "late binding function: '%s'" % attr)
        return value
def process_python_objects(data, filepath=None):
    """Replace certain values in the given package data dict.

    Does things like:
    * evaluates @early decorated functions, and replaces with return value;
    * converts functions into `SourceCode` instances so they can be serialized
      out to installed packages, and evaluated later;
    * strips some values (modules, __-leading variables) that are never to be
      part of installed packages.

    Note: uses Python-2-only function attributes (func_code, func_globals,
    func_name, func_defaults, func_closure).

    Returns:
        dict: Updated dict.
    """
    def _process(value):
        # Recursively transform dict values; function values are evaluated
        # (@early), wrapped in SourceCode (@late / rex keys) or passed
        # through for _trim to strip later.
        if isinstance(value, dict):
            for k, v in value.items():
                value[k] = _process(v)
            return value
        elif isfunction(value):
            func = value
            if hasattr(func, "_early"):
                # run the function now, and replace with return value
                #
                # make a copy of the func with its own globals, and add 'this'
                import types
                fn = types.FunctionType(func.func_code,
                                        func.func_globals.copy(),
                                        name=func.func_name,
                                        argdefs=func.func_defaults,
                                        closure=func.func_closure)
                this = EarlyThis(data)
                fn.func_globals.update({"this": this})
                with add_sys_paths(config.package_definition_build_python_paths):
                    # this 'data' arg support isn't needed anymore, but I'm
                    # supporting it til I know nobody is using it...
                    #
                    spec = getargspec(func)
                    args = spec.args or []
                    if len(args) not in (0, 1):
                        raise ResourceError("@early decorated function must "
                                            "take zero or one args only")
                    if args:
                        value_ = fn(data)
                    else:
                        value_ = fn()
                # process again in case this is a function returning a function
                return _process(value_)
            elif hasattr(func, "_late"):
                return SourceCode(func=func, filepath=filepath,
                                  eval_as_function=True)
            elif func.__name__ in package_rex_keys:
                # if a rex function, the code has to be eval'd NOT as a function,
                # otherwise the globals dict doesn't get updated with any vars
                # defined in the code, and that means rex code like this:
                #
                #     rr = 'test'
                #     env.RR = '{rr}'
                #
                # ..won't work. It was never intentional that the above work, but
                # it does, so now we have to keep it so.
                #
                return SourceCode(func=func, filepath=filepath,
                                  eval_as_function=False)
            else:
                # a normal function. Leave unchanged, it will be stripped after
                return func
        else:
            return value
    def _trim(value):
        # Recursively remove values that must not end up in an installed
        # package: plain functions (except 'preprocess'), modules, and any
        # key starting with '__'.
        if isinstance(value, dict):
            for k, v in value.items():
                if isfunction(v):
                    if v.__name__ == "preprocess":
                        # preprocess is a special case. It has to stay intact
                        # until the `DeveloperPackage` has a chance to apply it;
                        # after which it gets removed from the package attributes.
                        #
                        pass
                    else:
                        del value[k]
                elif ismodule(v) or k.startswith("__"):
                    del value[k]
                else:
                    value[k] = _trim(v)
        return value
    data = _process(data)
    data = _trim(data)
    return data
def load_yaml(stream, **kwargs):
    """Load yaml-formatted data from a stream.

    Args:
        stream (file-like object).

    Returns:
        dict.
    """
    # if there's an error parsing the yaml, and you pass yaml.load a string,
    # it will print lines of context, but will print "<string>" instead of a
    # filename; if you pass a stream, it will print the filename, but no lines
    # of context.
    # Get the best of both worlds, by passing it a string, then replacing
    # "<string>" with the filename if there's an error...
    content = stream.read()
    try:
        # NOTE: vendored (py2-era) yaml; yaml.load without an explicit Loader
        # is unsafe on untrusted input in modern PyYAML.
        return yaml.load(content) or {}
    except Exception, e:  # Python 2 except syntax
        if stream.name and stream.name != '<string>':
            # Patch the parser error marks so they report the real filename.
            for mark_name in 'context_mark', 'problem_mark':
                mark = getattr(e, mark_name, None)
                if mark is None:
                    continue
                if getattr(mark, 'name') == '<string>':
                    mark.name = stream.name
        raise e
def load_txt(stream, **kwargs):
    """Load text data from a stream.

    Args:
        stream (file-like object).

    Returns:
        string.
    """
    # Extra keyword args (e.g. filepath=) are accepted for interface parity
    # with the other loaders, but unused.
    return stream.read()
def clear_file_caches():
    """Clear any cached files."""
    # Drops the memcached-backed entries of _load_from_file.  Note it does
    # NOT clear the in-process `file_cache` tmpdir redirection map.
    _load_from_file.forget()
# Dispatch table mapping each FileFormat member to its parser; consumed by
# _load_file().
load_functions = {FileFormat.py: load_py,
                  FileFormat.yaml: load_yaml,
                  FileFormat.txt: load_txt}
# Copyright 2013-2016 <NAME>.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| 2.671875 | 3 |
age/data/load/transformations.py | torfjelde/covid19_datasets | 49 | 12770312 | <reponame>torfjelde/covid19_datasets<filename>age/data/load/transformations.py
import pandas as pd
def add_both_sexes(data: pd.DataFrame) -> pd.DataFrame:
    """Append Sex == 'b' rows holding the male+female totals per (Date, Age)."""
    distinct_sexes = len(data.Sex.unique())
    if distinct_sexes != 2:
        raise ValueError(
            f'Expecting 2 sexes in data.Sex, but found {len(data.Sex.unique())}')
    combined = data.groupby(['Date', 'Age']).sum().reset_index().copy()
    combined['Sex'] = 'b'
    return pd.concat([data, combined], axis=0)
def rescale(data: pd.DataFrame, ref_data: pd.DataFrame, field: str) -> pd.DataFrame:
    """Proportionally rescale data so that totals match daily totals given in ref_data"""
    # Daily 'both sexes' totals joined to the reference series; the merge
    # suffixes the clashing column as <field>_x (our totals) and
    # <field>_y (reference totals).
    scale = data.query('Sex == "b"').groupby('Date').sum().merge(
        ref_data[['DATE', field]], left_on='Date', right_on='DATE')
    scale['factor'] = scale[f'{field}_y'] / scale[f'{field}_x']
    # Don't rescale small values
    scale.loc[scale[f'{field}_y'] < 10, 'factor'] = 1
    # Apply each day's factor to every (Age, Sex) row of that day.
    data = data.merge(scale[['DATE', 'factor']],
                      left_on='Date', right_on='DATE')
    data[field] = round(data[field] * data.factor)
    data = data.drop(['DATE', 'factor'], axis='columns')
    return data
def periodic_to_daily(data: pd.DataFrame) -> pd.DataFrame:
    """Convert a dataframe that has new cases or deaths sampled periodically to daily sampling."""
    # Pivot to one row per sampling date (wide over Age/Sex).
    process_df = data.set_index(['Date', 'Age', 'Sex']).unstack().unstack().fillna(0).reset_index()
    # Days since the previous sample; each periodic count is spread evenly
    # across that gap.
    gap_days = process_df.Date.diff().dt.days
    gap_days.index = process_df.Date
    process_df = process_df.set_index('Date').divide(gap_days, axis=0)
    # Interpolate the missing intermediate days, then pivot back to tidy
    # format with whole-number counts.
    process_df = process_df.resample('d').interpolate()
    process_df = round(process_df.stack().stack().reset_index())
    return process_df
def smooth_sample(data: pd.DataFrame, rolling_window: int = 3) -> pd.DataFrame:
    """Smooth counts over time with a centred rolling mean (default window 3),
    rounding back to whole counts and returning tidy format."""
    wide = data.set_index(['Date', 'Age', 'Sex']).unstack().unstack().fillna(0.)
    smoothed = wide.rolling(rolling_window, center=True, min_periods=1).mean()
    return round(smoothed).stack().stack().reset_index()
def cumulative_to_new(data: pd.DataFrame) -> pd.DataFrame:
    """Convert a tidy time series of cumulative counts into daily new counts
    via a per-(Age, Sex) first difference over dates."""
    wide = data.set_index(['Date', 'Age', 'Sex']).unstack().unstack()
    daily = wide.diff()
    return daily.stack().stack().reset_index()
def ensure_contiguous(data):
    """Make the date axis gap-free: dedupe (Date, Sex, Age) rows, zero-fill
    missing age/sex combinations, and forward-fill skipped days."""
    deduped = data.drop_duplicates(['Date', 'Sex', 'Age'])
    wide = deduped.set_index(['Date', 'Sex', 'Age']).unstack().unstack()
    wide = wide.fillna(0)  # holes in the age-sex cross product
    wide = wide.resample('d').ffill()  # holes in the dates
    return wide.stack().stack().reset_index()
| 3.15625 | 3 |
problemsets/Codeforces/Python/A63.py | juarezpaulino/coderemite | 0 | 12770313 | <filename>problemsets/Codeforces/Python/A63.py<gh_stars>0
"""
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
n = int(input())
# Sort bucket keyed by the third character of each entry's second field
# (identical mapping to the original: t->0, m/i->1, n->2, p->3).
bucket = dict(zip('tminp', [0, 1, 1, 2, 3]))
entries = [input().split() for _ in range(n)]
# Stable order: bucket first, original input position second; print each
# entry's first field.
for _, fields in sorted(enumerate(entries),
                        key=lambda item: (bucket[item[1][1][2]], item[0])):
    print(fields[0])
| 3.15625 | 3 |
notions/models/database.py | micktwomey/notions | 1 | 12770314 | import datetime
import enum
import typing
import uuid
import pydantic
from .color import Color
from .number import Number
from .parent import DatabaseParents
from .rich_text import RichText
class NumberProperty(pydantic.BaseModel):
    """A database property of type 'number'."""
    id: str
    name: str
    type: typing.Literal["number"] = "number"
    number: Number
    def get_value(self):
        # Delegates to the nested Number config object.
        return self.number.get_value()
class SelectOption(pydantic.BaseModel):
    """One selectable option of a 'select' property."""
    id: str
    name: str
    color: Color
    def get_value(self):
        # Note: resolves to the option's colour value, not its name.
        return self.color.value
class Select(pydantic.BaseModel):
    """Container for the options of a 'select' property."""
    options: typing.List[SelectOption]
    def get_value(self):
        return [o.get_value() for o in self.options]
class SelectProperty(pydantic.BaseModel):
    """A database property of type 'select'."""
    id: str
    name: str
    type: typing.Literal["select"] = "select"
    select: Select
    def get_value(self):
        return self.select.get_value()
# --- Simple property types -------------------------------------------------
# Each class below follows the same pattern: id/name, a Literal `type`
# discriminator matching the Notion API type string, and a configuration
# payload kept as a free-form dict (empty by default).  get_value() returns
# that raw dict unchanged.
class CreatedTimeProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["created_time"] = "created_time"
    created_time: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.created_time
class CreatedByProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["created_by"] = "created_by"
    created_by: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.created_by
class LastEditedTimeProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["last_edited_time"] = "last_edited_time"
    last_edited_time: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.last_edited_time
class LastEditedByProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["last_edited_by"] = "last_edited_by"
    last_edited_by: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.last_edited_by
class URLProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["url"] = "url"
    url: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.url
class TitleProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["title"] = "title"
    title: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.title
class RichTextProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["rich_text"] = "rich_text"
    rich_text: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.rich_text
class DateProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["date"] = "date"
    date: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.date
class FilesProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["files"] = "files"
    files: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.files
class PeopleProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["people"] = "people"
    people: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.people
class CheckboxProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["checkbox"] = "checkbox"
    checkbox: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.checkbox
class EmailProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["email"] = "email"
    email: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.email
class PhoneNumberProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["phone_number"] = "phone_number"
    phone_number: dict = pydantic.Field(default_factory=dict)
    def get_value(self):
        return self.phone_number
class MultiSelectOption(pydantic.BaseModel):
    """One selectable option of a 'multi_select' property."""
    id: str
    name: str
    color: Color
    def get_value(self):
        # Note: resolves to the option's colour value, not its name.
        return self.color.value
class MultiSelectOptions(pydantic.BaseModel):
    """Container for the options of a 'multi_select' property."""
    options: typing.List[MultiSelectOption]
    def get_value(self):
        return [o.get_value() for o in self.options]
class MultiSelectProperty(pydantic.BaseModel):
    """A database property of type 'multi_select'."""
    id: str
    name: str
    type: typing.Literal["multi_select"] = "multi_select"
    multi_select: MultiSelectOptions
    def get_value(self):
        return self.multi_select.get_value()
class Formula(pydantic.BaseModel):
    """Configuration of a 'formula' property: its expression string."""
    expression: str
    def get_value(self):
        return self.expression
class FormulaProperty(pydantic.BaseModel):
    """A database property of type 'formula'."""
    id: str
    name: str
    type: typing.Literal["formula"] = "formula"
    formula: Formula
    def get_value(self):
        return self.formula.get_value()
class Rollup(pydantic.BaseModel):
    """Configuration of a 'rollup' property: source relation plus aggregation."""
    relation_property_name: str
    relation_property_id: str
    rollup_property_name: str
    rollup_property_id: str
    function: str  # TODO: change to an enum
    def get_value(self):
        # Flattened back to a plain dict mirroring the field names.
        return {
            "relation_property_name": self.relation_property_name,
            "relation_property_id": self.relation_property_id,
            "rollup_property_name": self.rollup_property_name,
            "rollup_property_id": self.rollup_property_id,
            "function": self.function,
        }
class RollupProperty(pydantic.BaseModel):
    """A database property of type 'rollup'."""
    id: str
    name: str
    type: typing.Literal["rollup"] = "rollup"
    rollup: Rollup
    def get_value(self):
        return self.rollup.get_value()
class Relation(pydantic.BaseModel):
    """Configuration of a 'relation' property: the linked database and the
    optional synced property on the other side."""
    database_id: uuid.UUID
    synced_property_name: typing.Optional[str]
    synced_property_id: typing.Optional[str]
    def get_value(self):
        # Flattened back to a plain dict mirroring the field names.
        return {
            "database_id": self.database_id,
            "synced_property_name": self.synced_property_name,
            "synced_property_id": self.synced_property_id,
        }
class RelationProperty(pydantic.BaseModel):
    """A database property of type 'relation'."""
    id: str
    name: str
    type: typing.Literal["relation"] = "relation"
    relation: Relation
    def get_value(self):
        return self.relation.get_value()
# Discriminated union of every supported property model; pydantic picks the
# member by the Literal `type` field of each class.
Property = typing.Union[
    NumberProperty,
    SelectProperty,
    CreatedTimeProperty,
    URLProperty,
    TitleProperty,
    RichTextProperty,
    DateProperty,
    FilesProperty,
    PeopleProperty,
    CheckboxProperty,
    EmailProperty,
    PhoneNumberProperty,
    MultiSelectProperty,
    FormulaProperty,
    RollupProperty,
    CreatedByProperty,
    LastEditedTimeProperty,
    LastEditedByProperty,
    RelationProperty,
]
# A database's property schema, keyed by property name.
Properties = typing.Dict[str, Property]
class Database(pydantic.BaseModel):
    """A Notion database object: identity, timestamps, parent and schema."""
    object: typing.Literal["database"] = "database"
    id: uuid.UUID
    created_time: datetime.datetime
    last_edited_time: datetime.datetime
    # Database title as a sequence of rich-text fragments.
    title: typing.List[RichText]
    parent: DatabaseParents
    # Property schema keyed by property name.
    properties: Properties
| 2.453125 | 2 |
web/pipeline/migrations/0078_auto_20200904_0711.py | stevenstuber/CIT | 10 | 12770315 | <reponame>stevenstuber/CIT
# Generated by Django 2.2.13 on 2020-09-04 07:11
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: makes Community.parent_community nullable
# and detaches children (SET_NULL) when the parent community is deleted.
class Migration(migrations.Migration):
    dependencies = [
        ('pipeline', '0077_community_parent_community'),
    ]
    operations = [
        migrations.AlterField(
            model_name='community',
            name='parent_community',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='child_communities', to='pipeline.Community'),
        ),
    ]
| 1.617188 | 2 |
exchange-scrappers/pipelines.py | joa-rodrigues/crypto-exchange-listings | 6 | 12770316 | <filename>exchange-scrappers/pipelines.py
import datetime
import pymongo
import telepot
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
class CoinbaseScrapper(object):
    """Pipeline that diffs Coinbase's listed currencies against MongoDB,
    persists a diff report, syncs the stored list, and sends a Telegram
    notification when new currencies are listed."""

    def __init__(self):
        # Connect to the MongoDB instance configured in the Scrapy settings.
        mongo_server = settings.get('MONGO_SERVER')
        mongo_url = f"mongodb://{mongo_server['host']}:{mongo_server['port']}/{mongo_server['database']}"
        client = pymongo.MongoClient(mongo_url)
        self.coinbase_crypto_lists = client.get_database().get_collection(mongo_server['coinbase_cryptos_lists'])
        self.coinbase_crypto_reports = client.get_database().get_collection(mongo_server['coinbase_cryptos_reports'])

    def process_item(self, items, spider):
        """Diff the scraped currency list (items['value']) against Mongo."""
        coinbase_currencies = items["value"]
        # BUG FIX: both key iterables used to be generators, and the first
        # set() call exhausted them -- so `removed_currencies` always came
        # out empty.  Materialise the key sets once instead.
        coinbase_currencies_keys = {o["id"] for o in coinbase_currencies}
        mongo_currencies_keys = {o["key"] for o in self.coinbase_crypto_lists.find()}

        added_currencies = list(coinbase_currencies_keys - mongo_currencies_keys)
        removed_currencies = list(mongo_currencies_keys - coinbase_currencies_keys)

        # Persist a diff report for this run.
        report = {
            "date": datetime.datetime.today(),
            "added": added_currencies,
            "removed": removed_currencies
        }
        self.coinbase_crypto_reports.insert_one(report)

        # Upsert newly listed currencies into the stored list.
        for currency in added_currencies:
            self.coinbase_crypto_lists.find_one_and_update(
                {
                    "key": currency,
                },
                {"$set":
                    {
                        "key": currency,
                    }
                }, upsert=True
            )

        # Remove delisted currencies from the stored list.
        # BUG FIX: was `self.mycol.delete_one(...)`; `mycol` is never defined
        # on this class and raised AttributeError whenever a currency was
        # delisted.
        for currency in removed_currencies:
            self.coinbase_crypto_lists.delete_one(
                {
                    "key": currency,
                }
            )

        # Notify only when something new was listed.
        if added_currencies:
            telegram_bot = settings.get('TELEGRAM_BOT')
            bot = telepot.Bot(telegram_bot['token'])
            message = f"""
            <b>coinbase listed cryptos : </b> <i>{len(items["value"])}</i>
            <b>added : </b> <i>{added_currencies}</i>
            <b>removed : </b> <i>{removed_currencies}</i>
            """
            bot.sendMessage(
                telegram_bot['receiver_id'],
                parse_mode='html',
                text=message
            )
        print("END COINBASE LOOP")
class BinanceScrapper(object):
    """Pipeline that diffs Binance's listed trading pairs against MongoDB,
    persists a diff report, syncs the stored list, and sends a Telegram
    notification when new pairs are listed."""

    def __init__(self):
        # Connect to the MongoDB instance configured in the Scrapy settings.
        mongo_server = settings.get('MONGO_SERVER')
        mongo_url = f"mongodb://{mongo_server['host']}:{mongo_server['port']}/{mongo_server['database']}"
        client = pymongo.MongoClient(mongo_url)
        self.binance_crypto_lists = client.get_database().get_collection(mongo_server['binance_cryptos_lists'])
        self.binance_crypto_reports = client.get_database().get_collection(mongo_server['binance_cryptos_reports'])

    def process_item(self, items, spider):
        """Diff the scraped pair list (items['value']) against Mongo."""
        binance_pairs = items["value"]
        # Pair key format: "<base>-<quote>", e.g. "BTC-USDT".
        binance_pairs_keys = {
            pair["baseAsset"] + "-" + pair["quoteAsset"] for pair in binance_pairs
        }
        # BUG FIX: the Mongo key iterable used to be a generator exhausted by
        # the first set() call, so `removed_pairs` always came out empty.
        mongo_pairs_keys = {o["key"] for o in self.binance_crypto_lists.find()}

        added_pairs = list(binance_pairs_keys - mongo_pairs_keys)
        removed_pairs = list(mongo_pairs_keys - binance_pairs_keys)

        # Persist a diff report for this run.
        report = {
            "date": datetime.datetime.today(),
            "added": added_pairs,
            "removed": removed_pairs
        }
        self.binance_crypto_reports.insert_one(report)

        # Upsert newly listed pairs into the stored list.
        for pair in added_pairs:
            self.binance_crypto_lists.find_one_and_update(
                {
                    "key": pair,
                },
                {"$set":
                    {
                        "key": pair,
                    }
                }, upsert=True
            )

        # Remove delisted pairs from the stored list.
        # BUG FIX: was `self.mycol.delete_one(...)`; `mycol` is never defined
        # on this class and raised AttributeError whenever a pair was
        # delisted.
        for pair in removed_pairs:
            self.binance_crypto_lists.delete_one(
                {
                    "key": pair,
                }
            )

        # Notify only when something new was listed.
        if added_pairs:
            telegram_bot = settings.get('TELEGRAM_BOT')
            bot = telepot.Bot(telegram_bot['token'])
            # The fist time we run there is more than 1000 pair, telegram message size is limited
            message = f"""
            <b>binance listed pairs : </b> <i>{len(items["value"])}</i>
            <b>added : </b> <i>{added_pairs[:200]}</i>
            <b>removed : </b> <i>{removed_pairs}</i>
            """
            bot.sendMessage(
                telegram_bot['receiver_id'],
                parse_mode='html',
                text=message
            )
        print("END BINANCE LOOP")
| 2.53125 | 3 |
tests/scheduler/invalid_cron_recurring.py | abhijeetkaurav1st/calm-dsl | 0 | 12770317 | import uuid
from calm.dsl.builtins import Job, JobScheduler
# Schedule window placed far in the future so the fixture stays usable.
start_date_time = "2050-10-08 16:17:15"
expiry_date_time = "2050-10-09 00:17:00"
# Day-of-month 32 never occurs -- deliberately invalid cron for this
# negative-test fixture (see module docstring/class name).
cron = "15 1 32 * *"
time_zone = "America/Jamaica"
RUNBOOK_NAME = "invalid_cron_recurring"
class JobInvalidRecurringSpec(Job):
    """Recurring Job for Executing a Runbook with invalid cron"""
    # Random suffix keeps job names unique across test runs.
    name = "test_job_invalid_cron_recurring_" + str(uuid.uuid4())[:8]
    schedule_info = JobScheduler.ScheduleInfo.recurring(
        cron, start_date_time, expiry_date_time, time_zone
    )
    # NOTE(review): second argument False presumably toggles some runbook
    # execution flag -- confirm against JobScheduler.Exec.runbook's signature.
    executable = JobScheduler.Exec.runbook(RUNBOOK_NAME, False)
| 2.484375 | 2 |
labeling-tool/main.py | bytecell/Industrial-Project | 14 | 12770318 | import sys
from PyQt5.uic import loadUi
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, \
QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem
from transformers import AutoTokenizer
import re
import emoji
from soynlp.normalizer import repeat_normalize
# Default window dimensions (the process form resizes itself later).
Height = 400
Width = 600
# Every known emoji character, used to whitelist emojis in `pattern`.
emojis = ''.join(emoji.UNICODE_EMOJI.keys())
# Characters to strip: anything NOT in the allowed set (punctuation, ASCII,
# Korean jamo/syllables, emojis).
pattern = re.compile(f'[^ .,?!/@$%~%·∼()\x00-\x7Fㄱ-ㅣ가-힣{emojis}]+')
# Matches http(s) URLs so they can be removed from review text.
url_pattern = re.compile(
    r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)')
def clean(x):
    """Normalise a raw review string: replace disallowed characters with
    spaces, drop URLs, trim surrounding whitespace, and collapse repeated
    characters to at most two occurrences."""
    without_specials = pattern.sub(' ', x)
    without_urls = url_pattern.sub('', without_specials)
    trimmed = without_urls.strip()
    return repeat_normalize(trimmed, num_repeats=2)
class SelectForm(QDialog):
    """Initial dialog that lets the user pick the raw review file to label."""
    def __init__(self):
        super(SelectForm, self).__init__()
        loadUi('select-form.ui', self)
        self.selectFile.clicked.connect(self.select_file_clicked)
    def select_file_clicked(self):
        # getOpenFileName returns (path, selected_filter); only path is used.
        file_list = QFileDialog.getOpenFileName(self)
        self.open_process_form(file_list[0])
    def open_process_form(self, path):
        # Push the labelling screen onto the shared stacked widget
        # (`widget` is module-level; presumably created at startup -- not
        # visible in this chunk).
        process_form = ProcessForm(path)
        widget.addWidget(process_form)
        widget.setCurrentIndex(widget.currentIndex() + 1)
class ProcessForm(QDialog):
POS = 'T-POS'
NEG = 'T-NEG'
NEU = 'T-NEU'
NATURAL = 'O'
    def __init__(self, path):
        """Build the labelling screen for the review file at `path`."""
        super(ProcessForm, self).__init__()
        loadUi('process-form.ui', self)
        # Parallel lists: original raw lines, tokenised reviews, and the
        # per-token label output; cur_index selects the current review.
        self.review_size = 0
        self.reviews = []
        self.original = []
        self.output = []
        self.cur_index = 0
        self.load_file(path)
        # Progress bar showing "current/total" review position.
        self.pbar = QProgressBar(self)
        self.pbar.setGeometry(650, 200, 300, 40)
        self.pbar.setMaximum(self.review_size - 1)
        self.pbar.setValue(self.cur_index)
        self.pbar.setFormat("%i/%d" % (self.pbar.value() + 1, self.pbar.maximum() + 1))
        # Two-row table: row 0 = tokens, row 1 = their labels; clicking a
        # cell cycles the token's label.
        self.tableWidget = QTableWidget(self)
        self.tableWidget.move(50, 50)
        self.tableWidget.resize(1500, 130)
        self.tableWidget.setRowCount(2)
        self.tableWidget.setColumnCount(len(self.reviews[0]))
        self.tableWidget.setSelectionMode(QAbstractItemView.SingleSelection)
        self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.tableWidget.cellClicked.connect(self.__mycell_clicked)
        self.setTableWidgetData()
        # Navigation / action buttons.
        prevBtn = QPushButton('Prev', self)
        prevBtn.move(500, 205)
        prevBtn.clicked.connect(self.getPrevReview)
        passBtn = QPushButton('Pass', self)
        passBtn.move(1450, 180)
        passBtn.clicked.connect(self.passReview)
        nextBtn = QPushButton('Next', self)
        nextBtn.move(1000, 205)
        nextBtn.clicked.connect(self.getNextReview)
        saveBtn = QPushButton('Save', self)
        saveBtn.move(1450, 300)
        saveBtn.clicked.connect(self.saveResult)
        self.setWindowTitle('Cap11 LabelingTool')
        self.resize(1600, 350)
        self.center()
        # `widget` is the module-level stacked widget hosting this form.
        widget.setFixedHeight(350)
        widget.setFixedWidth(1600)
def __mycell_clicked(self, row, col):
before = self.output[self.cur_index][col]
if before == self.NATURAL:
self.output[self.cur_index][col] = self.POS
elif before == self.POS:
self.output[self.cur_index][col] = self.NEG
else:
self.output[self.cur_index][col] = self.NATURAL
self.setTableWidgetData()
def getNextReview(self):
self.tableWidget.scrollTo(self.tableWidget.model().index(0, 0))
self.cur_index += 1
self.cur_index = self.cur_index % self.review_size
self.pbar.setFormat("%i/%d" % (self.cur_index + 1, self.pbar.maximum() + 1))
self.setTableWidgetData()
def getPrevReview(self):
self.tableWidget.scrollTo(self.tableWidget.model().index(0, 0))
self.cur_index -= 1
self.cur_index = self.cur_index % self.review_size
self.pbar.setFormat("%i/%d" % (self.cur_index + 1, self.pbar.maximum() + 1))
self.setTableWidgetData()
def passReview(self):
del self.original[self.cur_index]
del self.reviews[self.cur_index]
del self.output[self.cur_index]
self.review_size = len(self.reviews)
self.pbar.setMaximum(self.review_size - 1)
self.cur_index = self.cur_index % self.review_size
self.pbar.setFormat("%i/%d" % (self.cur_index + 1, self.pbar.maximum() + 1))
self.tableWidget.scrollTo(self.tableWidget.model().index(0, 0))
self.setTableWidgetData()
def saveResult(self):
with open("./output.txt", 'w') as outputFile:
for i in range(self.cur_index + 1):
outputFile.write(self.original[i])
outputFile.write('####')
for label in range(len(self.output[i])):
outputFile.write("%s=%s" % (self.reviews[i][label], self.output[i][label]))
outputFile.write('\n')
def setTableWidgetData(self):
self.tableWidget.setColumnCount(len(self.reviews[self.cur_index]))
self.pbar.setValue(self.cur_index)
for idx, word in enumerate(self.reviews[self.cur_index]):
status = self.output[self.cur_index][idx]
newItem = QTableWidgetItem(word)
color = QtCore.Qt.white
if status == self.NEU:
color = QtCore.Qt.gray
elif status == self.POS:
color = QtCore.Qt.green
elif status == self.NEG:
color = QtCore.Qt.red
newItem.setBackground(color)
self.tableWidget.setItem(0, idx, newItem)
self.tableWidget.setItem(1, idx, QTableWidgetItem(status))
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
def load_file(self, path):
with open(path, 'r', encoding="utf-8-sig") as f:
for line in f.readlines():
line = clean(line.replace("\n", ""))
words = tokenizer.tokenize(line)
words = [word.replace("#", "") for word in words]
self.original.append(line)
self.reviews.append(words)
self.output.append([self.NATURAL] * len(words))
self.review_size = len(self.reviews)
# --- module-level bootstrap: load the tokenizer, build and show the GUI ---
tokenizer = AutoTokenizer.from_pretrained("./bert/")  # tokenizer loaded from a local directory
app = QApplication(sys.argv)
select_form = SelectForm()
widget = QStackedWidget()
widget.addWidget(select_form)
widget.setFixedHeight(Height)  # Height/Width presumably defined earlier in the file — confirm
widget.setFixedWidth(Width)
widget.show()
sys.exit(app.exec_())
| 2.09375 | 2 |
src/models/predict_CNN.py | danielemingolla/sentiment_analysis_performances_comparison | 0 | 12770319 | from keras.models import load_model
from sklearn.externals import joblib  # NOTE(review): deprecated alias; modern scikit-learn requires `import joblib`
from keras.preprocessing.sequence import pad_sequences
import os

# Locate the trained CNN model and its fitted tokenizer relative to the CWD.
current_directory = os.getcwd()
file_name = "CNN__31_05_2020__20_33.h5"
tokenizer_name = "tokenizer_31_05_2020__20_45.pkl"
# NOTE(review): backslash-joined paths are Windows-only; os.path.join would be portable.
input_path = "\\".join([current_directory, "models", file_name])
tokenizer_path = "\\".join([current_directory, "transformer", tokenizer_name])

tokenizer = joblib.load(tokenizer_path)
model = load_model(input_path)

# Sample reviews to classify.
reviews = ["Posto non dei migliori, abbiamo trovato un sacco di polvere per terra, orrendo!",
           "Luogo al centro di Pisa, abbastanza carino e con personale gentile",
           "Personale scortese!"]

# Same preprocessing as training: integer-encode, then pad to fixed length 80.
X = tokenizer.texts_to_sequences(reviews)
maxlen = 80
X = pad_sequences(X, padding='post', maxlen=maxlen)
result = model.predict_classes(X)

# Map class ids to labels: 1 -> POSITIVO, 2 -> NEGATIVO, anything else -> NEUTRO.
# NOTE(review): the loop variable shadows the `reviews` list; harmless here
# because the list is not used afterwards, but confusing.
for reviews, predict in zip(reviews, result):
    if(predict == 1):
        predict = 'POSITIVO'
    elif(predict == 2):
        predict = 'NEGATIVO'
    else:
        predict = 'NEUTRO'
    print("{} --> {}".format(reviews, predict))
| 2.78125 | 3 |
src/python/wakplot.py | chanul13/EDMFTF | 7 | 12770320 | <reponame>chanul13/EDMFTF
#!/usr/bin/env python
from scipy import *
from pylab import *
from scipy import weave
import glob, os, sys
code="""
#line 10 "wakplot.py"
using namespace std;
double Ry2eV = 13.6056920311654;
double Ax = 0;
for (int ib=0; ib<nbands; ib++){
complex<double> ekw=ekom(ib);
if (ekw.imag() > -small) ekw=complex<double>(ekw.real(),-small);
complex<double> gc = abs(cohd(ib))/(omega+mu-ekw);
//complex<double> gc = 1./(omega+mu-ekw);
Ax += -gc.imag()/M_PI;
}
return_val = Ax;
"""
if __name__ == '__main__':
if len(sys.argv)<2:
intensity = 0.2
else:
intensity = float(sys.argv[1])
small = 1e-5 # 0.01 # 1e-5
#itensity = 0.2
DY = 0 # 0.01318
fEF = open('EF.dat', 'r')
mu = float(fEF.next().split()[0])
print 'mu=', mu
wg = glob.glob('*.klist_band')
if len(wg)>0:
fg = open(wg[0], 'r')
wkpointi=[]
wkpoints=[]
for il,line in enumerate(fg):
if line[:3]=='END': break
com = line[:10].split()
if com:
legnd=line.split()[0]
wkpoints.append(legnd)
wkpointi.append(il)
print wkpointi
print wkpoints
nkp = wkpointi[-1]+1
print 'nkp=', nkp
fdat = open('eigvals.dat', 'r')
if os.path.isfile('cohfactorsd.dat'):
fcoh = open('cohfactorsd.dat', 'r')
else:
fcoh = None
ikp=0
Akom=[]
try:
while True:
data = fdat.next().split()
if fcoh is not None: dach = fcoh.next().split()
(ikp, isym, nbands, nemin, nomega) = map(int, data[1:6])
ekom = zeros(nbands, dtype=complex)
dach=ones((nomega,nbands), dtype=complex)
index=range(nomega)
omw=zeros(nomega,dtype=float)
if fcoh is not None:
for iom in range(nomega):
datc = array(map(float,fcoh.next().split()))
omw[iom] = datc[0]
dach[iom,:] = datc[1::2]+datc[2::2]*1j
#print 'shape=', shape(dach), 'nbands=', nbands
# need to sort frequency because open-mp mixes them up
index=sorted(index, key=lambda i: omw[i])
#for i in range(len(index)):
# print omw[index[i]],
#print
Aom=zeros(nomega,dtype=float)
om=zeros(nomega,dtype=float)
for iom in range(nomega):
data = array(map(float, fdat.next().split()))
omega = float(data[0])
ekom = data[1::2]+data[2::2]*1j
om[iom] = omega
cohd = dach[index[iom]]
#print 'om=', omega, omw[index[iom]]
Aom[iom] = weave.inline(code, ['nbands', 'omega', 'mu', 'ekom', 'small', 'ikp', 'cohd'],
type_converters=weave.converters.blitz, compiler = 'gcc')
Akom.append( Aom )
except StopIteration:
pass
Akom = array(Akom).transpose()
print 'shape(Akom)=', shape(Akom)
vmm = [0,max(map(max,Akom))*intensity]
(ymin,ymax) = (om[0]+DY,om[-1]+DY)
(xmin,xmax) = (0, shape(Akom)[1]-1)
#(xmin,xmax) = (0, nkp-1)
print 'xmin,xmax,ymin,ymax=', xmin, xmax, ymin, ymax
imshow(Akom, interpolation='bilinear', cmap=cm.hot, origin='lower', vmin=vmm[0], vmax=vmm[1], extent=[xmin,xmax,ymin,ymax], aspect=(xmax-xmin)*0.8/(ymax-ymin) )
for i in range(len(wkpointi)):
print 'wp=', wkpointi[i]
plot([wkpointi[i],wkpointi[i]], [ymin,ymax], 'w-')
plot([xmin,xmax],[0,0], 'w:')
dytck=0.005
Ntck=5
for j in range(len(wkpointi)-1):
for ix in range(1,Ntck):
x = wkpointi[j]+(wkpointi[j+1]-wkpointi[j])*ix/float(Ntck)
plot([x,x],[-dytck,dytck],'w-')
axis([xmin,xmax,ymin,ymax])
xticks( wkpointi, wkpoints, fontsize='x-large' )
#colorbar()
show()
| 2.015625 | 2 |
src/__init__.py | PythonistaMX/py231 | 3 | 12770321 | #! /usr/bin/python3
from ast import literal_eval

from flask import abort, jsonify
from json import loads
# Catalog of valid degree programs, used by the field rules below.
carreras = ("Sistemas", "Derecho", "Actuaría", "Arquitectura", "Administración")
# Canonical field order when assembling a full record.
orden = ('nombre', 'primer_apellido', 'segundo_apellido', 'carrera','semestre', 'promedio', 'al_corriente')
# Field name -> (expected type, required?).
campos = {'cuenta': (int, True), 'nombre': (str, True), 'primer_apellido': (str, True), 'segundo_apellido': (str, False), 'carrera': (str, True), 'semestre': (int, True), 'promedio': (float, True), 'al_corriente': (bool, True)}
def carga_base(ruta):
    """Load the student "database": a Python-literal list stored at *ruta*.

    Uses ast.literal_eval instead of eval so arbitrary code in the file
    cannot be executed; escribe_base() writes plain literals (via str()),
    which literal_eval parses without any security risk.
    """
    with open(ruta, 'tr') as base:
        return literal_eval(base.read())
def escribe_base(lista, ruta):
    """Persist *lista* to *ruta* as its Python-literal text representation."""
    contenido = str(lista)
    with open(ruta, 'wt') as base:
        base.write(contenido)
def busca_base(cuenta, base):
    """Return the record in *base* whose 'cuenta' equals *cuenta*, else False.

    Also returns False when *cuenta* cannot be interpreted as an integer
    (preserving the original contract). Fixes the original's bare except
    inside the loop, which aborted the whole search — returning False —
    as soon as any single record was malformed, even if a later record
    matched; records without a 'cuenta' key are now simply skipped.
    """
    try:
        cuenta = int(cuenta)
    except (TypeError, ValueError):
        return False
    for alumno in base:
        if alumno.get('cuenta') == cuenta:
            return alumno
    return False
def es_tipo(dato, tipo):
    """Check that *dato* is exactly of type *tipo*; str always passes.

    The original relied on a CPython implementation detail — int()/float()/
    bool() returning the very same object for already-exact instances — via
    `tipo(dato) is dato`. An explicit exact-type check expresses the same
    intent portably. Note this intentionally rejects subclasses, so a bool
    is not accepted where an int is expected (matching the original).
    """
    if tipo == str:
        return True
    return type(dato) is tipo
def reglas(dato, campo):
    """Field-specific business rules: True when *dato* is acceptable for *campo*.

    carrera must be a known program, semestre >= 1, promedio within [0, 10],
    and the two mandatory name fields must be non-empty. Fields without a
    rule are always accepted.
    """
    if campo == "carrera":
        return dato in carreras
    if campo == "semestre":
        return dato >= 1
    if campo == "promedio":
        return 0 <= dato <= 10
    if campo in ("nombre", "primer_apellido"):
        return dato != ""
    return True
def valida(dato, campo):
    """A value is valid when it has the declared type and passes the field rules."""
    tipo_esperado = campos[campo][0]
    return es_tipo(dato, tipo_esperado) and reglas(dato, campo)
def recurso_completo(base, ruta, cuenta, peticion):
    """Validate *peticion* (a JSON string), build a full student record keyed by
    *cuenta*, append it to *base*, persist the base to *ruta*, and return the
    new record as a JSON response.

    Any validation failure — unparseable JSON, unknown fields, a missing
    required field, or a type/rule violation — aborts with HTTP 400.
    """
    try:
        candidato = {'cuenta': int(cuenta)}
        peticion = loads(peticion)
        if (set(peticion)).issubset(set(orden)):
            for campo in orden:
                if not campos[campo][1] and campo not in peticion:
                    # Optional field omitted by the client: store an empty string.
                    candidato[campo] = ''
                elif valida(peticion[campo], campo):
                    candidato[campo] = peticion[campo]
                else:
                    abort(400)
        else:
            abort(400)
    except:
        # NOTE(review): bare except — a missing *required* field raises KeyError
        # above and lands here (that is how this path returns 400), but it also
        # swallows the HTTPException from the inner abort() calls before
        # re-aborting. Narrowing the exception types would make the flow clearer.
        abort(400)
    base.append(candidato)
    escribe_base(base, ruta)
    return jsonify(candidato)
tests/stores/test_elasticsearch_conf.py | synthetic-intelligence/zentral | 0 | 12770322 | <gh_stars>0
from django.test import SimpleTestCase
from django.utils.crypto import get_random_string
from accounts.events import EventMetadata, LoginEvent
from zentral.conf.config import ConfigDict
from zentral.core.exceptions import ImproperlyConfigured
from zentral.core.stores.backends.elasticsearch import EventStore
class TestElasticsearchStoreConf(SimpleTestCase):
    """Tests for Elasticsearch EventStore configuration validation
    (index vs indices, priorities, event filters) and event-to-index routing.
    """

    @staticmethod
    def build_login_event(routing_key=None):
        # Minimal LoginEvent with a random username; routing key is optional.
        return LoginEvent(EventMetadata(routing_key=routing_key), {"user": {"username": get_random_string(12)}})

    # --- configuration validation errors ---

    def test_index_and_indices(self):
        # 'index' and 'indices' are mutually exclusive.
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'indices': {"un": {}},
                'index': 'zentral-events',
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], 'index and indices cannot be both set')

    def test_indices_not_a_mapping(self):
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'indices': "yolo",
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], 'indices must be a Mapping')

    def test_indices_missing_or_invalid_index_priority(self):
        # Every configured index must declare a priority.
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'indices': {"un": {}},
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], 'missing or invalid index priority')

    def test_indices_duplicated_index_priority(self):
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'indices': {"un": {"priority": 10}, "deux": {"priority": 10}},
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], 'all indices must have a different priority')

    def test_indices_invalid_event_filters(self):
        # Event filters must be a list of filter mappings, not a bare string.
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'indices': {"un": {"priority": 20,
                                   "included_event_filters": "yolo"},
                            "deux": {"priority": 10}},
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], "invalid event filters for index 'un'")

    def test_default_index_filtered(self):
        # The lowest-priority index is the catch-all default and may not filter.
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'indices': {"un": {"priority": 20,
                                   "included_event_filters": [{"event_type": ["yolo"]}]},
                            "deux": {"priority": 10,
                                     "included_event_filters": [{"event_type": ["fomo"]}]}},
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], "default index 'deux' (lowest priority) cannot be filtered")

    def test_no_index_configured(self):
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], "no index configured")

    def test_missing_read_index(self):
        # Multiple write indices require an explicit read (alias) index.
        with self.assertRaises(ImproperlyConfigured) as cm:
            EventStore(ConfigDict({
                'servers': ["http://elastic:9200"],
                'indices': {"un": {"priority": 20,
                                   "included_event_filters": [{"event_type": ["yolo"]}]},
                            "deux": {"priority": 10}},
                'store_name': 'yolo'
            }))
        self.assertEqual(cm.exception.args[0], "missing read index")

    # --- event routing / serialization ---

    def test_one_index_get_event_index(self):
        # With a single 'index', every event routes to it.
        store_index = get_random_string(12)
        store = EventStore(ConfigDict({
            'servers': ["http://elastic:9200"],
            'index': store_index,
            'store_name': 'yolo'
        }))
        event = self.build_login_event(routing_key="jomo")
        self.assertEqual(store._get_event_index(event.serialize()), store_index)

    def test_one_index_serialize_event(self):
        store_index = get_random_string(12)
        store = EventStore(ConfigDict({
            'servers': ["http://elastic:9200"],
            'index': store_index,
            'store_name': 'yolo'
        }))
        store.use_mapping_types = False
        event = self.build_login_event(routing_key="jomo")
        index, es_doc_type, es_event_d = store._serialize_event(event)
        self.assertEqual(index, store_index)
        self.assertEqual(es_doc_type, "doc")
        self.assertEqual(es_event_d["type"], "zentral_login")
        self.assertEqual(es_event_d["tags"], ["zentral"])
        self.assertEqual(es_event_d["routing_key"], "jomo")

    def test_indices_get_event_index_1(self):
        # An event not matching any filter falls through to the default index.
        store = EventStore(ConfigDict({
            'servers': ["http://elastic:9200"],
            'indices': {"un": {"priority": 20,
                               "included_event_filters": [{"routing_key": ["yolo"]}]},
                        "deux": {"priority": 10}},
            'read_index': "all_integers",
            'store_name': 'yolo'
        }))
        event = self.build_login_event(routing_key="jomo")
        self.assertEqual(store._get_event_index(event.serialize()), "deux")

    def test_indices_get_event_index_2(self):
        # An event matching the higher-priority filter routes to that index.
        store = EventStore(ConfigDict({
            'servers': ["http://elastic:9200"],
            'indices': {"un": {"priority": 20,
                               "included_event_filters": [{"routing_key": ["yolo"]}]},
                        "deux": {"priority": 10}},
            'read_index': "all_integers",
            'store_name': 'yolo'
        }))
        event = self.build_login_event(routing_key="yolo")
        self.assertEqual(store._get_event_index(event.serialize()), "un")
| 2.109375 | 2 |
xssor/tcp.py | boundmania/xssor2 | 2,126 | 12770323 | <filename>xssor/tcp.py
sys_name = "XSS'OR"
sys_copyright = <EMAIL>cos.me"
def sys(req):
return {
'sys_name': sys_name,
'sys_copyright': sys_copyright,
}
| 2.03125 | 2 |
lightkurve_ext_pg_runner.py | orionlee/PH_TESS_I_LightCurveViewer | 2 | 12770324 | import contextlib
import logging
from time import time_ns
from types import SimpleNamespace
import warnings
import ipywidgets as widgets
from IPython.display import display
import lightkurve_ext as lke
import lightkurve_ext_tls as lke_tls
import lightkurve_ext_pg as lke_pg
def _current_time_millis():
return time_ns() / 1000000
def _flatten(lc, flatten_kwargs):
if flatten_kwargs is None:
return lc
flatten_kwargs = flatten_kwargs.copy()
window_length_in_days = flatten_kwargs.pop("window_length_in_days", None)
if window_length_in_days is not None:
window_length = lke.to_window_length_for_2min_cadence(window_length_in_days)
flatten_kwargs["window_length"] = window_length
return lc.flatten(**flatten_kwargs)
def _remove_fig_title(*ax_args):
# Used to remove the extra title in %matplotlib widget mode
# alternative would be disbale them globally, see
# https://github.com/matplotlib/ipympl/issues/229#issuecomment-633430427
for ax in ax_args:
if ax is not None:
ax.get_figure().canvas.header_visible = False
ax.get_figure().canvas.footer_visible = False
# ax.get_figure().canvas.toolbar_visible = False
# ax.get_figure().canvas.resizable = False
def run_tls(
    lc, pg_kwargs={}, flatten_kwargs=None, plot_pg=True, plot_lc_model=True, plot_transit_depth=True, display_context=None
):
    """Run a Transit Least Squares periodogram on *lc* and optionally plot it.

    The light curve is cleaned (NaNs removed, normalized, optionally
    flattened via _flatten) before the TLS search. Returns a SimpleNamespace
    bundling the periodogram, the processed light curve, and every axes
    created (None for skipped plots). *display_context*, when given, must be
    a mapping with "validate" and "plot" context managers (e.g. ipywidgets
    Output) so text and figures land in separate output areas.

    NOTE(review): pg_kwargs={} is a mutable default; harmless here because it
    is only read, but worth switching to None at the next interface change.
    """
    if display_context is None:
        # note : nullcontext() requires Python 3.7
        ctx_validate, ctx_plot = contextlib.nullcontext(), contextlib.nullcontext()
    else:
        ctx_validate, ctx_plot = display_context["validate"], display_context["plot"]
    with ctx_validate:
        lc = lc.remove_nans().normalize()
        lc = _flatten(lc, flatten_kwargs)
        # Time the periodogram computation and stash it on the result.
        time_b = _current_time_millis()
        pg = lke_tls.TransitLeastSquaresPeriodogram.from_lightcurve(lc, **pg_kwargs)
        time_e = _current_time_millis()
        pg.elapsed_time = time_e - time_b
        lke_pg.validate_tls_n_report(pg)
    with ctx_plot:
        ax_pg = None
        if plot_pg:
            ax_pg = lke_pg.plot_pg_n_mark_max(pg)
            _remove_fig_title(ax_pg)
        ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = None, None, None
        if plot_lc_model:
            ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = lke_pg.plot_lc_with_model(lc, pg)
            _remove_fig_title(ax_lc_model_1, ax_lc_model_2, ax_lc_model_f)
        ax_tt_depth = None
        if plot_transit_depth:
            ax_tt_depth = lke_pg.errorbar_transit_depth(pg)
            _remove_fig_title(ax_tt_depth)
    return SimpleNamespace(
        pg=pg,
        lc=lc,
        ax_pg=ax_pg,
        ax_lc_model_1=ax_lc_model_1,
        ax_lc_model_2=ax_lc_model_2,
        ax_lc_model_f=ax_lc_model_f,
        ax_tt_depth=ax_tt_depth,
    )
def run_bls(
    lc,
    use_stellar_specific_search_grid=False,
    pg_kwargs={},
    flatten_kwargs=None,
    plot_pg=True,
    plot_lc_model=True,
    display_context=None,
):
    """Run a Box Least Squares periodogram on *lc* and optionally plot it.

    Mirrors run_tls(): clean/flatten the light curve, compute the BLS
    periodogram (using a stellar-parameter-specific search grid when
    requested), validate/report it, and return a SimpleNamespace with the
    periodogram, processed light curve, and created axes (None when
    skipped). BLS carries no per-transit depth info, so ax_tt_depth is
    always None.

    NOTE(review): pg_kwargs={} is a mutable default; harmless here because
    it is only read.
    """
    if display_context is None:
        ctx_validate, ctx_plot = contextlib.nullcontext(), contextlib.nullcontext()
    else:
        ctx_validate, ctx_plot = display_context["validate"], display_context["plot"]
    with ctx_validate:
        lc = lc.remove_nans().normalize()
        lc = _flatten(lc, flatten_kwargs)
        time_b = _current_time_millis()
        if use_stellar_specific_search_grid:
            pg = lke_tls.create_bls_pg_with_stellar_specific_search_grid(lc, **pg_kwargs)
        else:
            pg = lc.to_periodogram(method="bls", **pg_kwargs)
        time_e = _current_time_millis()
        pg.elapsed_time = time_e - time_b
        lke_pg.validate_bls_n_report(pg)
    with ctx_plot:
        ax_pg = None
        if plot_pg:
            ax_pg = lke_pg.plot_pg_n_mark_max(pg)
            _remove_fig_title(ax_pg)
        ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = None, None, None
        if plot_lc_model:
            with warnings.catch_warnings():
                # avoid warnings about using max power values
                warnings.filterwarnings("ignore", message=".*Using.*")
                # NOTE(review): this logger level change is not restored after
                # the with block (catch_warnings only restores warnings state).
                logger = logging.getLogger("lightkurve.periodogram")
                logger.setLevel(logging.ERROR)
                ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = lke_pg.plot_lc_with_model(lc, pg)
            _remove_fig_title(ax_lc_model_1, ax_lc_model_2, ax_lc_model_f)
        ax_tt_depth = None
        # ax_tt_depth = lke_pg.errorbar_transit_depth(pg) # bls has no info directly
    return SimpleNamespace(
        pg=pg,
        lc=lc,
        ax_pg=ax_pg,
        ax_lc_model_1=ax_lc_model_1,
        ax_lc_model_2=ax_lc_model_2,
        ax_lc_model_f=ax_lc_model_f,
        ax_tt_depth=ax_tt_depth,
    )
def run_bls_n_tls(
    lc,
    use_stellar_specific_search_grid_for_bls=False,
    plot_pg=True,
    plot_lc_model=True,
    plot_transit_depth=True,
    bls_pg_kwargs={},
    tls_pg_kwargs={},
):
    """Run BLS and TLS on *lc* and show their outputs side by side in a
    two-column ipywidgets GridBox (validation text on top, plots below).
    """
    # Run TLS and BLS and have their results displayed side-by-side.
    #
    # For the matplotlib figures to be displayed inside the respective boxes in Jupyter, magic
    # %matplotlib widget
    # is needed (requiring ipympl package)
    #
    # sometimes it crashes the browsers (possibly too many interactive figures?!)
    out_bls_validate = widgets.Output(layout={"border": "0px solid lightgray"})
    out_bls_plot = widgets.Output(layout={"border": "0px solid lightgray"})
    out_tls_validate = widgets.Output(layout={"border": "0px solid lightgray"})
    out_tls_plot = widgets.Output(layout={"border": "0px solid lightgray"})
    # 2x2 grid: column 0 = BLS, column 1 = TLS.
    ctr = widgets.GridBox(
        children=[out_bls_validate, out_tls_validate, out_bls_plot, out_tls_plot],
        layout=widgets.Layout(width="auto", grid_template_rows="auto", grid_template_columns="50% 50%", grid_gap="5px 10px"),
    )
    run_bls(
        lc,
        use_stellar_specific_search_grid=use_stellar_specific_search_grid_for_bls,
        pg_kwargs=bls_pg_kwargs,
        plot_pg=plot_pg,
        plot_lc_model=plot_lc_model,
        display_context=dict(validate=out_bls_validate, plot=out_bls_plot),
    )
    run_tls(
        lc,
        tls_pg_kwargs,
        plot_pg=plot_pg,
        plot_lc_model=plot_lc_model,
        plot_transit_depth=plot_transit_depth,
        display_context=dict(validate=out_tls_validate, plot=out_tls_plot),
    )
    # with out_bls:
    #     run_bls(lc, bls_pg_kwargs, plot_pg=plot_pg, plot_lc_model=plot_lc_model)
    # with out_tls:
    #     run_tls(lc, tls_pg_kwargs, plot_pg=plot_pg, plot_lc_model=plot_lc_model, plot_transit_depth=plot_transit_depth)
    return display(ctr)
| 2.171875 | 2 |
bk169X/main.py | ddamiani/bk169X | 1 | 12770325 | <filename>bk169X/main.py<gh_stars>1-10
import sys
import argparse
import serial
import IPython
import os
import glob
import bk169X.control as _bkcont
import bk169X.calib as _bkcal
def __parse_cli(argv=None):
    """Build the command-line parser and parse the arguments.

    Parameters
    ----------
    argv : list of str, optional
        Arguments to parse. Defaults to None, which makes argparse fall back
        to sys.argv[1:] (the original behavior); passing an explicit list
        makes this function testable without touching the real command line.

    Returns
    -------
    argparse.Namespace
        With at least: port, mode, simulate, and (in calib mode) vstart,
        vend, vstep, settle.
    """
    parser = argparse.ArgumentParser(
        description='A tool for control and calibration of BK Precision 169X Series DC power supplies'
    )

    settle = 1.5
    serial_port_linux = '/dev/ttyUSB0'
    serial_port_osx = '/dev/cu.usbserial-*'
    serial_port = None
    # Guess a sensible default serial port for the current platform; may stay
    # None when no known device is present (main() then prompts the user).
    if os.name == 'nt':
        serial_port = 'COM3'
    elif os.name == 'posix':
        if os.path.exists(serial_port_linux):
            serial_port = serial_port_linux
        else:
            # Possible dev name on OSX
            devs = glob.glob(serial_port_osx)
            if devs:
                serial_port = devs[0]

    parser.add_argument(
        '-p',
        '--port',
        metavar='PORT',
        default=serial_port,
        help='the serial port the power supply is attached to (default: {})'.format(serial_port)
    )

    subparser = parser.add_subparsers(dest='mode', help='The mode choice of the tool')
    subparser.required = True

    control_parser = subparser.add_parser('control', help='Control mode of the tool')
    control_parser.add_argument(
        '-s',
        '--simulate',
        action='store_true',
        help='run the control software with a simulated serial device'
    )

    # Fixed typo in the original help text ("Calibrtion").
    calib_parser = subparser.add_parser('calib', help='Calibration mode of the tool')
    # calib mode has no --simulate flag; default it so main() can read it uniformly.
    calib_parser.set_defaults(simulate=False)
    calib_parser.add_argument(
        'vstart',
        metavar='VSTART',
        type=float,
        help='the starting voltage for calibration scans'
    )
    calib_parser.add_argument(
        'vend',
        metavar='VEND',
        type=float,
        help='the ending voltage for calibration scans'
    )
    calib_parser.add_argument(
        'vstep',
        metavar='VSTEP',
        type=float,
        help='the voltage step size for the calibration scans'
    )
    calib_parser.add_argument(
        '-s',
        '--settle',
        metavar='SETTLE',
        type=float,
        default=settle,
        help='the settling time before reading back the voltage (default {time:.2f} s)'.format(time=settle)
    )

    return parser.parse_args(argv)
def main():
    """Entry point: parse the CLI, open the power supply, drop into IPython.

    Connects to the supply on the chosen serial port, builds a banner with
    the current setpoint and readings, then embeds an interactive IPython
    shell with either a control handle (`ps`) or a calibration helper
    (`calib`) available in the session's namespace.
    """
    try:
        __args = __parse_cli()
        __port = __args.port
        __banner_base = '* {mode} tool for BK Precision 169X Series DC power supplies *'
        __banner_stp = 'Power supply settings: {volt:4.2f} V, {curr:5.3f} A\n'
        __banner_read = 'Power supply readings: {volt:4.2f} V, {curr:5.3f} A\n'
        # prompt user for input if no serial port was specified
        if __port is None:
            __port = input('Please specify a serial port to use (e.g. COM3, /dev/ttyUSB0): ')
        with _bkcont.PowerSupply(__port, simulated=__args.simulate) as __bkps:
            if __args.mode == 'calib':
                __banner = __banner_base.format(mode='Calibration')
                calib = _bkcal.PowerSupplyCalib(__bkps, __args.vstart, __args.vend, __args.vstep, __args.settle)
                __stp_v, __stp_c = calib.ps.setpoint()
                __status = __banner_stp.format(volt=__stp_v, curr=__stp_c)
                __status += __banner_read.format(volt=calib.ps.voltage(), curr=calib.ps.current())
            elif __args.mode == 'control':
                ps = __bkps  # exposed to the embedded IPython session
                __banner = __banner_base.format(mode='Control')
                __stp_v, __stp_c = ps.setpoint()
                __status = __banner_stp.format(volt=__stp_v, curr=__stp_c)
                __status += __banner_read.format(volt=ps.voltage(), curr=ps.current())
            else:
                print('Unknown tool mode: {mode}'.format(mode=__args.mode))
                sys.exit(1)
            # Frame the banner in a box of asterisks.
            __banner = '\n{0}\n{1}\n{0}\n'.format('*'*len(__banner), __banner)
            IPython.embed(banner1=__banner, banner2=__status)
    except serial.SerialException as ser_ex:
        print('Problem connecting to power supply:', ser_ex)
        sys.exit(1)
    except KeyboardInterrupt:
        print('\nExiting tool!')
# Script entry point.
if __name__ == '__main__':
    main()
| 2.625 | 3 |
manga_py/providers/translate_webtoons_com.py | sonvt1710/manga-py | 7 | 12770326 | from manga_py.provider import Provider
from .helpers.std import Std
class TranslateWebToonsCom(Provider, Std):
    """manga-py provider for translate.webtoons.com.

    Only episodes whose community translation is 100% complete are
    downloaded (see _filter_chapters).
    """

    def get_archive_name(self) -> str:
        return self.normal_arc_name(self.get_chapter_index())

    def get_chapter_index(self) -> str:
        # Episode number from the chapter URL, e.g. ...episodeNo=12 -> "12".
        return self.re.search(r'\bepisodeNo=(\d+)', self.chapter).group(1)

    def get_content(self):
        return self.http_get(self.get_url())

    def get_manga_name(self) -> str:
        return self.text_content_full(self.content, 'h3.subj')

    def _chapters(self, content):
        # Episode links from one listing page.
        return self._elements('.detail_lst > ul > li > a', content)

    @staticmethod
    def _filter_chapters(chapters):
        # Keep only episodes whose completion badge reads "100%".
        result = []
        for item in chapters:
            content = item.cssselect('.rate_num.cplt')[0].text_content_full().strip('\n\t\r \0')
            if content == '100%':
                result.append(item)
        return result

    def get_chapters(self):
        # Collect episodes from the first page, then follow pagination links.
        pages = self._elements('.paginate > a:not([class])')
        chapters = self._chapters(self.content)
        if pages:
            n = self.normalize_uri
            for i in pages:
                content = self.http_get(n(i.get('href')))
                chapters += self._chapters(content)
        return self._filter_chapters(chapters)

    def get_files(self):
        parser = self.html_fromstring(self.chapter)
        return self._images_helper(parser, '.img_info > img')

    def get_cover(self) -> str:
        return self._cover_from_content('.thmb img')

    def book_meta(self) -> dict:
        # todo meta
        pass


# Entry-point alias expected by the manga-py provider loader.
main = TranslateWebToonsCom
| 2.3125 | 2 |
python/tasker/src/json/Importer.py | PUT-II/akai-org-rekrutacja | 0 | 12770327 | <reponame>PUT-II/akai-org-rekrutacja<filename>python/tasker/src/json/Importer.py
import json
class Importer:
    """Reads a task list from a JSON file and exposes it as Python objects."""

    # Original hard-coded file name, kept as the default for compatibility.
    DEFAULT_PATH = "taski.json"

    def __init__(self):
        # Raw JSON text filled in by read_tasks(); empty until then.
        # (Removed the dead `pass` that followed this assignment.)
        self.__encoded_tasks: str = ""

    def read_tasks(self, path: str = DEFAULT_PATH) -> None:
        """Read the JSON file at *path* (defaults to the original "taski.json").

        Generalized from the hard-coded file name; calling read_tasks()
        with no argument behaves exactly as before.
        """
        with open(path, mode="r", encoding="utf8") as file:
            self.__encoded_tasks = file.read()

    def get_tasks(self):
        """Parse and return the previously read JSON payload."""
        return json.loads(self.__encoded_tasks)
| 2.25 | 2 |
kCharge-firmware/handlers.py | koalacreations/kCharge-firmware | 0 | 12770328 | import logging
# Module-level logger; DEBUG so the per-channel action messages below are visible.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def start_action(payload, channels, ws):
    """Dispatch a start-action command from a websocket *payload*.

    The payload carries a 1-based "channel" index into *channels* and an
    "action" name. Only "discharge" is wired to a channel call; "charge"
    and "dcResistance" currently just log. rate/cutoffVoltage are read
    from the payload but not yet used.
    """
    channel = payload.get("channel")
    action = payload.get("action")
    rate = payload.get("rate")
    cutoff_voltage = payload.get("cutoffVoltage")

    if action == "discharge":
        channels[channel - 1].start_discharge()
    elif action == "charge":
        log.info("Starting CHARGE from startAction command.")
    elif action == "dcResistance":
        log.info("Starting DC RESISTANCE from startAction command.")
def stop_action(payload, channels, ws):
    """Stop whatever the addressed channel is doing (1-based channel index)."""
    channel_number = payload.get("channel")
    target = channels[channel_number - 1]
    target.stop_action()
| 2.953125 | 3 |
tests/test_misc.py | jansel/torchdynamo | 41 | 12770329 | <filename>tests/test_misc.py
#!/usr/bin/env pytest
import collections
import copy
import dataclasses
import dis
import enum
import functools
import math
import sys
import typing
import unittest
import numpy as np
import torch
import torchdynamo.testing
from torchdynamo import bytecode_transformation
from torchdynamo.testing import CompileCounter
from torchdynamo.testing import requires_static_shapes
from torchdynamo.testing import same
from torchdynamo.testing import unsupported
mytuple = collections.namedtuple("mytuple", ["a", "b", "ab"])
def my_custom_function(x):
    """Return *x* incremented by one."""
    incremented = x + 1
    return incremented
class MiscTests(torchdynamo.testing.TestCase):
def test_boolarg(self):
def boolarg(aa, bb, flag):
if flag:
return aa - bb
else:
return bb - aa
a = torch.randn(10, 10)
b = torch.randn(10, 10)
correct1 = boolarg(a, b, True)
correct2 = boolarg(a, b, False)
correct3 = boolarg(a, b, None)
counter = CompileCounter()
with torchdynamo.optimize_assert(counter):
val1 = boolarg(a, b, True)
val2 = boolarg(a, b, False)
val3 = boolarg(a, b, None)
val4 = boolarg(a, b, True)
self.assertTrue(same(val1, correct1))
self.assertTrue(same(val2, correct2))
self.assertTrue(same(val3, correct3))
self.assertTrue(same(val4, correct1))
self.assertEqual(counter.frame_count, 3)
def test_callpacked(self):
def call_packed(args):
a, b, c = args
return a - b * c
counter = CompileCounter()
a = torch.randn(10, 10)
b = torch.randn(10, 10)
c = torch.randn(10, 10)
correct = call_packed([a, b, c])
with torchdynamo.optimize_assert(counter):
val1 = call_packed([a, b, c])
val2 = call_packed((a, b, c))
val3 = call_packed([a, b, c])
val4 = call_packed((a, b, c))
self.assertTrue(same(val1, correct))
self.assertTrue(same(val2, correct))
self.assertTrue(same(val3, correct))
self.assertTrue(same(val4, correct))
self.assertEqual(counter.frame_count, 2)
def test_raises(self):
def fn(a, b, c, cls):
x = a + b - c * 10
raise cls(str(x))
counter = CompileCounter()
a = torch.randn(10, 10)
b = torch.randn(10, 10)
c = torch.randn(10, 10)
with torchdynamo.optimize(counter):
self.assertRaises(AssertionError, lambda: fn(a, b, c, AssertionError))
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 3)
def test_inplace(self):
def inplace1(a, b):
o = torch.empty((10, 10))
o.copy_(a)
o -= b
return o
torchdynamo.testing.standard_test(self, inplace1, 2, expected_ops=3)
def test_unpack4(self):
def unpack4(a, b):
a = a[:5, :]
b = b[:5, :]
x, y = a.size()
o = torch.empty((x, y))
o.copy_(a / b)
return o
torchdynamo.testing.standard_test(
self, unpack4, 2, expected_ops=5, expected_ops_dynamic=8
)
def test_unpack5(self):
def unpack5(a, b):
a = a[:5, :]
b = b[:5, :]
x, y = a.shape
o = torch.empty((x, y))
o.copy_(a / b)
return o
torchdynamo.testing.standard_test(
self, unpack5, 2, expected_ops=5, expected_ops_dynamic=8
)
def test_matmul1(self):
def matmul_op1(a, b):
return a @ b
# TODO(jansel): FX doesn't support this, should add upstream support
torchdynamo.testing.standard_test(self, matmul_op1, 2, expected_ops=1)
def test_builtin_isinstance(self):
def fn(x):
t = torch.arange(1, 3)
a = isinstance(x, torch.Tensor)
b = isinstance(t, torch.Tensor)
c = isinstance(x, int)
d = isinstance(3, int)
e = isinstance([1, 2, 3], list)
f = isinstance({"foo": 1, "bar": 2}, dict)
res = [a, b, c, d, e, f]
# Can't run yet due to other unimplemented instructions
# res += [isinstance(torch.nn.LazyLinear(2, 3), torch.nn.Linear)]
return res
torchdynamo.testing.standard_test(self, fn, 1, expected_ops=1)
def test_fold(self):
def fn(a):
return a + math.sqrt(63)
torchdynamo.testing.standard_test(self, fn, 1, expected_ops=1)
def test_shape_unpack(self):
def fn(x):
a, b = x.size()
return x * b
i = torch.randn(5, 10)
r1 = fn(i)
with torchdynamo.optimize(lambda gm: gm.forward):
r2 = fn(i)
self.assertTrue(same(r1, r2))
def test_empty_list(self):
def fn(x, ll):
if len(ll) == 0 and not ll and ll is not None:
return x + 1
i = torch.randn(5, 10)
r1 = fn(i, [])
with torchdynamo.optimize(lambda gm: gm.forward):
r2 = fn(i, [])
r3 = fn(i, tuple())
self.assertTrue(same(r1, r2))
self.assertTrue(same(r1, r3))
def test_config_obj(self):
class Cfg:
def __init__(self):
self.val = 0.5
self.count = 3
def fn(x, cfg):
for i in range(cfg.count):
x = x + cfg.val
return x
cfg1 = Cfg()
cfg1.val = 1.0
cfg2 = Cfg()
v = torch.zeros(1)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
v = fn(v, cfg1) # 3
v = fn(v, cfg2) # 4.5
cfg2.count = 1
v = fn(v, cfg2) # 5
cfg2.val = 2.0
v = fn(v, cfg2) # 7
self.assertEqual(v[0], 7)
self.assertEqual(cnts.op_count, 8)
def test_config_getattr_default(self):
class Cfg:
def __init__(self):
self.val = 0.5
self.count = 10
def fn(x, cfg):
if getattr(cfg, "just_add_7", False):
return x + 7
for i in range(cfg.count):
x = x + cfg.val
return x
cfg1 = Cfg()
v = torch.zeros(1)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn(v, cfg1)[0], 5)
self.assertEqual(fn(v, cfg1)[0], 5)
cfg1.just_add_7 = True
self.assertEqual(fn(v, cfg1)[0], 7)
self.assertEqual(fn(v, cfg1)[0], 7)
cfg1.just_add_7 = False
self.assertEqual(fn(v, cfg1)[0], 5)
self.assertEqual(fn(v, cfg1)[0], 5)
self.assertEqual(cnts.frame_count, 3)
def test_size_input(self):
def fn(x, s):
a, b = s
return x + (a - b)
v = torch.zeros(10, 20)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn(v, v.size())[0, 0], -10)
self.assertEqual(fn(v, (10, 20))[0, 0], -10)
self.assertEqual(fn(v, [10, 20])[0, 0], -10)
self.assertEqual(cnts.op_count, 2)
def test_cell_output1(self):
out = None
def fn(a, b):
nonlocal out
out = a + b * 10
v = torch.Tensor([100])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertIsNone(fn(v, v))
self.assertEqual(out[0], 1100)
self.assertEqual(cnts.op_count, 2)
def test_cell_output2(self):
out = None
def fn(a, b):
nonlocal out
c = unsupported(a, b)
out = a + b * 10 + c
v = torch.Tensor([100])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertIsNone(fn(v, v))
self.assertEqual(out[0], 1200)
self.assertEqual(cnts.op_count, 3)
def test_return_nested_function(self):
out = None
def fn(a, b):
nonlocal out
c = a + b
d = a + 1.0
def fn2(f: int = 7, g: float = 9.0):
nonlocal out
out = a + b * 10
return c * f - d * g
return fn2
v1 = torch.Tensor([100])
v2 = torch.Tensor([200])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn(v1, v2)(1.5)[0], -459)
self.assertEqual(out[0], 2100)
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 7)
def test_tensor_dict1(self):
def fn(inputs):
return inputs["a"] - inputs["b"] * 1.5
v1 = torch.Tensor([100])
v2 = torch.Tensor([200])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn({"a": v1, "b": v2})[0], -200)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_tensor_dict2(self):
def fn1(inputs):
total = torch.zeros(1)
for k, v in inputs.items():
total += v
return total
def fn2(inputs):
total = torch.zeros(1)
for v in inputs.values():
total += v
return total
def fn3(inputs):
total = torch.zeros(1)
for k in inputs.keys():
total += inputs[k]
return total
v1 = torch.Tensor([100])
v2 = torch.Tensor([200])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn1({"a": v1, "b": v2})[0], 300)
self.assertEqual(fn2({"a": v1, "b": v2})[0], 300)
self.assertEqual(fn3({"a": v1, "b": v2})[0], 300)
self.assertEqual(cnts.frame_count, 3)
self.assertEqual(cnts.op_count, 9)
def test_dictcomp(self):
def fn1(inputs):
return {k: v + 1 for k, v in inputs.items()}
v1 = torch.Tensor([100])
v2 = torch.Tensor([200])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn1({"a": v1, "b": v2})["a"], 101)
self.assertEqual(fn1({"a": v1, "b": v2})["b"], 201)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_listcomp(self):
def fn2(inputs):
return torch.sum(torch.cat([v + 1 for k, v in inputs.items()], 0))
v1 = torch.Tensor([100])
v2 = torch.Tensor([200])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn2({"a": v1, "b": v2}), 302)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 4)
def test_is_floating_point(self):
def fn(a, b):
x = a + 1.0
if torch.is_floating_point(b):
x = x + b
return x + 2.0
return torchdynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
def test_is_floating_point2(self):
def fn(a, b):
x = a + 1.0
if b.is_floating_point():
x = x + b
return x + 2.0
return torchdynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
def test_is_tensor(self):
def fn(a, b):
x = a + 1.0
if torch.is_tensor(b):
x = x + b
return x + 2.0
return torchdynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
def test_numel(self):
def fn(a):
return a + a.numel() + torch.numel(a)
return torchdynamo.testing.standard_test(
self, fn=fn, nargs=1, expected_ops=2, expected_ops_dynamic=4
)
def test_pair(self):
def fn(a):
return (
torch.zeros(torch.nn.modules.utils._pair(a.size()))
+ a
+ torch.ones(torch.nn.modules.utils._ntuple(3)(3)).sum()
)
return torchdynamo.testing.standard_test(
self, fn=fn, nargs=1, expected_ops=5, expected_ops_dynamic=8
)
def test_tensor_item(self):
    """Tensor.item() after a reduction should compile in a single frame.

    Checks both numerical agreement with eager mode and that dynamo
    captured the add + sum as one graph (frame_count == 1, op_count == 2).
    """
    def fn(a, b):
        return (a + b).sum().item()

    v1 = torch.randn((10, 10))
    v2 = torch.randn((10, 10))
    correct = fn(v1, v2)
    cnts = torchdynamo.testing.CompileCounter()
    # Fixed: was `torchdynamo.optimize((cnts))` -- the doubled parentheses
    # were redundant and inconsistent with every sibling test.
    with torchdynamo.optimize(cnts):
        self.assertEqual(fn(v1, v2), correct)
    self.assertEqual(cnts.frame_count, 1)
    self.assertEqual(cnts.op_count, 2)
def test_namedtuple1(self):
def fn(a, b):
tmp = mytuple(a, b, a + b)
return mytuple(tmp.a, tmp[1], tmp.ab + b)
v1 = torch.Tensor([10])
v2 = torch.Tensor([20])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn(v1, v2).ab, 50)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_namedtuple2(self):
def fn(packed):
a, b, c = packed
if hasattr(packed, "b"):
b = packed.b + 1
c = packed[2]
return a + b + c
v1 = torch.Tensor([1])
v2 = torch.Tensor([2])
v3 = torch.Tensor([3])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn(mytuple(v1, v2, v3))[0], 7)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 3)
def test_range_input(self):
def fn(a, rng):
x = a
for i in rng:
x = x + i
return x
return torchdynamo.testing.standard_test(
self, fn=functools.partial(fn, rng=range(3)), nargs=1, expected_ops=3
)
def test_no_grad(self):
def fn1(a, b):
x = a + 1
# redundant no_grad should get ignored
with torch.no_grad():
x = x + b
x = x + 2
return x
def fn2(a, b):
x = a + 1
with torch.set_grad_enabled(False):
x = x + b
x = x + 2
return x
def fn3(a, b):
x = a + 1
with torch.enable_grad():
x = x + b
x = x + 2
return x
def fn4(a, b):
x = a + 1
with torch.set_grad_enabled(True):
if torch.is_grad_enabled():
x = x + b
x = x + 2
return x
with torch.no_grad():
torchdynamo.testing.standard_test(self, fn=fn1, nargs=2, expected_ops=3)
torchdynamo.testing.standard_test(self, fn=fn2, nargs=2, expected_ops=3)
torchdynamo.testing.standard_test(self, fn=fn3, nargs=2, expected_ops=5)
torchdynamo.testing.standard_test(self, fn=fn4, nargs=2, expected_ops=5)
with torch.enable_grad():
torchdynamo.testing.standard_test(self, fn=fn1, nargs=2, expected_ops=5)
torchdynamo.testing.standard_test(self, fn=fn2, nargs=2, expected_ops=5)
torchdynamo.testing.standard_test(self, fn=fn3, nargs=2, expected_ops=3)
torchdynamo.testing.standard_test(self, fn=fn4, nargs=2, expected_ops=3)
def test_build_tuple_unpack(self):
def fn1(a, b, c):
return a - b / c
def fn2(a, b, c):
tmp1 = (a,)
tmp2 = (b, c)
args = (*tmp1, *tmp2)
return fn1(*args)
def fn3(a, *args):
return fn1(a, *args)
torchdynamo.testing.standard_test(self, fn=fn2, nargs=3, expected_ops=2)
torchdynamo.testing.standard_test(self, fn=fn3, nargs=3, expected_ops=2)
def test_list_mul(self):
def fn(count):
head_mask = count * [None] * count
return head_mask
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertEqual(fn(2), [None] * 4)
self.assertEqual(cnts.frame_count, 0)
self.assertEqual(cnts.op_count, 0)
def test_user_getattr1(self):
class MyConfig(dict):
def __getattr__(self, name):
return self[name]
def fn(cfg, x, y):
return x + y + cfg.offset
x = torch.randn(10)
cfg = MyConfig(offset=5)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(cfg, x, x), 2 * x + 5))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_user_getattr2(self):
class MyConfig:
defined_on_class = 1
def __init__(self):
self.defined_on_object = 2
def __getattr__(self, name):
return 3
def fn(cfg, x):
return x + cfg.defined_on_class - cfg.defined_on_object + cfg.not_defined
x = torch.randn(10)
cfg = MyConfig()
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(cfg, x), x + 1 - 2 + 3))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 3)
def test_user_property(self):
class MyConfig:
@property
def prop5(self):
return 5
def fn(cfg, x, y):
return x + y + cfg.prop5
x = torch.randn(10)
cfg = MyConfig()
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(cfg, x, x), 2 * x + 5))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_dataclass_fields(self):
@dataclasses.dataclass
class MyDataClass:
a: torch.Tensor
b: torch.Tensor = None
c: torch.Tensor = None
d: torch.Tensor = None
e: torch.Tensor = None
def fn(obj):
class_fields = dataclasses.fields(obj)
assert len(class_fields)
assert all(field.default is None for field in class_fields[1:])
other_fields_are_none = all(
getattr(obj, field.name) is None for field in class_fields[1:]
)
assert not other_fields_are_none
total = getattr(obj, class_fields[0].name)
for field in class_fields[1:]:
v = getattr(obj, field.name)
if v is not None:
total += v
return total
obj1 = MyDataClass(torch.randn(10), torch.randn(10), torch.randn(10))
obj2 = MyDataClass(torch.randn(10), e=torch.randn(10))
correct1 = fn(obj1)
correct2 = fn(obj2)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(obj1), correct1))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(obj2), correct2))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 1)
@requires_static_shapes
def test_tensor_build_list_unpack(self):
def fn(x):
# seen in fastNLP_Bert
return torch.cat([*x], dim=-1)
val = torch.randn([1, 1, 473, 768])
correct = fn(val)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(val), correct))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_numpy_int_constant(self):
def fn(x, a, b):
return x + (a % b)
args = [torch.randn(10), 4096, np.int64(8)]
correct = fn(*args)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(*args), correct))
self.assertTrue(same(fn(*args), correct))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_dict_mutation_side_effect(self):
def fn(d):
d["c"] = d["a"] + d.pop("b")
return d
args1 = {"a": torch.randn(10), "b": torch.randn(10)}
args2 = dict(args1)
assert fn(args1) is args1
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertIs(fn(args2), args2)
self.assertTrue(same(args1, args2))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 1)
def test_module_deepcopy(self):
m1 = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
)
m2 = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
)
def fn(m, x):
m_copy = copy.deepcopy(m)
return m_copy(x)
v = torch.randn(10)
correct1 = fn(m1, v)
correct2 = fn(m2, v)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
for _ in range(10):
self.assertTrue(same(fn(m1, v), correct1))
with torchdynamo.optimize(cnts):
for _ in range(10):
self.assertTrue(same(fn(m2, v), correct2))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 4)
def test_type_copy(self):
def fn(seq):
a, b = seq
return type(seq)([a + 1, b + 2, a + b])
args1 = [torch.randn(10), torch.randn(10)]
args2 = tuple([torch.randn(10), torch.randn(10)])
correct1 = fn(args1)
correct2 = fn(args2)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertTrue(same(fn(args1), correct1))
self.assertTrue(same(fn(args2), correct2))
self.assertIsInstance(fn(args1), list)
self.assertIsInstance(fn(args2), tuple)
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 6)
def test_setattr_mutation1(self):
class MyObj:
def __init__(self, a, b):
self.a = a
self.b = b
def fn(obj):
obj.c = obj.a * obj.b + 1
obj.b = obj.a * obj.c + 2
obj.a = obj.b * obj.c + 3
obj.c = obj.a * obj.b + 4
obj.b = obj.a * obj.c + 5
obj.a = obj.b * obj.c + 6
return obj
x1 = torch.randn(10)
x2 = torch.randn(10)
obj1 = MyObj(x1, x2)
obj2 = MyObj(x1, x2)
fn(obj2)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
self.assertIs(fn(obj1), obj1)
self.assertTrue(same(obj1.a, obj2.a))
self.assertTrue(same(obj1.b, obj2.b))
self.assertTrue(same(obj1.c, obj2.c))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 12)
def test_setattr_mutation2(self):
class MyObj:
def __init__(self, x):
self.a = x + 1
self.b = x + 2
def fn(x):
x = x / 3.0
obj = MyObj(x)
obj.c = obj.a * obj.b + 1
obj.b = obj.a * obj.c + 2
obj.a = obj.b * obj.c + 3
return obj
x1 = torch.randn(10)
obj2 = fn(x1)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
obj1 = fn(x1)
self.assertTrue(same(obj1.a, obj2.a))
self.assertTrue(same(obj1.b, obj2.b))
self.assertTrue(same(obj1.c, obj2.c))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 9)
def test_setattr_mutation3(self):
# TODO(jansel): dead code eliminate the object creation
class MyObj:
def __init__(self, x):
super().__init__()
self.a = x + 1
self.b = x + 2
def fn(x):
x = x / 3.0
obj = MyObj(x)
obj.c = obj.a * obj.b + 1
obj.b = obj.a * obj.c + 2
obj.a = obj.b * obj.c + 3
return obj.a, obj.b, obj.c
x1 = torch.randn(10)
obj2 = fn(x1)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
obj1 = fn(x1)
self.assertTrue(same(obj1, obj2))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 9)
def test_user_defined_class_name(self):
class MyClassFoo:
pass
def fn1(a, b, c):
tmp = MyClassFoo()
if tmp.__class__.__name__ == "MyClassFoo":
return a - b / c
torchdynamo.testing.standard_test(self, fn=fn1, nargs=3)
def test_manual_seed(self):
def fn(a, b):
x = a + b
torch.manual_seed(9000)
return x + 1
torchdynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
def test_usr_cls_staticmethod(self):
class Foo:
@staticmethod
def bar(a, b):
return a + b
def fn(a, b):
return Foo.bar(a, b) - 1
torchdynamo.testing.standard_test(self, fn=fn, nargs=2)
def test_usr_cls_classmethod(self):
class Foo:
@classmethod
def bar(cls, a, b):
return a + b
def fn(a, b):
return Foo.bar(a, b) - 1
torchdynamo.testing.standard_test(self, fn=fn, nargs=2)
def test_dunder_methods(self):
class Foo:
def __init__(self, val):
super().__init__()
self.val = val
def __add__(self, other):
return Foo(self.val + other.val)
def __mul__(self, other):
return Foo(self.val * other.val)
def __truediv__(self, other):
return Foo(self.val / other.val)
def __sub__(self, other):
return Foo(self.val - other.val)
def fn(a, b, c):
return Foo(a) + Foo(b) * Foo(c) / Foo(a) - Foo(b)
torchdynamo.testing.standard_test(self, fn=fn, nargs=3, expected_ops=4)
def test_function_annotation(self):
class Variable:
pass
def fn(x):
x = x / 3.0
def inner(y: typing.List[Variable]):
return x + 1
return inner
x1 = torch.randn(10)
obj2 = fn(x1)([])
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize_assert(cnts):
obj1 = fn(x1)([])
self.assertTrue(same(obj1, obj2))
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 2)
def test_nested_closure(self):
v0 = torch.randn(10)
def fn1():
v1 = torch.randn(10)
def fn2(*args, **kwargs):
assert len(args) == 1
assert len(kwargs) == 1
v2 = torch.randn(10) + args[0] + kwargs["b"]
def fn3(v3=torch.randn(10)):
def fn4():
return v0 + v1 + v2 + v3 + 1
return fn4
return fn3
return fn2(1, b=2)()
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize_assert(cnts):
tmp1 = fn1()
tmp2 = fn1()
self.assertTrue(tmp1().shape, (10,))
self.assertTrue(same(tmp1(), tmp1()))
self.assertFalse(same(tmp1(), tmp2()))
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 9)
def test_nested_closure_mutation(self):
def fn1():
v1 = torch.randn(10)
def fn2():
v2 = torch.randn(10)
def fn3():
nonlocal v1, v2
v1 += 1
v2 += 2
return v1 + v2
return fn3
rv = fn2()
rv()
rv()
return rv
torch.manual_seed(9000)
counter1 = fn1()
result1 = [counter1(), counter1(), counter1()]
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize_assert(cnts):
torch.manual_seed(9000)
counter2 = fn1()
result2 = [counter2(), counter2(), counter2()]
result1.append(counter1())
result2.append(counter2())
self.assertTrue(same(result1, result2))
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 11)
def test_write_to_closures_in_inlining(self):
out = []
for use_dynamo in [False, True]:
def make_counter():
x = torch.randn(10)
def counter():
nonlocal x
x = x + 1
return x
return counter
torch.manual_seed(0)
counter = make_counter()
if not use_dynamo:
out.append(counter() + counter())
else:
cnts = torchdynamo.testing.CompileCounter()
@torchdynamo.optimize(cnts, nopython=True)
def fn(counter):
return counter() + counter()
out.append(fn(counter))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 3)
self.assertFalse(same(counter() + counter(), out[-1]))
self.assertTrue(same(out[0], out[1]))
def test_top_package_import(self):
def fn(x):
import torch.fx
assert not isinstance(x, torch.fx.Proxy)
return torch.sin(x)
x = torch.randn(4, 5)
ref = fn(x)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize_assert(cnts):
res = fn(x)
self.assertTrue(same(ref, res))
def test_nested_optimize_decorator(self):
cnts2 = torchdynamo.testing.CompileCounter()
cnts3 = torchdynamo.testing.CompileCounter()
@torchdynamo.run()
def fn1(x):
return torch.sin(x) * 10
@torchdynamo.optimize(cnts2, nopython=True)
def fn2(x):
return fn1(x) + 1
@torchdynamo.optimize(cnts3, nopython=True)
def fn3(x):
return torch.relu(fn2(x))
fn3(torch.randn(4, 5))
self.assertEqual(cnts2.frame_count, 0)
self.assertEqual(cnts3.frame_count, 1)
self.assertEqual(cnts3.op_count, 4)
def test_nested_disable_decorator(self):
cnts = torchdynamo.testing.CompileCounter()
@torchdynamo.disable()
def fn1(x):
return torch.sin(x) * 10
@torchdynamo.optimize(cnts)
def fn2(x):
x = x + 1
x = x + 1
x = fn1(x) # graph break
x = x + 1
x = x + 1
return x
@torchdynamo.optimize(cnts, nopython=True)
def fn3(x):
return fn2(x)
fn2(torch.randn(4, 5))
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 4)
try:
fn3(torch.randn(4, 5))
self.assertFalse(True)
except torchdynamo.exc.Unsupported as e:
self.assertIn("call torchdynamo.disable() wrapped function", str(e))
def test_torch_size(self):
cnts = torchdynamo.testing.CompileCounter()
def fn(x):
output_size = torch.Size([10, 10])
x = x.view(*output_size)
return (x,)
x = torch.randn(100, requires_grad=True)
x_clone = x.clone()
ref = fn(x)
with torchdynamo.optimize(cnts, nopython=True):
res = fn(x_clone)
self.assertTrue(same(ref, res))
def test_torch_seed(self):
cnts = torchdynamo.testing.CompileCounter()
def fn(x):
attention_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(attention_seed)
return (x,)
x = torch.randn(100, requires_grad=True)
ref = fn(x)
with torchdynamo.optimize(cnts, nopython=True):
res = fn(x)
self.assertTrue(same(ref, res))
def test_is_tensor_like(self):
cnts = torchdynamo.testing.CompileCounter()
def f(x):
if torch.overrides.is_tensor_like(x):
return (x * 2,)
return (torch.ones(10) + x,)
x = torch.randn(10)
ref0 = f(x)
ref1 = f(4)
with torchdynamo.optimize(cnts, nopython=True):
res0 = f(x)
res1 = f(4)
self.assertTrue(same(ref0, res0))
self.assertTrue(same(ref1, res1))
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_rand(self):
cnts = torchdynamo.testing.CompileCounter()
device = "cuda"
def fn():
return torch.randn(10, device=device)
torch.manual_seed(10)
ref_run1 = fn()
torch.manual_seed(10)
ref_run2 = fn()
self.assertTrue(same(ref_run1, ref_run2))
torch.manual_seed(10)
with torchdynamo.optimize(cnts, nopython=True):
res = fn()
self.assertTrue(same(res, ref_run1))
def test_slice_input(self):
cnts = torchdynamo.testing.CompileCounter()
def getitem(a, idx):
if isinstance(idx, slice):
return (
torch.zeros(1),
a[idx]
+ [
100,
],
)
else:
return (torch.zeros(1), a[idx])
layers = list(range(10))
ref0 = getitem(layers, slice(0, 2, 1))
ref1 = getitem(layers, 2)
ref2 = getitem(layers, slice(3, 8, 2))
with torchdynamo.optimize(cnts, nopython=True):
res0 = getitem(layers, slice(0, 2, 1))
res1 = getitem(layers, 2)
res2 = getitem(layers, slice(3, 8, 2))
self.assertTrue(ref0 == res0)
self.assertTrue(ref1 == res1)
self.assertTrue(ref2 == res2)
def test_grad(self):
cnts = torchdynamo.testing.CompileCounter()
def fn(a, b):
out = a * b
out.sum().backward()
real_out = torch.sigmoid(a.grad + b)
return real_out
inps = [torch.randn(4, requires_grad=True) for _ in range(2)]
for inp in inps:
inp.grad = None
ref = fn(*inps)
for inp in inps:
inp.grad = None
with torchdynamo.optimize(cnts):
res = fn(*inps)
self.assertTrue(same(ref, res))
@unittest.skipIf(sys.version_info < (3, 10), "use linetable when python >= 3.10")
def test_linetable_writer(self):
def fn():
a = 10
b = 20
c = a + b
f = "linetable_writer"
return f"Test if {f} generates correct co_linetable: {c}"
inst = dis.get_instructions(fn)
result = bytecode_transformation.assemble(inst, fn.__code__.co_firstlineno)
self.assertTrue(result[1] == fn.__code__.co_linetable)
@unittest.skipIf(sys.version_info >= (3, 10), "use lnotab when python < 3.10")
def test_lnotab_writer(self):
def fn():
a = 10
b = 20
c = a + b
f = "lnotab_writer"
return f"Test if {f} generates correct co_lnotab: {c}"
inst = dis.get_instructions(fn)
result = bytecode_transformation.assemble(inst, fn.__code__.co_firstlineno)
self.assertTrue(result[1] == fn.__code__.co_lnotab)
def test_python_slice(self):
def f1(input):
y = 0
for i, x in enumerate(input[2:], 1):
y = y + x
return y
def f2(input):
y = 0
for i, x in enumerate(input.shape[2:], 1):
y = y + x
return y
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
res1 = f1([1, 2, 3, 5])
res2 = f2(torch.rand([2, 3, 4, 5]))
self.assertEqual(res1, 8)
self.assertEqual(res2, 9)
def test_const_dict_variable_python_type(self):
from torchdynamo.variables import ConstDictVariable
d1 = {"a": 10, "b": 20}
d2 = collections.OrderedDict([("x", 12), ("y", 22)])
self.assertEqual(ConstDictVariable(d1, dict).python_type(), dict)
self.assertEqual(
ConstDictVariable(d2, collections.OrderedDict).python_type(),
collections.OrderedDict,
)
def test_builtin_subclasses_as_method_on_class_type(self):
    """__subclasses__() called on a class type should be usable inside a
    compiled function and agree with the eager result.

    Bug fixes relative to the original: each nested ``__init__`` was
    declared as ``__init__(name)`` (no ``self``), so ``self`` was an
    undefined name inside the body, and ``Foo.__init__`` assigned the
    misspelled attribute ``self.ame_`` while ``get_name`` reads
    ``self.name_``.  The classes are never instantiated by this test, so
    the assertions below are unchanged.
    """
    class Foo:
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Foo " + self.name_

    class Bar(Foo):
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Bar " + self.name_

    class Baz(Foo):
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Baz " + self.name_

    # Both Bar and Baz derive directly from Foo here.
    subs_of_foo_reg = Foo.__subclasses__()

    counter = CompileCounter()

    @torchdynamo.optimize_assert(counter)
    def fn():
        return Foo.__subclasses__()

    subs_of_foo_optim = fn()

    self.assertEqual(len(subs_of_foo_reg), 2)
    self.assertEqual(subs_of_foo_reg, subs_of_foo_optim)
def test_builtin_subclasses_as_method_on_var(self):
    """__subclasses__() called on a variable holding a class (an element of
    an earlier __subclasses__() result) should also work under compilation.

    Bug fix relative to the original: each nested ``__init__`` was declared
    as ``__init__(name)`` with no ``self`` parameter, making ``self`` an
    undefined name inside the body.  The classes are never instantiated by
    this test, so the assertions below are unchanged.
    """
    class Foo:
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Foo " + self.name_

    class Bar(Foo):
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Bar " + self.name_

    class Baz(Bar):
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Baz " + self.name_

    subs_of_foo_reg = Foo.__subclasses__()
    # Baz is the sole subclass of Bar (the first subclass of Foo).
    sub_of_foo_subclass_var_reg = subs_of_foo_reg[0].__subclasses__()

    sub_of_foo_subclass_var_optim = list()
    counter = CompileCounter()

    @torchdynamo.optimize_assert(counter)
    def fn():
        return Foo.__subclasses__()

    @torchdynamo.optimize_assert(counter)
    def fn_single(subs_of_foo_optim):
        return subs_of_foo_optim[0].__subclasses__()

    subs_of_foo_optim = fn()
    sub_of_foo_subclass_var_optim = fn_single(subs_of_foo_optim)

    self.assertEqual(len(sub_of_foo_subclass_var_optim), 1)
    self.assertEqual(sub_of_foo_subclass_var_optim, sub_of_foo_subclass_var_reg)
def test_enum_no_graphbreaks(self):
class Foo(enum.Enum):
FOO = 0
BAR = 1
def fn(x, foo):
if foo is Foo.FOO:
x = torch.add(x, 1.0)
x = torch.mul(x, 1.0)
return x
x = torch.randn(1)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts, nopython=True):
fn(x, Foo.FOO)
self.assertEqual(cnts.op_count, 2)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts, nopython=True):
fn(x, Foo.BAR)
self.assertEqual(cnts.op_count, 1)
def test_id_of_nn_module(self):
class M(torch.nn.Module):
def forward(self, x, ref_id):
self_id = id(self)
if self_id == ref_id:
x = torch.mul(x, 1.0)
x = torch.add(x, 1.0)
return x
m = M().eval()
data = torch.randn(1)
cnts = torchdynamo.testing.CompileCounter()
correct_ref_id = id(m)
with torchdynamo.optimize(cnts, nopython=True):
m(data, correct_ref_id)
self.assertEqual(cnts.op_count, 2)
cnts = torchdynamo.testing.CompileCounter()
incorrect_ref_id = id(m) + 1
with torchdynamo.optimize(cnts, nopython=True):
m(data, incorrect_ref_id)
self.assertEqual(cnts.op_count, 1)
def test_inline_func_jump_on_tensor_condition(self):
def f1(input):
if input == 0:
return input + 1
else:
return input + 2
def f2(input):
return f1(input)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
res1 = f2(torch.tensor([1.0]))
res2 = f2(torch.tensor([0.0]))
self.assertEqual(res1, 3)
self.assertEqual(res2, 1)
def test_frozenset_torch_func_contains(self):
funcs = frozenset([torch.add])
def fn(x, func):
if func in funcs:
x = torch.add(x, 1.0)
x = torch.mul(x, 1.0)
return x
x = torch.randn(1)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts, nopython=True):
fn(x, torch.add)
self.assertEqual(cnts.op_count, 2)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts, nopython=True):
fn(x, torch.mul)
self.assertEqual(cnts.op_count, 1)
def test_inline_list_mutation(self):
def f1(x):
x.append(torch.ones(8))
return x
def f2():
x = [torch.ones(6)]
f1(x)
return x
res1 = f2()
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
res2 = f2()
self.assertTrue(same(res1, res2))
def test_inline_dict_mutation(self):
def f1(d):
d["c"] = d["a"] + d.pop("b")
return d
def f2():
d = {"a": torch.ones(5), "b": torch.ones(5)}
f1(d)
return d
res1 = f2()
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
res2 = f2()
self.assertTrue(same(res1, res2))
def test_recursive_inline_list_mutation(self):
def f1(x, y):
x.append(torch.tensor([1.1]))
y.append(torch.tensor([1.2]))
return x, y
def f2(x, y):
x.append(torch.tensor([2.1]))
y.append(torch.tensor([2.2]))
f1(x, y)
return x, y
def f3(x):
x.append(torch.tensor([3.1]))
y = [torch.tensor([3.2])]
f2(x, y)
return x, y
def f4():
x = [torch.tensor([4.1])]
return f3(x)
res1 = f4()
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
res2 = f4()
self.assertTrue(same(res1, res2))
def test_disallow_in_graph(self):
cnts = torchdynamo.testing.CompileCounter()
@torchdynamo.optimize(cnts)
def fn(a):
x = torch.add(a, 1)
x = torch.add(x, 1)
x = torch.sub(x, 1)
x = torch.add(x, 1)
x = torch.add(x, 1)
return x
torchdynamo.disallow_in_graph(torch.sub)
fn(torch.randn(10))
torchdynamo.allow_in_graph(torch.sub)
# check for graph break on sub
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 4)
def test_allow_in_graph(self):
cnts = torchdynamo.testing.CompileCounter()
@torchdynamo.optimize(cnts)
def fn(a):
x = torch.add(a, 1)
x = torch.add(x, 1)
x = my_custom_function(x)
x = torch.add(x, 1)
x = torch.add(x, 1)
return x
torchdynamo.allow_in_graph(my_custom_function)
fn(torch.randn(10))
torchdynamo.disallow_in_graph(my_custom_function)
# check for no graph break
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 5)
def test_sample_input(self):
from torch.testing._internal.common_methods_invocations import SampleInput
def fn(sample):
if isinstance(sample.input, torch.Tensor):
return sample.input * 2
return torch.zeros(())
sample = SampleInput(torch.ones(2))
ref = fn(sample)
with torchdynamo.optimize("eager"):
res = fn(sample)
self.assertTrue(same(ref, res))
def test_update_locals_and_stack_uses_shared_cache(self):
def fn(x):
perm = [0, 3, 5]
perm = [i for i in range(min(perm))] + perm
perm.extend(i for i in range(x.dim()) if i not in perm)
return perm
x = torch.rand([2, 2, 2, 2, 2, 2])
res1 = fn(x)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
res2 = fn(x)
self.assertTrue(same(res1, res2))
def test_dict_reconstruct_keeps_original_order(self):
def fn():
modules = collections.OrderedDict([("act", torch.nn.ReLU())])
module_dict = torch.nn.ModuleDict(modules)
next_modules = {"fc4": torch.nn.Linear(5, 6), "act3": torch.nn.Sigmoid()}
modules.update(next_modules.items())
module_dict.update(next_modules)
return modules, module_dict
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
modules, module_dict = fn()
self.assertEqual(len(module_dict), len(modules))
for k1, m2 in zip(modules, module_dict.children()):
self.assertTrue(modules[k1] is m2)
def test_unspecialized_primitive_variable(self):
# correctness check
def fn(x, y, z):
xy = [x + y, y, False]
np_x = x.numpy()
np_y = y.numpy()
return {
"x": x,
"z": z,
"a": np_y.sum(),
"b": xy,
"c": np_y[0][0] / 68,
"d": np_x.sum(),
}
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64)
y = torch.ones([2, 2], dtype=torch.int64)
z = np.int64(12)
res1 = fn(x, y, z)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
res2 = fn(x, y, z)
self.assertTrue(same(res1, res2))
def test_unspecialized_primitive_variable2(self):
# no recompilations if passing on different numpy int values
def fn(x, y):
return {"a": x + 1, "b": y / 2}
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64)
cnts = torchdynamo.testing.CompileCounter()
with torchdynamo.optimize(cnts):
for i in range(10):
fn(x, np.int64(i))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
| 2.4375 | 2 |
luft/common/s3_utils.py | profesia/luft | 1 | 12770330 | # -*- coding: utf-8 -*-
"""S3 utils."""
import gzip
from typing import Optional
import boto3
def get_s3(aws_access_key, aws_secret_access_key):
    """Return a low-level boto3 S3 client built from explicit credentials."""
    return boto3.client(
        's3',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_access_key,
    )
def get_s3_resource(aws_access_key, aws_secret_access_key):
    """Return a high-level boto3 S3 resource built from explicit credentials."""
    return boto3.resource(
        's3',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_access_key,
    )
def write_s3(env: str, source_system: str, source_subsystem: str, object_name: str, s3,
             s3_bucket: str, content, date_valid: str, page: int = 1,
             extension: str = 'json', compress: bool = True, s3_path: Optional[str] = None):
    """Upload ``content`` to S3 under a key derived from the job metadata.

    :param env: environment prefix (first path segment of the key).
    :param source_system: second path segment.
    :param source_subsystem: third path segment.
    :param object_name: logical object name within the subsystem.
    :param s3: boto3 S3 client (anything exposing ``put_object``).
    :param s3_bucket: target bucket name.
    :param content: payload; a str when ``compress`` is True (it is UTF-8
        encoded before gzipping).
    :param date_valid: partition date; when falsy, the date segment is omitted.
    :param page: page number embedded in the file name.
    :param extension: file extension; ``.gz`` is appended when compressing.
    :param compress: gzip the payload before upload.
    :param s3_path: optional key template overriding the default layout.
    """
    # Falsy s3_path (None or '') falls back to the conventional layout.
    key_template = s3_path or ('{env}/{source_system}/{source_subsystem}/'
                               '{object_name}{date_valid}/data-{page}.{extension}')
    body = content
    suffix = extension
    if compress:
        body = gzip.compress(content.encode('utf-8'))
        suffix = '{}.gz'.format(extension)
    s3_key = key_template.format(
        env=env,
        source_system=source_system,
        source_subsystem=source_subsystem,
        object_name=object_name,
        date_valid='/{0}'.format(date_valid) if date_valid else '',
        page=page,
        extension=suffix,
    )
    print('Writing to {}'.format(s3_key))
    s3.put_object(Body=body, Bucket=s3_bucket, Key=s3_key)
| 2.53125 | 3 |
extra_tests/test_weakref.py | olliemath/pypy | 1 | 12770331 | <filename>extra_tests/test_weakref.py
import sys
import textwrap
import subprocess
def test_WeakValueDictionary_len(tmpdir):
    """len(WeakValueDictionary) must survive a minor GC mid-iteration.

    A tiny PYPY_GC_NURSERY forces minor collections inside
    ``WeakValueDictionary.__len__``; the child process must complete
    instead of raising "dictionary changed size during iteration".
    """
    script = textwrap.dedent("""
        from weakref import WeakValueDictionary

        class Foo:
            pass

        N = 1000
        D = WeakValueDictionary()
        for i in range(N):
            D[i] = Foo()

        for i in range(10):
            x = len(D)

        print('OK')
    """)
    target = tmpdir.join('testfile.py')
    target.write(script)
    subprocess.run(
        [sys.executable, str(target)],
        env={'PYPY_GC_NURSERY': '1k'},
        check=True,
    )
def test_WeakKeyDictionary_len(tmpdir):
    """len(WeakKeyDictionary) must survive a minor GC mid-iteration.

    Mirror of the WeakValueDictionary case: a tiny PYPY_GC_NURSERY forces
    minor collections inside ``__len__``; the child process must complete
    instead of raising "dictionary changed size during iteration".
    """
    script = textwrap.dedent("""
        from weakref import WeakKeyDictionary

        class Foo:
            pass

        N = 1000
        D = WeakKeyDictionary()
        for i in range(N):
            D[Foo()] = i

        for i in range(10):
            x = len(D)

        print('OK')
    """)
    target = tmpdir.join('testfile.py')
    target.write(script)
    subprocess.run(
        [sys.executable, str(target)],
        env={'PYPY_GC_NURSERY': '1k'},
        check=True,
    )
| 2.546875 | 3 |
src/tinder/v2/__init__.py | maxime-peim/tinder-api | 0 | 12770332 | <filename>src/tinder/v2/__init__.py
import tinder
class V2Exception(tinder.APIException):
    """Exception type for failures raised by v2 API calls."""
    pass


# Base URL of the v2 API, derived from the package-level tinder.URL.
URL = tinder.URL / 'v2'
# v2 endpoint URLs built from the base.  Exact request/response semantics
# are defined by the Tinder API, not shown here -- TODO confirm against
# the client modules that use them.
BUCKET_EP = URL / 'buckets'
RECS_EP = URL / 'user' / 'recs'
MATCHES = URL / 'matches'
projects/tests/test_example.py | csm-adapt/karon | 1 | 12770333 | <gh_stars>1-10
import pytest
import numpy as np
import pandas as pd
from karon.tree import NLRTree
from ..example import Example
@pytest.fixture
def structured():
    """Expected data for the structured example workbook.

    Keys:
        filename:  path of the example workbook the code under test reads.
        aggregate: path of the expected aggregated output workbook.
        propagate: path of the expected propagated output workbook.
        names:     every node name (UUID string) expected in the data.
        "NLR trees": node names grouped per tree, one tuple per tree with
            the root first.  Given the NLRTree import, these appear to be
            NLR (pre-order) traversals -- TODO confirm.
    """
    return {
        'filename': '../../tests/data/example.xlsx',
        'aggregate': 'data/example-aggregate.xlsx',
        'propagate': 'data/example-propagate.xlsx',
        'names': [
            "e4f6efde-abdb-4a02-a25a-3c3859857aee",
            "e8a80a70-18cc-44b9-8668-4f479918f13a",
            "398a8f61-9707-45d8-802d-84bd11179a56",
            "e0276301-425d-4d14-9bb4-05c6e3414772",
            "2983b9dd-227a-4136-83c6-473e2deac44d",
            "85bf6cf2-22ac-49e8-862d-f192781f73a3",
            "5b91f720-6a2f-456b-a14f-bb961d1f80dd",
            "94073a4f-cd2f-46dc-95c0-37d60b32e9ad",
            "7bc7c8e8-dcc6-4317-8518-d600f26573ed",
            "e0b7b18c-d025-4beb-856a-7a0f054c9ea2",
            "c1e2c204-62f3-4ed6-9226-35747a43fb9c",
            "b397eac8-87a0-47c4-b5ab-f9514bf50bfa",
            "5022fa10-98d1-4dc0-b35e-4e800ab3cce6",
            "d9e2da61-80dc-9521-2c6b-9e31c4999d81",
            "3036de92-0d52-4f51-a0c0-422f5ad3d8db",
            "d887d4d2-d9ab-4673-8d69-d0daf8af8551",
            "11fe2186-ba63-4b69-8735-25fee5cda5d4",
            "3a50216b-50b3-4212-b7fe-4bd96605556f",
            "c37290e2-1f8e-8820-8ef5-9adc49859d44",
            "e6b3b1c3-ad38-4fc1-82cf-7e301a4aba90",
            "e224bfca-f917-40de-ad6d-12b70e21b456",
            "f29ebef4-7f77-4048-a1e3-2af0ca3f046f"
        ],
        "NLR trees": [
            ("e4f6efde-abdb-4a02-a25a-3c3859857aee",
             "5022fa10-98d1-4dc0-b35e-4e800ab3cce6",
             "d9e2da61-80dc-9521-2c6b-9e31c4999d81",
             "3036de92-0d52-4f51-a0c0-422f5ad3d8db",
             "d887d4d2-d9ab-4673-8d69-d0daf8af8551"),
            ("e8a80a70-18cc-44b9-8668-4f479918f13a",
             "f29ebef4-7f77-4048-a1e3-2af0ca3f046f",
             "11fe2186-ba63-4b69-8735-25fee5cda5d4",
             "3a50216b-50b3-4212-b7fe-4bd96605556f",
             "c37290e2-1f8e-8820-8ef5-9adc49859d44",
             "e6b3b1c3-ad38-4fc1-82cf-7e301a4aba90"),
            ("398a8f61-9707-45d8-802d-84bd11179a56",
             "e224bfca-f917-40de-ad6d-12b70e21b456",
             "7bc7c8e8-dcc6-4317-8518-d600f26573ed",
             "e0b7b18c-d025-4beb-856a-7a0f054c9ea2",
             "c1e2c204-62f3-4ed6-9226-35747a43fb9c",
             "b397eac8-87a0-47c4-b5ab-f9514bf50bfa"),
            ("e0276301-425d-4d14-9bb4-05c6e3414772",
             "2983b9dd-227a-4136-83c6-473e2deac44d",
             "85bf6cf2-22ac-49e8-862d-f192781f73a3",
             "5b91f720-6a2f-456b-a14f-bb961d1f80dd",
             "94073a4f-cd2f-46dc-95c0-37d60b32e9ad"),
        ]
    }
@pytest.fixture
def condensed():
    """Example pre-populated from the condensed node-properties workbook."""
    example = Example("name", "parent name")
    example.read("./data/example-node-properties.xlsx")
    return example
def as_dictionary(root, uid_key):
    """Map each node's uid (``contents[uid_key]``) to its contents dict."""
    result = {}
    for node in NLRTree(root):
        result[node.contents[uid_key]] = node.contents
    return result
def equal(A, B):
    """Deep, type-aware equality used by the workbook-comparison tests.

    Treats None, '' and NaN as interchangeable "missing" values, compares
    numbers with np.isclose, and recurses into Examples, lists/tuples and
    dicts.  Returns a bool.
    """
    def equal_general(lhs, rhs):
        # None / '' / NaN are all considered "missing" and equal to each other.
        try:
            lhsNan = ((lhs in (None, '')) or np.isnan(lhs))
        except Exception:
            lhsNan = False
        try:
            rhsNan = ((rhs in (None, '')) or np.isnan(rhs))
        except Exception:
            rhsNan = False
        if lhsNan and rhsNan:
            return True
        try:
            # Bug fix: compare this helper's arguments, not the enclosing
            # A/B.  The old code only worked because equal_general happened
            # to be called exclusively with (A, B).
            return np.isclose(float(lhs), float(rhs))
        except Exception:
            return lhs == rhs

    def equal_examples(lhs, rhs):
        # Flatten both Example trees into {name: contents} maps, then pad
        # every contents dict with the union of keys so a missing field
        # compares as "missing" instead of failing the key-set check.
        left = {node.contents['name']: node.contents
                for root in lhs.roots for node in NLRTree(root)}
        right = {node.contents['name']: node.contents
                 for root in rhs.roots for node in NLRTree(root)}
        keys = tuple(
            set([k for lmap in left.values() for k in lmap.keys()])
            .union(
                [k for rmap in right.values() for k in rmap.keys()]))
        for contents in left.values():
            for k in keys:
                contents[k] = contents.get(k, None)
        for contents in right.values():
            for k in keys:
                contents[k] = contents.get(k, None)
        return equal(left, right)

    def equal_iter(lhs, rhs):
        # Same length and pairwise-equal elements.
        if len(lhs) != len(rhs):
            return False
        return all([equal(x, y) for x, y in zip(lhs, rhs)])

    def equal_dict(lhs, rhs):
        # Key sets must match exactly; values compare recursively.
        if set(lhs.keys()) != set(rhs.keys()):
            return False
        try:
            return all([equal(lhs[k], rhs[k]) for k in lhs.keys()])
        except Exception:
            return False

    # Examples
    if isinstance(A, Example):
        return equal_examples(A, B) if isinstance(B, Example) else False
    # iterables (not strings)
    if isinstance(A, (list, tuple)):
        return equal_iter(A, B) if isinstance(B, (list, tuple)) else False
    # dictionaries
    if isinstance(A, dict):
        return equal_dict(A, B) if isinstance(B, dict) else False
    # anything else
    return equal_general(A, B)
def test_equal(structured):
    """Sanity-check the custom ``equal`` helper across every supported type."""
    filename = structured['filename']
    samples = Example('name', 'parent name')
    samples.read(filename)
    # check general
    assert equal(1.234, 1.234000001)
    assert not equal(1.234, 1.324)
    assert equal(None, None)
    assert equal(float('nan'), float('nan'))
    assert equal(None, float('nan'))
    # check lists
    assert equal(list(range(4)), list(range(4)))
    assert not equal(list(range(4)), list(range(5)))
    # check dict
    lhs = {
        'a': 1,
        'b': 2,
        'c': 3
    }
    rhs = {
        'a': 1,
        'b': 2,
        'c': 3
    }
    assert equal(lhs, rhs)
    # nested dict must break equality until mirrored on the other side
    lhs['c'] = {
        'A': 100,
        'B': 200,
        'C': 300
    }
    assert not equal(lhs, rhs)
    rhs['c'] = {
        'A': 100,
        'B': 200,
        'C': 300
    }
    assert equal(lhs, rhs)
    # samples
    assert equal(samples, samples)
def test_Example():
    """A freshly constructed Example is empty and remembers its key names."""
    ex = Example("name", "parent name")
    assert ex._uid_key == "name"
    assert ex._parent_key == "parent name"
    assert ex._filenames == []
    assert ex._nodes == {}
    assert ex.roots == []
def test_filetype():
    """Excel extensions are recognised case-insensitively."""
    for extension in ("xls", "XLS", "xlsx", "XLSX"):
        assert Example.filetype("foo." + extension) == "excel"
def test_read(structured):
    """Reading the workbook records the filename, all nodes, and the trees."""
    example = Example('name', 'parent name')
    filename = structured['filename']
    nodes = structured['names']
    trees = structured['NLR trees']
    example.read(filename)
    # check filename was stored properly
    diff = set(example._filenames) - set([filename])
    assert len(diff) == 0, \
        "Filename(s) were not stored as expected."
    # check that all samples were read properly
    diff = set(example._nodes.keys()) - set(nodes)
    assert len(diff) == 0, \
        f"Samples not ready as expected: {diff}"
    # check that the expected structure was recovered
    names = [tuple(node.contents['name'] for node in NLRTree(root))
             for root in example.roots]
    diff = set(names) - set(trees)
    assert len(diff) == 0, \
        f"Expected structure was not recovered: {diff}"
def strdict(d, level=0):
    """Render a (possibly nested) dict as an indented multi-line string.

    Values longer than 15 characters are abbreviated to 'head...tail';
    nested dicts recurse one indent level deeper; non-string iterables are
    shown as a list of abbreviated elements; everything else is shown as an
    abbreviated scalar.
    """
    def truncated_obj(obj):
        # Abbreviate anything whose str() is longer than 15 characters.
        el = str(obj)
        if len(el) > 15:
            el = el[:6] + '...' + el[-6:]
        return el
    rval = ''
    for k, v in d.items():
        rval += level * ' ' + str(k) + ': '
        if isinstance(v, dict):
            rval += '\n' + strdict(v, level=level + 1)
        elif isinstance(v, str):
            # Strings are scalars here, not character iterables.
            # (Replaces the original raise-Exception/except control flow.)
            rval += truncated_obj(v) + '\n'
        else:
            try:
                el = str([truncated_obj(x) for x in v])
            except TypeError:
                # Not iterable: fall back to the scalar rendering.
                el = truncated_obj(v)
            rval += el + '\n'
    return rval
def joindicts(a, b):
    """Merge two dicts into one keyed by the union of their keys.

    Where both values are dicts the merge recurses; otherwise the value is
    the (a_value, b_value) pair, with None standing in for a missing key.
    """
    def merged_value(key):
        left = a.get(key)
        right = b.get(key)
        if isinstance(left, dict) and isinstance(right, dict):
            return joindicts(left, right)
        return (left, right)
    return {key: merged_value(key) for key in set(a) | set(b)}
def write(filename, example, sheetname='Sheet1'):
    """Flatten every tree in *example* into one sheet of a spreadsheet.

    Rows are nodes (indexed and sorted by 'name'); columns are the union of
    keys across all nodes, with '' filling missing fields.
    """
    # First pass: flatten each tree once and accumulate the union of keys
    # (in the same encounter order the cells will be filled in).
    node_maps = []
    all_keys = set()
    for root in example.roots:
        node_map = as_dictionary(root, 'name')
        node_maps.append(node_map)
        for entry in node_map.values():
            all_keys = all_keys.union(entry.keys())
    # Second pass: one column list per key, '' where a node lacks the key.
    columns = {}
    for node_map in node_maps:
        for entry in node_map.values():
            for key in all_keys:
                columns.setdefault(key, []).append(entry.get(key, ''))
    df = pd.DataFrame(columns)
    df.set_index('name', inplace=True)
    df.sort_index(inplace=True)
    df.to_excel(filename, sheetname)
def save_and_check(actual, expected):
    """Assert *actual* equals *expected*; on mismatch, dump both trees to a
    spreadsheet and fail with a per-(name, key) diff message."""
    matches = equal(expected, actual)
    if not matches:
        # Flatten both Example trees into {name: contents} maps.
        amap = {node.contents['name']: node.contents
                for root in actual.roots for node in NLRTree(root)}
        emap = {node.contents['name']: node.contents
                for root in expected.roots for node in NLRTree(root)}
        names = tuple(
            set(amap.keys()).union(emap.keys()))
        keys = tuple(
            set(k for m in amap.values() for k in m.keys())
            .union([k for m in emap.values() for k in m.keys()]))
        # One "(name, key, actual, expected)" line per differing field.
        msg = '\n'.join(map(str, [(name, key,
                                   amap.get(name, {}).get(key, None),
                                   emap.get(name, {}).get(key, None))
                                  for name in names for key in keys
                                  if not equal(amap.get(name, {}).get(key, None),
                                               emap.get(name, {}).get(key, None))]))
        # Bug fix: the comparison workbook used to be written to a
        # hard-coded personal path ('/Users/bkappes/Desktop/compare.xlsx');
        # use a temporary file that exists on any machine instead.
        with tempfile.NamedTemporaryFile(
                suffix='.xlsx', prefix='compare-', delete=False) as tmp:
            fname = tmp.name
        writer = pd.ExcelWriter(fname)
        write(writer, expected, 'expected')
        write(writer, actual, 'actual')
        writer.save()
        assert matches, msg
def test_propagate(structured):
    """Propagation alone must reproduce the reference 'propagate' workbook."""
    expected = Example('name', 'parent name')
    expected.read(structured['propagate'])
    actual = Example('name', 'parent name')
    actual.read(structured['filename'])
    actual.propagate()
    save_and_check(actual, expected)
def test_aggregate(structured):
    """Mean-aggregation alone must reproduce the 'aggregate' workbook."""
    expected = Example('name', 'parent name')
    expected.read(structured['aggregate'])
    actual = Example('name', 'parent name')
    actual.read(structured['filename'])
    actual.aggregate(reduce=Example.mean)
    save_and_check(actual, expected)
# def test_aggregate_and_reduce(condensed, structured):
# filename = structured['filename']
# expected = condensed
# # # test inherit-reduce
# # actual = Example("name", "parent name")
# # actual.read(filename)
# # actual.propagate()
# # actual.aggregate(reduce=Example.mean)
# # save_and_check(actual, expected)
# #assert equal(expected, actual), msg
# # test reduce-inherit
# actual = Example("name", "parent name")
# actual.read(filename)
# actual.aggregate(reduce=Example.mean)
# actual.propagate()
# save_and_check(actual, expected)
# # assert equal(expected, actual), \
# # "Reduce-Inherit does not match expected results."
# # # test inherit-reduce-inherit
# # actual = Example("name", "parent name")
# # actual.read(filename)
# # actual.propagate()
# # actual.aggregate(reduce=Example.mean)
# # actual.propagate()
# # save_and_check(actual, expected)
# # # assert equal(expected, actual), \
# # # "Inherit-Reduce-Inherit does not match expected results."
| 2.0625 | 2 |
vendoasg.py | asgard-sp-z-o-o/vendoasg | 0 | 12770334 | #! python
#-*- coding: utf-8 -*-
import requests
import json
class Vendo:
    """Minimal JSON-over-HTTP client for the Vendo ERP API.

    State: the base API URL, the default request headers, and the auth
    tokens returned by the login endpoints (``VENDO_TOKEN`` for the API
    session, ``USER_TOKEN`` for a logged-in Vendo user).
    """

    def __init__(self, url_api):
        # Bug fix: the original default headers included the placeholder
        # "Content-Length": "length", which is not a valid value; requests
        # computes Content-Length from the body itself, so it is dropped.
        self.setHeader({'Content-Type': 'application/json'})
        self.setApi(url_api)

    def setApi(self, api_url):
        """Set the base URL that every request path is appended to."""
        self.API_URL = api_url

    def setHeader(self, api_header):
        """Replace the default headers sent with every request."""
        self.API_HEADER = api_header

    def getJson(self, request_url, request_data):
        """POST *request_data* as JSON to base URL + *request_url*.

        :returns: the decoded JSON reply.
        """
        req_url = self.API_URL + request_url
        response = requests.post(req_url, json=request_data, headers=self.API_HEADER)
        return response.json()

    def logInApi(self, api_login, api_pswd):
        """Authenticate the API client and store the session token."""
        jsonData = self.getJson(
            "/json/reply/Autoryzacja_Zaloguj",
            {"Model": {"Login": api_login, "Haslo": api_pswd}})
        self.VENDO_TOKEN = jsonData["Wynik"]["Token"]

    def logOutApi(self):
        """Invalidate the API session token on the server."""
        self.getJson(
            "/json/reply/Autoryzacja_Wyloguj",
            {"Token": self.VENDO_TOKEN})

    def loginUser(self, user_login, user_pswd):
        """Log a Vendo user in under the API session; store the user token."""
        jsonData = self.getJson(
            "/json/reply/Autoryzacja_ZalogujUzytkownikaVendo",
            {"Token": self.VENDO_TOKEN, "Model": {"Login": user_login, "Haslo": user_pswd}})
        self.USER_TOKEN = jsonData["Wynik"]["Token"]

    def logOutUser(self):
        """Invalidate the logged-in user's token on the server."""
        self.getJson(
            "/json/reply/WylogujUzytkownikaVendo",
            {"Token": self.USER_TOKEN})
landsat_cloudScore.py | sig-gis/Ecuador_SEPAL | 13 | 12770335 | <reponame>sig-gis/Ecuador_SEPAL
# Sentinel-2 package
from paramsTemplate import *
import ee
from Py6S import *
import math
import datetime
import os, sys
from utils import *
import sun_angles
import view_angles
import time
class env(object):
    # Configuration container for the Landsat cloud-score export run.

    def __init__(self):
        """Initialize the environment."""
        # Initialize the Earth Engine object, using the authentication credentials.
        ee.Initialize()
        # Output projection (EPSG:32717 = WGS 84 / UTM zone 17S).
        self.epsg = "EPSG:32717"
        ##########################################
        # variable for the landsat data request  #
        ##########################################
        self.metadataCloudCoverMax = 100;
        ##########################################
        # Export variables                       #
        ##########################################
        self.assetId ="projects/Sacha/PreprocessedData/L8_Annual_CloudScore/"
        self.name = "LS_CS_"
        self.exportScale = 20
        # cloudScore values below this threshold are kept (masked otherwise)
        self.cloudScoreThresh = 1;
        ##########################################
        # variable band selection                #
        ##########################################
        self.percentiles = [25,75]
        self.divideBands = ee.List(['blue','green','red','nir','swir1','swir2'])
        self.bandNamesLandsat = ee.List(['blue','green','red','nir','swir1','thermal','swir2','sr_atmos_opacity','pixel_qa','radsat_qa'])
        # Per-sensor band indices mapped onto the common band names above.
        self.sensorBandDictLandsatSR = ee.Dictionary({'L8' : ee.List([1,2,3,4,5,7,6,9,10,11]),\
                                                      'L7' : ee.List([0,1,2,3,4,5,6,7,9,10]),\
                                                      'L5' : ee.List([0,1,2,3,4,5,6,7,9,10]),\
                                                      'L4' : ee.List([0,1,2,3,4,5,6,7,9,10])})
        ##########################################
        # enable / disable modules               #
        ##########################################
        self.cloudMask = True
class functions():
    # Pipeline driver: builds the merged Landsat collection, scores clouds,
    # reduces to percentiles, and exports the result as an EE asset.

    def __init__(self):
        """Initialize the Surface Reflectance app."""
        # get the environment
        self.env = env()
    def main(self,studyArea,startDate,endDate,startDay,endDay,week,regionName):
        """Build, cloud-score, reduce and export the Landsat collection for
        one region/time window.  Returns the reduced ee.Image."""
        # Stash the run window and region on the shared env so helpers can
        # read them (exportMap, in particular).
        self.env.startDate = startDate
        self.env.endDate = endDate
        self.env.startDoy = startDay
        self.env.endDoy = endDay
        self.env.regionName = regionName
        self.studyArea = studyArea
        # Landsat 8 surface reflectance, filtered and renamed to common bands.
        landsat8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR').filterDate(self.env.startDate,self.env.endDate).filterBounds(studyArea)
        landsat8 = landsat8.filterMetadata('CLOUD_COVER','less_than',self.env.metadataCloudCoverMax)
        landsat8 = landsat8.select(self.env.sensorBandDictLandsatSR.get('L8'),self.env.bandNamesLandsat)
        # Landsat 5 additionally gets the defringe treatment.
        landsat5 = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR').filterDate(self.env.startDate,self.env.endDate).filterBounds(studyArea)
        landsat5 = landsat5.filterMetadata('CLOUD_COVER','less_than',self.env.metadataCloudCoverMax)
        landsat5 = landsat5.select(self.env.sensorBandDictLandsatSR.get('L5'),self.env.bandNamesLandsat).map(self.defringe)
        landsat7 = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR').filterDate(self.env.startDate,self.env.endDate).filterBounds(studyArea)
        landsat7 = landsat7.filterMetadata('CLOUD_COVER','less_than',self.env.metadataCloudCoverMax)
        landsat7 = landsat7.select(self.env.sensorBandDictLandsatSR.get('L7'),self.env.bandNamesLandsat)
        landsat = landsat5.merge(landsat7).merge(landsat8)
        if landsat.size().getInfo() > 0:
            landsat = landsat.map(self.scaleLandsat)
            # mask clouds using cloud mask function
            if self.env.cloudMask == True:
                #print "removing some more clouds"
                landsat = landsat.map(self.maskClouds)
            # Keep only the score/QA bands, reduce to median + percentiles.
            landsat = landsat.select(['cloudScore','pixel_qa'])
            landsat = self.percentile(landsat,self.env.percentiles)
            landsat = landsat.set('system:time_start',ee.Date(self.env.startDate).millis())
            self.exportMap(landsat,studyArea,week)
            print(landsat.getInfo())
        return landsat
def scaleLandsat(self,img):
"""Landast is scaled by factor 0.0001 """
thermal = img.select(ee.List(['thermal'])).multiply(0.1)
scaled = ee.Image(img).select(self.env.divideBands).multiply(ee.Number(0.0001))
return img.select(['pixel_qa']).addBands(scaled).addBands(thermal)
    def maskClouds(self,img):
        """
        Computes spectral indices of cloudyness and takes the minimum of them.
        Each spectral index is fairly lenient because the group minimum
        is a somewhat stringent comparison policy.
        Originally written by Matt Hancher for Landsat imagery, adapted to
        this pipeline.  Adds 'cloudScore' (0-100) and 'cloudMask' bands and
        masks pixels whose score exceeds env.cloudScoreThresh.
        """
        score = ee.Image(1.0);
        # Clouds are reasonably bright in the blue band.
        blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(ee.Number(0.3).subtract(ee.Number(0.1)))
        score = score.min(blue_rescale);
        # Clouds are reasonably bright in all visible bands.
        visible = img.select('red').add(img.select('green')).add(img.select('blue'))
        visible_rescale = visible.subtract(ee.Number(0.2)).divide(ee.Number(0.8).subtract(ee.Number(0.2)))
        score = score.min(visible_rescale);
        # Clouds are reasonably bright in all infrared bands.
        infrared = img.select('nir').add(img.select('swir1')).add(img.select('swir2'))
        infrared_rescale = infrared.subtract(ee.Number(0.3)).divide(ee.Number(0.8).subtract(ee.Number(0.3)))
        score = score.min(infrared_rescale);
        # Clouds are reasonably cool in temperature.
        temp_rescale = img.select('thermal').subtract(ee.Number(300)).divide(ee.Number(290).subtract(ee.Number(300)))
        score = score.min(temp_rescale);
        # However, clouds are not snow.
        ndsi = img.normalizedDifference(['green', 'swir1']);
        ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(ee.Number(0.6).subtract(ee.Number(0.8)))
        score = score.min(ndsi_rescale).multiply(100).byte().rename(['cloudScore']);
        # Keep pixels scoring below the threshold; attach mask+score bands.
        mask = score.lt(self.env.cloudScoreThresh).rename(['cloudMask']);
        img = img.updateMask(mask).addBands([mask]).addBands([score]);
        return img;
def defringe(self,img):
# threshold for defringing landsat5 and 7
fringeCountThreshold = 279
k = ee.Kernel.fixed(41, 41,
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]);
m = ee.Image(img).mask().reduce(ee.Reducer.min())
sum = m.reduceNeighborhood(ee.Reducer.sum(), k, 'kernel')
mask = sum.gte(fringeCountThreshold)
return img.updateMask(mask)
    def percentile(self,collection,p):
        """Reduce the collection to its median (renamed to the two input
        bands) plus the percentile bands requested in *p*."""
        median = ee.ImageCollection(collection).reduce(ee.Reducer.median()).rename(['cloudScore','pixel_qa']);
        percentiles = collection.reduce(ee.Reducer.percentile(p))
        return median.addBands(percentiles)
    def exportMap(self,img,studyArea,week):
        """Start an asynchronous export of *img* to the configured EE asset,
        naming it from region, week number and day-of-year window."""
        geom  = studyArea.getInfo();
        # Zero-padded start/end day-of-year and the start year for the name.
        sd = str(self.env.startDate.getRelative('day','year').getInfo()).zfill(3);
        ed = str(self.env.endDate.getRelative('day','year').getInfo()).zfill(3);
        year = str(self.env.startDate.get('year').getInfo());
        regionName = self.env.regionName.replace(" ",'_') + "_"
        task_ordered= ee.batch.Export.image.toAsset(image=img,
                                  description = self.env.name + regionName + str(week).zfill(3) +'_'+ year + sd + ed,
                                  assetId= self.env.assetId + self.env.name + regionName + str(week).zfill(3)+'_'+ year + sd + ed,
                                  region=geom['coordinates'],
                                  maxPixels=1e13,
                                  crs=self.env.epsg,
                                  scale=self.env.exportScale)
        task_ordered.start()
        print(self.env.assetId + self.env.name + regionName + str(week).zfill(3)+'_'+ year + sd + ed)
if __name__ == "__main__":
ee.Initialize()
start = 0
for i in range(0,2,1):
#2018 starts at week 104
runNumber = start+ i
print runNumber
year = ee.Date("2009-01-01")
startDay = 0
endDay = 364
startDate = year.advance(startDay,'day').advance(i,'year')
endDate = year.advance(endDay,'day').advance(i,'year')
regionName = 'ECUADOR'
studyArea = ee.FeatureCollection("projects/Sacha/AncillaryData/StudyRegions/Ecuador_EcoRegions_Complete")
studyArea = studyArea.geometry().bounds()
functions().main(studyArea,startDate,endDate,startDay,endDay,runNumber,regionName)
| 2.234375 | 2 |
lygos/plot_tess_psfn.py | tdaylan/pandora | 2 | 12770336 | from util import *
import h5py
# Build (or load from cache) the TESS PSF cube indexed by
# (camera, CCD, row, column) and optionally plot each 117x117 PSF stamp.
# NOTE: this script is Python 2 (print statements).
pathdata = os.environ['TCAT_DATA_PATH'] + '/tesspsfn/'
pathtemp = pathdata + 'temp.txt'
indxcams = range(1, 5)
indxccds = range(1, 5)
# Sample row/column positions on the CCD at which the PSF is evaluated.
indxrows = [1, 513, 1025, 1536, 2048]
indxcols = [45, 557, 1069, 1580, 2092]
#boolplotflgt = False
boolplotflgt = True
pathsave = pathdata + 'listpsfn.h5'
if os.path.exists(pathsave):
    # Cached cube exists: read it back from HDF5.
    print 'Reading from %s...' % pathsave
    objth5py = h5py.File(pathsave, 'r')
    listpsfn = objth5py.get('listpsfn')
    listpsfn = np.array(listpsfn)
    objth5py.close()
else:
    # (camera, CCD, row index, column index, 117x117 PSF stamp)
    listpsfn = np.empty((4, 4, 5, 5, 117, 117))
for a in indxcams:
    for b in indxccds:
        pathsubs = pathdata + 'tess_prf-master/cam%d_ccd%d/' % (a, b)
        for k in range(len(indxrows)):
            for l in range(len(indxcols)):
                if not os.path.exists(pathsave):
                    # read_psfntess comes from `util` (star import).
                    listpsfn[a-1, b-1, k, l, :, :] = read_psfntess(a, b, indxrows[k], indxcols[l])
                if boolplotflgt:
                    figr, axis = plt.subplots(figsize=(12, 12))
                    axis.set_ylabel('$y$')
                    axis.set_xlabel('$x$')
                    axis.set_title('Camera %d, CCD %d, Row %d, Column %d' % (a, b, indxrows[k], indxcols[l]))
                    plt.imshow(listpsfn[a-1, b-1, k, l, :, :], cmap='Greys_r', interpolation='none')
                    plt.tight_layout()
                    pathimag = pathdata + 'psfn_%d%d%d%d.pdf' % (a, b, k, l)
                    print 'Writing to %s...' % pathimag
                    print
                    plt.savefig(pathimag)
                    plt.close()
if not os.path.exists(pathsave):
    # Persist the freshly built cube for the next run.
    print 'Writing to %s...' % pathsave
    objth5py = h5py.File(pathsave, 'w')
    objth5py.create_dataset('listpsfn', data=listpsfn)
    objth5py.close()
#listline = open(pathdata + 'tesspsfn.txt', 'r')
##id|resolution|camera_id|ccd_id|stellar_type|stellar_temp|position|angle
#listpsid = []
#for k, line in enumerate(listline):
# if k < 30:
# print 'k'
# print k
# print 'line'
# print line
# print
# if k < 2:
# continue
#
# listcols = line.split('|')
# listpsid.append(listcols[0])
#
#listpsid = np.array(listpsid).astype(int)
#listline.close()
#
#summgene(listpsid)
#
#psfn = [[] for k in range(1)]
#psfn[0] = np.empty((187, 187, 9126))
##psfn[1] = np.empty((17, 17, 9126))
#
#os.system('mkdir -p %s' % pathdata)
#numbpsfn = 9126
#numbpsfn = 9126
#indxpsfn = np.arange(numbpsfn)
#psid = indxpsfn + 1
#temp = np.empty_like(psid)
#posi = np.empty((2, numbpsfn))
#angl = np.empty((2, numbpsfn))
#
#print 'numbpsfn'
#print numbpsfn
#indxpsfngood = indxpsfn
##indxpsfngood = np.random.choice(indxpsfn, size=30)
#
#for k in indxpsfngood:
#
# cmnd = 'tsig-psf --id %d --show-contents > %stemp.txt' % (psid[k], pathdata)
# os.system(cmnd)
# print 'k'
# print k
#
# datatemp = np.loadtxt(pathtemp, skiprows=8, delimiter=',')
# if datatemp.shape[0] == 187:
# indxreso = 0
# else:
# continue
#
# with open(pathtemp, 'r') as listline:
# for t, line in enumerate(listline):
# print line
# if t == 1:
# psidtemp = int(line.split('id=')[1])
# if t == 2:
# reso = int(line.split('resolution=')[1])
# if t == 4:
# temp[k] = int(line.split('stellar_temp=')[1])
# if t == 5:
# posi[:, k] = line.split('field_position=')[1].split('(')[1].split(')')[0].split(',')
# posi[:, k] = [float(posi[0, k]), float(posi[1, k])]
# if t == 6:
# angl[:, k] = line.split('field_angle=')[1].split('(')[1].split(')')[0].split(',')
# angl[:, k] = [float(angl[0, k]), float(angl[1, k])]
# if t == 8:
# break
#
#
# if temp[k] == 6030 and reso == 11:
#
# print 'Plotting...'
# print 'psfn[indxreso]'
# summgene(psfn[indxreso])
# print 'psidtemp'
# print psidtemp
# print 'temp[k]'
# print temp[k]
# print 'posi[:, k]'
# print posi[:, k]
# print 'angl[:, k]'
# print angl[:, k]
#
# psfn[0][:, :, k] = datatemp
#
# figr, axis = plt.subplots()
#
# axis.set_ylabel('$y$')
# axis.set_xlabel('$x$')
# axis.set_title('$T=%d, x=%.3g, y=%.3g$' % (temp[k], angl[0, k], angl[1, k]))
#
# plt.imshow(psfn[indxreso][:, :, k], cmap='Greys_r', interpolation='none')
# plt.tight_layout()
# pathimag = pathdata + 'psfn_fram%04d.png' % (k)
# print 'Writing to %s...' % pathimag
# print
# plt.savefig(pathimag)
# plt.close()
#
#cmnd = 'convert -delay 20 -density 200x200 %spsfn_fram*.png %spsfn.gif' % (pathdata, pathdata)
#print cmnd
#os.system(cmnd)
#
#import scipy
#from scipy.signal import lombscargle
## PSF difference
#figr, axis = plt.subplots(figsize=(20, 6))
#axis.set_ylabel('LS')
#axis.set_xlabel('Frequency [1/days]')
#for a in range(2):
# if a == 0:
# ydat = (gdat.lcuraperdiff[:, 0, 2, 1] - gdat.lcuraperdiff[:, 0, 2, 2])
# labl = 'x'
# else:
# ydat = (gdat.lcuraperdiff[:, 0, 2, 3] - gdat.lcuraperdiff[:, 0, 2, 4])
# labl = 'y'
# ydat -= np.mean(ydat)
# ydat /= gdat.lcuraperdiff[:, 0, 2, 0]
# ydat *= 100.
# ydat = scipy.signal.lombscargle(gdat.timedata, ydat, np.linspace(0.01, 0.5, 1000))
# axis.plot(np.linspace(0.01, 0.5, 1000), ydat, label=labl, ls='', marker='o', markersize=5, alpha=0.3)
#axis.legend()
#plt.tight_layout()
#path = gdat.pathdata + 'ffftpsfn_%s.png' % (gdat.strgsaveextn)
#print 'Writing to %s...' % path
#plt.savefig(path)
#plt.close()
# PSF difference
#figr, axis = plt.subplots(figsize=(20, 6))
#axis.set_ylabel('Diff [%]')
#axis.set_xlabel('Time since %s [days]' % objttimeinit.iso)
#for a in range(2):
# if a == 0:
# ydat = (gdat.lcuraperdiff[:, 0, 2, 1] - gdat.lcuraperdiff[:, 0, 2, 2])
# labl = 'x'
# else:
# ydat = (gdat.lcuraperdiff[:, 0, 2, 3] - gdat.lcuraperdiff[:, 0, 2, 4])
# labl = 'y'
# ydat -= np.mean(ydat)
# ydat /= gdat.lcuraperdiff[:, 0, 2, 0]
# ydat *= 100.
# axis.plot(gdat.timedata, ydat, label=labl, ls='', marker='o', markersize=5, alpha=0.3)
#axis.legend()
#axis.set_ylim([-100, 100])
#plt.tight_layout()
#path = gdat.pathdata + 'lcurpsfn_%s.png' % (gdat.strgsaveextn)
#print 'Writing to %s...' % path
#plt.savefig(path)
#plt.close()
| 2.015625 | 2 |
product_api/api.py | Lnvictor/ProductAPI | 0 | 12770337 | """
Http Server for our API
"""
from flask import Flask, jsonify, request
from controller import product_controller
app = Flask(__name__)
def serialize(products):
    """Return a list with each product's serialized (dict) representation."""
    return [product.serialize() for product in products]
@app.route("/product/<name>")
def get_product(name: str):
return jsonify({"product": product_controller.get_by_name(name).serialize()})
@app.route("/products")
def get_all_products():
return jsonify({"products": serialize(product_controller.get())})
@app.route("/product", methods=["POST"])
def insert_product():
name = request.json.get("name")
desc = request.json.get("desc")
value = float(request.json.get("value"))
p = product_controller.save(name, desc, value)
return jsonify({"product": p.serialize()})
@app.route("/update_product/<p_name>", methods=["PUT"])
def update_product(p_name: str):
name = request.json.get("name")
desc = request.json.get("desc")
value = request.json.get("value")
return jsonify(
{
"product": product_controller.change(
p_name, name=name, desc=desc, value=value
).serialize()
}
)
@app.route("/delete_product/<name>", methods=["DELETE"])
def delete_product(name: str):
return jsonify({"product_deleted": product_controller.delete_by_id(id).serialize()})
if __name__ == "__main__":
app.run()
| 3.140625 | 3 |
Scripts/dk/collisionshape/__init__.py | hhg128/DKGL | 14 | 12770338 | <reponame>hhg128/DKGL<filename>Scripts/dk/collisionshape/__init__.py
import _dk_core as core
# Up-axis selectors for oriented shapes (capsule/cylinder/cone).
UP_AXIS_LEFT = 0
UP_AXIS_TOP = 1
UP_AXIS_FORWARD = 2

# Collision-shape type identifiers.
TYPE_CUSTOM = 0
TYPE_EMPTY = 1
TYPE_COMPOUND = 2
TYPE_BOX = 3
TYPE_CAPSULE = 4
TYPE_CYLINDER = 5
TYPE_CONE = 6
TYPE_SPHERE = 7
TYPE_MULTI_SPHERE = 8
TYPE_CONVEX_HULL = 9
TYPE_STATIC_PLANE = 10
TYPE_STATIC_TRIANGLE_MESH = 11

# Re-export the native shape classes from the core extension module.
CollisionShape = core.CollisionShape
CompoundShape = core.CompoundShape
ConcaveShape = core.ConcaveShape
StaticPlaneShape = core.StaticPlaneShape
StaticTriangleMeshShape = core.StaticTriangleMeshShape
ConvexShape = core.ConvexShape
CapsuleShape = core.CapsuleShape
ConeShape = core.ConeShape
CylinderShape = core.CylinderShape
MultiSphereShape = core.MultiSphereShape
PolyhedralConvexShape = core.PolyhedralConvexShape
BoxShape = core.BoxShape
ConvexHullShape = core.ConvexHullShape
SphereShape = core.SphereShape

# Builder helpers implemented in Python.
from .convexhull import ShapeBuilder as ConvexHullBuilder
from .multisphere import ShapeBuilder as MultiSphereBuilder
vanilla_segmentation/loss.py | drapado/densefusion | 1 | 12770339 | from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import torch
import time
import numpy as np
import torch.nn as nn
import random
import copy
import math
CEloss = nn.CrossEntropyLoss()
def loss_calculation(semantic, target):
    """Per-pixel 2-class cross-entropy for the segmentation head.

    :param semantic: (bs, 2, H, W) logits.
    :param target:   (bs, H, W) integer class labels.
    :returns: scalar mean cross-entropy over every pixel in the batch.
    """
    bs = semantic.size(0)
    # Generalized: derive the pixel count from the tensor instead of the
    # hard-coded 480 * 640, so any input resolution works (backward
    # compatible: 480x640 inputs give the same value as before).
    pix_num = semantic.numel() // (bs * 2)
    # Flatten labels to (bs*pix_num,) and logits to (bs*pix_num, 2).
    target = target.view(bs, -1).view(-1).contiguous()
    semantic = semantic.view(bs, 2, pix_num).transpose(1, 2).contiguous().view(bs * pix_num, 2).contiguous()
    # Stateless criterion constructed locally (replaces the module-level
    # CEloss instance; same default 'mean' reduction).
    semantic_loss = nn.CrossEntropyLoss()(semantic, target)
    return semantic_loss
class Loss(_Loss):
    """nn.Module wrapper so the segmentation loss can be used as a criterion."""

    def __init__(self):
        # Fix: the original passed the legacy positional ``size_average=True``,
        # which only triggers torch's deprecation path and still resolves to
        # reduction='mean' — the default — so the argument is dropped.
        super(Loss, self).__init__()

    def forward(self, semantic, target):
        """Return the per-pixel cross-entropy between *semantic* logits and
        *target* labels (delegates to loss_calculation)."""
        return loss_calculation(semantic, target)
| 2.828125 | 3 |
hdlConvertorAst/translate/common/add_call_operator_for_call_without_parenthesis.py | Nic30/hdlConvertorAst | 16 | 12770340 | from hdlConvertorAst.hdlAst import HdlOp, HdlValueId, HdlFunctionDef, HdlOpType
from hdlConvertorAst.to.hdl_ast_modifier import HdlAstModifier
from hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils import hdl_call
class AddCallOperatorForCallWithoutParenthesis(HdlAstModifier):
    """
    Verilog function call does not need to have () and it can be called just by its id.
    To simplify handling we decorete each such a call with a call operator in this transformation.
    """

    def __init__(self):
        HdlAstModifier.__init__(self)
        # Nearest enclosing HdlOp while recursing, used to detect whether a
        # function id is already the callee of a CALL operator.
        self._parentExpr = None

    def visit_iHdlExpr(self, o):
        """
        Wrap bare references to functions in an explicit CALL operator.

        :type o: iHdlExpr
        :return: iHdlExpr
        """
        if isinstance(o, HdlOp):
            # Recurse with this operator recorded as the parent, restoring
            # the previous parent even if visiting raises.
            # NOTE(review): this branch implicitly returns None rather than
            # `o`; presumably HdlAstModifier treats None as "keep original" --
            # confirm against the base-class contract.
            prev_par_expr = self._parentExpr
            self._parentExpr = o
            try:
                self.visit_HdlOp(o)
            finally:
                self._parentExpr = prev_par_expr
        else:
            # A bare id that resolves to a function definition and is not
            # already the callee (first operand) of a CALL gets wrapped.
            if isinstance(o, HdlValueId) and\
                    isinstance(o.obj, HdlFunctionDef) and \
                    ( not isinstance(self._parentExpr, HdlOp) or \
                      self._parentExpr.fn != HdlOpType.CALL or \
                      self._parentExpr.ops[0] is not o
                      ):
                # wrap function id in a call operator if parent is not a call operator
                return hdl_call(o, [])
            return o
| 2.453125 | 2 |
ds_queues/hotpotato.py | dileepkr/datastructures | 0 | 12770341 | <gh_stars>0
from myqueue import Queue
def hotPotato(namelist, numrounds):
    """Simulate the hot-potato game: after every *numrounds* passes the
    player holding the potato is eliminated.  Return the last name left."""
    circle = Queue()
    for person in namelist:
        circle.enqueue(person)

    while circle.size() > 1:
        # Pass the potato around: move the front player to the back.
        for _ in range(numrounds):
            circle.enqueue(circle.dequeue())
        # Whoever holds the potato now is out.
        circle.dequeue()

    return circle.dequeue()
return namequeue.dequeue()
if __name__ == "__main__":
print(hotPotato(['a','b','c','d','e','f'], 3)) | 3.453125 | 3 |
2020/17/17.py | Sveder/advent_of_code | 0 | 12770342 | import copy
import itertools
input = """###..#..
.#######
#####...
#..##.#.
###..##.
##...#..
..#...#.
.#....##"""
# input = """.#.
# ..#
# ###"""
cycles_count = 6
def _empty_plane(rows, cols):
    """A rows x cols plane of inactive ('.') cells with independent row lists."""
    return [['.'] * cols for _ in range(rows)]


def _pad_world(world):
    """Return a copy of *world* grown by one inactive layer on every side of
    all four dimensions (w, z, y, x) so the active region can expand during
    one cycle.  Unlike the original ``[row] * n`` construction, every list in
    the result is an independent object (no aliased rows), and planes no
    longer have to be square."""
    y_len = len(world[0][0])
    x_len = len(world[0][0][0])
    new_y, new_x = y_len + 2, x_len + 2
    new_z = len(world[0]) + 2

    padded = [[_empty_plane(new_y, new_x) for _ in range(new_z)]]  # w = -1 layer
    for cube in world:
        new_cube = [_empty_plane(new_y, new_x)]  # z = -1 layer
        for plane in cube:
            new_plane = [['.'] * new_x]
            for line in plane:
                new_plane.append(['.'] + list(line) + ['.'])
            new_plane.append(['.'] * new_x)
            new_cube.append(new_plane)
        new_cube.append(_empty_plane(new_y, new_x))
        padded.append(new_cube)
    padded.append([_empty_plane(new_y, new_x) for _ in range(new_z)])
    return padded


# All 80 neighbour offsets in 4D; computed once instead of per call.
_DIRECTIONS = [d for d in itertools.product((-1, 0, 1), repeat=4)
               if d != (0, 0, 0, 0)]


def step(world):
    """Advance the 4D cellular automaton (AoC 2020 day 17, part 2) one cycle.

    *world* is indexed as ``world[w][z][y][x]`` with '#' = active and
    '.' = inactive.  Returns a new world padded by one layer in every
    dimension; the input is not modified.
    """
    padded = _pad_world(world)
    result = copy.deepcopy(padded)
    for w, cube in enumerate(padded):
        for z, plane in enumerate(cube):
            for y, line in enumerate(plane):
                for x, cell in enumerate(line):
                    active = 0
                    for dw, dz, dy, dx in _DIRECTIONS:
                        nw, nz, ny, nx = w + dw, z + dz, y + dy, x + dx
                        # Explicit bounds checks: the original try/except
                        # IndexError let negative indices silently wrap to
                        # the far edge (harmless only because that edge is
                        # always empty, but a latent bug).
                        if (0 <= nw < len(padded) and 0 <= nz < len(cube)
                                and 0 <= ny < len(plane) and 0 <= nx < len(line)):
                            if padded[nw][nz][ny][nx] == '#':
                                active += 1
                    if cell == '.' and active == 3:
                        result[w][z][y][x] = '#'
                    elif cell == '#' and active not in (2, 3):
                        result[w][z][y][x] = '.'
    return result
def print_world(world):
    """Dump every (w, z) slice of the 4D world as text, for debugging."""
    for w_idx, cube in enumerate(world):
        for z_idx, plane in enumerate(cube):
            print("z=%s" % z_idx, ' w=%s' % w_idx)
            for row in plane:
                print("".join(row))
            print()
# Parse the puzzle text into a 4D world: [w][z][y][x] nested lists of chars.
cur_world = [[[list(row) for row in input.split('\n')]]]

for cycle in range(cycles_count):
    print("Cycle:", cycle)
    cur_world = step(cur_world)

# Count every active ('#') cell across all four dimensions.
alive = sum(row.count('#')
            for cube in cur_world
            for plane in cube
            for row in plane)

print("Alive:", alive)
remote/xyzplotlyhandler.py | b38tn1k/rover | 0 | 12770343 | <gh_stars>0
# <NAME> March 2016
# simple streaming x/y/z scatter plot contructor
import plotly.plotly as py
from plotly.graph_objs import Scatter, Layout, Figure, Data, Stream, YAxis
import datetime
from time import sleep
def new_scatter(name, token):
    """Create an empty Plotly Scatter trace bound to a streaming token.

    The trace starts with no points; samples are pushed later through the
    plotly Stream API, keeping at most 500 points on screen.
    """
    stream_cfg = dict(
        token=token,
        maxpoints=500,
    )
    return Scatter(x=[], y=[], name=name, showlegend=True, stream=stream_cfg)
class XYZPlotlyHandler(object):
    """Streams a three-axis (X/Y/Z) time series to one Plotly scatter chart.

    Reads three consecutive stream tokens from ``stream_tokens.secret``
    (assumed one token per line -- TODO confirm file format), opens one
    plotly stream per axis and publishes the figure; the public chart URL is
    kept in ``self.plotly_address``.
    """

    def __init__(self, project_title, name, first_token, units, symm_range):
        # :param project_title: used in the figure title and remote filename
        # :param name: series label shown in the legend
        # :param first_token: index of the first of three token lines to use
        # :param units: y-axis label
        # :param symm_range: half-width of the symmetric y-axis range
        with open('stream_tokens.secret') as f:
            stream_tokens = f.readlines()
        # One token per axis, taken from three consecutive lines.
        x_token = stream_tokens[first_token].rstrip()
        y_token = stream_tokens[first_token + 1].rstrip()
        z_token = stream_tokens[first_token + 2].rstrip()
        x_scatter = new_scatter('{} X'.format(name), x_token)
        y_scatter = new_scatter('{} Y'.format(name), y_token)
        z_scatter = new_scatter('{} Z'.format(name), z_token)
        layout = Layout(
            # showlegend=True,
            title='{}: {}'.format(project_title, name),
            yaxis=YAxis(
                title=units,
                range=[0-symm_range, symm_range]
            )
        )
        data = Data([x_scatter, y_scatter, z_scatter])
        fig = Figure(data=data, layout=layout)
        # Open the three write-streams before publishing the figure.
        self.x_stream = py.Stream(x_token)
        self.y_stream = py.Stream(y_token)
        self.z_stream = py.Stream(z_token)
        self.x_stream.open()
        self.y_stream.open()
        self.z_stream.open()
        self.plotly_address = py.plot(fig, filename='{}: {}'.format(project_title, name))

    def update(self, data2plot):
        # data2plot is expected to be a mapping with 'X', 'Y' and 'Z' keys;
        # each sample is stamped with the current wall-clock time.
        now = datetime.datetime.now()
        self.x_stream.write({'x': now, 'y': data2plot['X']})
        self.y_stream.write({'x': now, 'y': data2plot['Y']})
        self.z_stream.write({'x': now, 'y': data2plot['Z']})
        # Throttle so we stay under plotly's streaming rate limit
        # (NOTE(review): 0.1 s is a guess baked into the original -- confirm).
        sleep(0.1)

    def close_streams(self):
        # Release the three remote streams; call when done plotting.
        self.x_stream.close()
        self.y_stream.close()
        self.z_stream.close()
| 2.71875 | 3 |
twitter/tweets/admin.py | vBubbaa/django-twitter | 1 | 12770344 | from django.contrib import admin
from tweets.models import Tweet, Comment, Likes
# Expose the tweet-related models in the Django admin site.
admin.site.register(Tweet)
admin.site.register(Comment)
admin.site.register(Likes)
| 1.507813 | 2 |
scrapenhl2/plot/team_score_shot_rate.py | muneebalam/scrapenhl2 | 17 | 12770345 | """
This module creates a scatterplot for specified team with shot attempt rates versus league median from down 3 to up 3.
"""
import matplotlib.pyplot as plt
import math
import pandas as pd
import scrapenhl2.scrape.team_info as team_info
import scrapenhl2.manipulate.manipulate as manip
import scrapenhl2.plot.visualization_helper as vhelper
def team_score_shot_rate_parallel(team, startseason, endseason=None, save_file=None):
    """
    Draws a parallel-coordinates chart of 5v5 CF% by score state (trailing
    by 3+ through leading by 3+) for every team, highlighting *team*.

    :param team: int or str, the team to highlight
    :param startseason: int, first season to include (inclusive)
    :param endseason: int or None, last season (inclusive); defaults to startseason
    :param save_file: str or None; show the figure if None, else save to this path

    :return: nothing
    """
    if endseason is None:
        endseason = startseason

    df = pd.concat([manip.team_5v5_shot_rates_by_score(season) for season in range(startseason, endseason + 1)])
    df.loc[:, 'ScoreState'] = df.ScoreState.apply(lambda x: max(min(3, x), -3))  # reduce to +/- 3
    # Aggregate raw counts per team and score state across all games/seasons.
    df = df.drop('Game', axis=1) \
        .groupby(['Team', 'ScoreState'], as_index=False) \
        .sum()
    df.loc[:, 'CF%'] = df.CF / (df.CF + df.CA)
    df = df[['Team', 'ScoreState', 'CF%']] \
        .sort_values('ScoreState')

    # Human-readable state labels: Trail3 ... Tied ... Lead3.
    statelabels = {x: 'Lead{0:d}'.format(x) if x >= 1 else 'Trail{0:d}'.format(abs(x)) for x in range(-3, 4)}
    statelabels[0] = 'Tied'
    df.loc[:, 'ScoreState'] = df.ScoreState.apply(lambda x: statelabels[x])

    # Go to wide
    df = df.pivot_table(index='Team', columns='ScoreState', values='CF%').reset_index()

    # Reorder columns
    df = df[['Team', 'Trail3', 'Trail2', 'Trail1', 'Tied', 'Lead1', 'Lead2', 'Lead3']]

    # Teams to strings
    df.loc[:, 'Team'] = df.Team.apply(team_info.team_as_str)

    # filter for own team
    teamdf = df.query('Team == "{0:s}"'.format(team_info.team_as_str(team)))

    # Make parallel coords
    vhelper.parallel_coords(df, teamdf, 'Team')

    # Set yticklabels
    ys = (0.4, 0.5, 0.6)
    plt.yticks(ys, ['{0:d}%'.format(int(y * 100)) for y in ys])
    plt.ylim(0.35, 0.65)
    plt.title(_team_score_shot_rate_parallel_title(team, startseason, endseason))
    # Hide the axes box for a cleaner parallel-coordinates look.
    for direction in ['right', 'top', 'bottom', 'left']:
        plt.gca().spines[direction].set_visible(False)

    if save_file is None:
        plt.show()
    else:
        plt.savefig(save_file)
def team_score_shot_rate_scatter(team, startseason, endseason=None, save_file=None):
    """
    Draws a CF60-vs-CA60 scatter with one arrow per score state, pointing
    from the league median to *team*'s rates.

    :param team: str or int, team
    :param startseason: int, the starting season (inclusive)
    :param endseason: int or None, the ending season (inclusive); defaults to startseason
    :param save_file: str or None; show the figure if None, else save to this path

    :return: nothing
    """
    if endseason is None:
        endseason = startseason

    df = pd.concat([manip.team_5v5_shot_rates_by_score(season) for season in range(startseason, endseason + 1)])
    df.loc[:, 'ScoreState'] = df.ScoreState.apply(lambda x: max(min(3, x), -3))  # reduce to +/- 3
    # Aggregate raw counts per team and score state, then convert to per-60 rates.
    df = df.drop('Game', axis=1) \
        .groupby(['Team', 'ScoreState'], as_index=False) \
        .sum()
    df.loc[:, 'CF60'] = df.CF * 3600 / df.Secs
    df.loc[:, 'CA60'] = df.CA * 3600 / df.Secs

    # get medians
    medians = df[['ScoreState', 'CF60', 'CA60', 'Secs']].groupby('ScoreState', as_index=False).median()

    # filter for own team
    teamdf = df.query('Team == {0:d}'.format(int(team_info.team_as_id(team))))

    # Human-readable state labels: Trail 3 ... Tied ... Lead 3.
    statelabels = {x: 'Lead {0:d}'.format(x) if x >= 1 else 'Trail {0:d}'.format(abs(x)) for x in range(-3, 4)}
    statelabels[0] = 'Tied'

    # Draw one league-median -> team arrow per score state, with a rotated
    # label centred on the arrow.
    for state in range(-3, 4):
        teamxy = teamdf.query('ScoreState == {0:d}'.format(state))
        teamx = teamxy.CF60.iloc[0]
        teamy = teamxy.CA60.iloc[0]

        leaguexy = medians.query('ScoreState == {0:d}'.format(state))
        leaguex = leaguexy.CF60.iloc[0]
        leaguey = leaguexy.CA60.iloc[0]

        midx = (leaguex + teamx) / 2
        midy = (leaguey + teamy) / 2

        rot = _calculate_label_rotation(leaguex, leaguey, teamx, teamy)

        plt.annotate('', xy=(teamx, teamy), xytext=(leaguex, leaguey), xycoords='data',
                     arrowprops={'arrowstyle': '-|>'})
        plt.annotate(statelabels[state], xy=(midx, midy), ha="center", va="center", xycoords='data', size=8,
                     rotation=rot, bbox=dict(boxstyle="round", fc="w", alpha=0.9))

    # Invisible scatters so matplotlib sizes the axes to include all points.
    plt.scatter(medians.CF60.values, medians.CA60.values, s=100, color='w')
    plt.scatter(teamdf.CF60.values, teamdf.CA60.values, s=100, color='w')

    #bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
    #plt.annotate('Fast', xy=(0.95, 0.95), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
    #plt.annotate('Slow', xy=(0.05, 0.05), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
    #plt.annotate('Good', xy=(0.95, 0.05), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
    #plt.annotate('Bad', xy=(0.05, 0.95), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
    vhelper.add_good_bad_fast_slow()

    plt.xlabel('CF60')
    plt.ylabel('CA60')
    plt.title(_team_score_shot_rate_scatter_title(team, startseason, endseason))

    if save_file is None:
        plt.show()
    else:
        plt.savefig(save_file)
def _team_score_shot_rate_scatter_title(team, startseason, endseason):
    """Title string for the score-state shot-rate scatter chart."""
    dates = vhelper.get_startdate_enddate_from_kwargs(startseason=startseason,
                                                      endseason=endseason)
    team_name = team_info.team_as_str(team)
    return '{0:s} shot rate by score state, {1:s} to {2:s}'.format(team_name, *dates)
def _team_score_shot_rate_parallel_title(team, startseason, endseason):
    """Title string (two lines) for the score-state CF% parallel chart."""
    dates = vhelper.get_startdate_enddate_from_kwargs(startseason=startseason,
                                                      endseason=endseason)
    team_name = team_info.team_as_str(team)
    return '{0:s} CF% by score state\n{1:s} to {2:s}'.format(team_name, *dates)
def _calculate_label_rotation(startx, starty, endx, endy):
"""
Calculates the appropriate rotation angle for a label on an arrow (matches line, is between -90 and 90 degrees)
:param startx: start of arrow (x)
:param starty: start of arrow (y)
:param endx: end of arrow (x)
:param endy: end of arrow (y)
:return: rotation angle.
"""
return math.degrees(math.atan((endy - starty)/(endx - startx)))
| 3.109375 | 3 |
Lib/Nets/utils/generic/tile_creator.py | cattale93/pytorch_self_supervised_learning | 0 | 12770346 | import torch
import os
from Lib.Nets.utils.generic.image2tensorboard import reconstruct_tile
import pickle as pkl
# NOTE(review): absolute, machine-specific paths below -- this is a one-off
# reconstruction script rather than a reusable module.
path = '/home/ale/Documents/Python/13_Tesi_2/runs/agan/10_32_idt/checkpoints/args.pkl'
# Restore the training options saved at train time.
opt = pkl.load(open(path, "rb"))
# Patch positions used to stitch patches back into the full tile.
posx = pkl.load(open(os.path.join(opt.data_dir_train, 'posx.pkl'), "rb"))
posy = pkl.load(open(os.path.join(opt.data_dir_train, 'posy.pkl'), "rb"))
# Each saved '<epoch>.pt' tensor in the tensorboard dir holds one epoch's
# translated patches.
file_list = os.listdir(opt.tb_dir)
tile_list = list(filter(lambda x: '.pt' in x, file_list))
name = 'RT'
par_path = '/home/ale/Documents/Python/13_Tesi_2/Data/Datasets/EUSAR/Train/'
for i in tile_list:
    epoch = i.split('.')[0]
    trans = torch.load(os.path.join(opt.tb_dir, epoch + '.pt'))
    # Rebuild the full 8736x13984 tile from the per-patch outputs of this epoch.
    reconstruct_tile(name, opt.patch_size, posx, posy, opt.tb_dir, [8736, 13984], epoch, trans)#, parameter_path=par_path)
| 2.15625 | 2 |
app/models.py | awesome-archive/susnote | 0 | 12770347 | #!/usr/bin/env python
# encoding: utf-8
from peewee import *
from playhouse.postgres_ext import *
import datetime
class BaseModel(Model):
    """Common base for all tables: surrogate ``id`` plus a creation time."""
    id = PrimaryKeyField()
    # Server-side default so the timestamp is set by the database on insert.
    create_time = DateTimeField(verbose_name='create_time', constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')])
class Notebook(BaseModel):
    """A notebook grouping a user's articles."""
    name = CharField(max_length=128)
    # NOTE(review): the default is the string '0' on an IntegerField; peewee
    # coerces it, but an int 0 would be clearer (same pattern below).
    author_id = IntegerField(default='0')

    class Meta:
        db_table = 'notebook'
class Article(BaseModel):
    """A note/article belonging to an author, optionally filed in a notebook."""
    title = CharField(max_length=128)
    content = TextField(verbose_name='content')
    author_id = IntegerField(default='0')
    notebook_id = IntegerField(default='0')
    # Free-form origin marker (presumably where the article came from -- verify).
    source = CharField(max_length=128)

    class Meta:
        db_table = 'article'
class Article_History(BaseModel):
    """A saved revision of an article (title + content snapshot)."""
    title = CharField(max_length=128)
    content = TextField(verbose_name='content')
    author_id = IntegerField(default='0')
    # The article this revision belongs to.
    article_id = IntegerField(default='0')

    class Meta:
        db_table = 'article_history'
class Author(BaseModel):
    """An account that owns notebooks and articles."""
    nickname = CharField(max_length=128)
    # NOTE(review): password + password_salt suggests a salted hash is stored;
    # confirm at the call sites that plaintext is never written here.
    password = CharField(max_length=128)
    password_salt = CharField(max_length=128)
    username = CharField(max_length=128)

    class Meta:
        db_table = 'author'
class Image(BaseModel):
    """An uploaded image, attachable to an article or another related record."""
    path = CharField(max_length=128)
    title = CharField(max_length=128)
    article_id = IntegerField(default='0')
    size = CharField(max_length=128)
    # Generic link: together with `type`, points at the owning record
    # (presumably a polymorphic association -- verify against callers).
    related_id = IntegerField(default='0')
    author_id = IntegerField(default='0')
    type = CharField(max_length=128)

    class Meta:
        db_table = 'image'
class RSS_Source(BaseModel):
    """A subscribed RSS feed."""
    url = CharField(max_length=128)
    title = CharField(max_length=128)
    # Callable default: evaluated per row at insert time (not at import time).
    update_time = DateTimeField(verbose_name='create_time', default=datetime.datetime.now)
    rss_category_id = IntegerField(default=0)

    class Meta:
        db_table = 'rss_source'
class RSS_Flow(BaseModel):
    """A single entry fetched from an RSS source."""
    url = CharField(max_length=128)
    title = CharField(max_length=128)
    author = CharField(max_length=128)
    # Read-state flag for the feed reader UI.
    is_readed = BooleanField(default=False)
    content = TextField(verbose_name='content')
    # The RSS_Source this entry came from.
    source_id = IntegerField(default='0')

    class Meta:
        db_table = 'rss_flow'
class RSS_Category(BaseModel):
    """A user-defined grouping for RSS sources."""
    title = CharField(max_length=128)

    class Meta:
        db_table = 'rss_category'
| 2.140625 | 2 |
tests/_namespace_util_test.py | poros/data_pipeline | 110 | 12770348 | <filename>tests/_namespace_util_test.py
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from data_pipeline._namespace_util import DBSourcedNamespace
class TestDBSourcedtNamespace(object):
    """Tests for DBSourcedNamespace parsing, validation and construction.

    (The "Sourcedt" typo in the class name is kept so test ids stay stable.)
    """

    def test_simple(self):
        name = "refresh_primary.yelp"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
            expected_name=name,
            expected_cluster="refresh_primary",
            expected_database="yelp"
        )

    def test_main_cluster(self):
        name = "main.database"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
            expected_name=name,
            expected_cluster="main",
            expected_database="database"
        )

    def test_environment(self):
        name = "main.refresh_primary.yelp"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
            expected_name=name,
            expected_cluster="refresh_primary",
            expected_database="yelp",
            expected_environment="main"
        )

    def test_tranformers(self):
        name = "dev.refresh_primary.yelp.heartbeat.yelp-main_transformed"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name(name),
            expected_name=name,
            expected_cluster="refresh_primary",
            expected_database="yelp",
            expected_environment="dev",
            expected_suffixes=["heartbeat", "yelp-main_transformed"]
        )

    def test_fail_missing(self):
        self._assert_failure("yelp", error_substr="not enough sections")
        self._assert_failure("refresh_primary", error_substr="not enough sections")

    def test_fail_invalid_chars(self):
        self._assert_failure("^refresh_primary.yelp", error_substr="must contain at least")
        self._assert_failure("fadjskl;.fjd", error_substr="must contain at least")
        self._assert_failure("______.______", error_substr="must contain at least")
        self._assert_failure("refresh_primary..yelp", error_substr="must contain at least")

    def test_guarantees(self):
        name = "main.database.transformer"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
                name,
                expected_cluster="main"
            ),
            expected_name=name,
            expected_cluster="main",
            expected_database="database",
            expected_suffixes=["transformer"]
        )

    def test_guarantees_db(self):
        name = "main.database.transformer"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
                name,
                expected_database="database"
            ),
            expected_name=name,
            expected_cluster="main",
            expected_database="database",
            expected_suffixes=["transformer"]
        )

    def test_guarantees_transformer(self):
        name = "main.database.transformer"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
                name,
                expected_suffixes=["transformer"]
            ),
            expected_name=name,
            expected_cluster="main",
            expected_database="database",
            expected_suffixes=["transformer"]
        )

    def test_guarantees_environment(self):
        name = "env.cluster.database"
        self._assert_success(
            actual_namespace=DBSourcedNamespace.create_from_namespace_name_with_guarantees(
                name,
                expected_environment="env"
            ),
            expected_name=name,
            expected_environment="env",
            expected_cluster="cluster",
            expected_database="database"
        )

    def test_fail_impossible(self):
        name = "dev.refresh_primary.yelp.transformer"
        self._assert_failure_with_guarantees(
            name,
            expected_environment="main"
        )

    def test_fail_impossible_suffixes(self):
        name = "dev.refresh_primary.yelp.transformer"
        self._assert_failure_with_guarantees(
            name,
            expected_suffixes=["heartbeat"]
        )

    def test_fail_impossible_double_cluster_env(self):
        name = "dev.refresh_primary.yelp.transformer"
        self._assert_failure_with_guarantees(
            name,
            expected_environment="dev",
            expected_cluster="dev"
        )

    def test_fail_impossible_env_db(self):
        name = "dev.refresh_primary.yelp.transformer"
        self._assert_failure_with_guarantees(
            name,
            expected_environment="dev",
            expected_database="refresh_primary"
        )

    def test_no_name(self):
        self._assert_success(
            actual_namespace=DBSourcedNamespace(
                environment="main",
                cluster="refresh_primary",
                database="yelp"
            ),
            expected_name="main.refresh_primary.yelp",
            expected_environment="main",
            expected_cluster="refresh_primary",
            expected_database="yelp"
        )

    def test_no_name_no_env(self):
        self._assert_success(
            actual_namespace=DBSourcedNamespace(
                cluster="refresh_primary",
                database="yelp",
                suffixes=["heartbeat"]
            ),
            expected_name="refresh_primary.yelp.heartbeat",
            expected_cluster="refresh_primary",
            expected_database="yelp",
            expected_suffixes=["heartbeat"]
        )

    def _assert_failure(self, name, error_substr):
        """Assert that parsing *name* raises ValueError mentioning *error_substr*."""
        with pytest.raises(ValueError) as e:
            DBSourcedNamespace.create_from_namespace_name(name)
        # Bug fix: `e` is an ExceptionInfo, which does not support `in`;
        # the substring must be checked against the exception message.
        assert error_substr in str(e.value)

    def _assert_failure_with_guarantees(
        self,
        name,
        expected_cluster=None,
        expected_database=None,
        expected_environment=None,
        expected_suffixes=None
    ):
        """Assert that the given guarantees cannot be reconciled with *name*."""
        with pytest.raises(ValueError) as e:
            DBSourcedNamespace.create_from_namespace_name_with_guarantees(
                name,
                expected_environment=expected_environment,
                expected_cluster=expected_cluster,
                expected_database=expected_database,
                expected_suffixes=expected_suffixes
            )
        # Bug fix: check the exception message, not the ExceptionInfo object.
        assert "impossible to rectify" in str(e.value)

    def _assert_success(
        self,
        actual_namespace,
        expected_name,
        expected_cluster,
        expected_database,
        expected_environment=None,
        expected_suffixes=None
    ):
        """Assert every component of *actual_namespace* matches expectations."""
        if not expected_suffixes:
            expected_suffixes = []
        assert actual_namespace.get_name() == expected_name
        assert actual_namespace.cluster == expected_cluster
        assert actual_namespace.database == expected_database
        assert actual_namespace.environment == expected_environment
        assert actual_namespace.suffixes == expected_suffixes
| 2.015625 | 2 |
tests/unit2/test_arcade.py | LiorAvrahami/arcade | 824 | 12770349 | <filename>tests/unit2/test_arcade.py
import logging
import arcade
def test_logging():
    """configure_logging must set the 'arcade' logger to the requested level."""
    arcade.configure_logging(logging.WARNING)
    assert logging.getLogger('arcade').level == logging.WARNING
| 2.296875 | 2 |
app/__init__.py | ralphdc/zihan | 0 | 12770350 | <reponame>ralphdc/zihan<gh_stars>0
#!/usr/bin/env python3
from .Download import Download
class GetHandler():
    """Registry mapping a unit name to its handler class.

    ``get_unit`` looks the name up and returns an instance constructed with
    the given keyword arguments.
    """

    _ITEM_ = {
        'download': Download
    }

    @classmethod
    def get_unit(cls, unit, **kwargs):
        """Instantiate the handler registered under *unit*.

        :param unit: registry key, e.g. ``'download'``
        :param kwargs: forwarded to the handler's constructor
        :raises Exception: if *unit* is not registered
        """
        try:
            # Single lookup (EAFP) instead of the original `in` + `.get()` pair.
            handler_cls = cls._ITEM_[unit]
        except KeyError:
            raise Exception("[Error] - GetHandler get unit error, unit not found...") from None
        return handler_cls(**kwargs)
| 2.59375 | 3 |