max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
AtC_Beg_Con_081-090/ABC090/B.py | yosho-18/AtCoder | 0 | 12766551 | a, b = map(int, input().split())
a = str(a)
b = str(b)
count = 0
c = int(a)
d = int(b)
for i in range(c, d + 1):
i = str(i)
if i[0] == i[4] and i[1] == i[3]:
count += 1
print(count) | 3.015625 | 3 |
test/Inputs/getmtime.py | xjc90s/swift | 1 | 12766552 | <filename>test/Inputs/getmtime.py
#!/usr/bin/env python3
import os
import sys
print(os.path.getmtime(sys.argv[1]))
| 1.765625 | 2 |
tests/test_generator.py | frankie567/cookiecutter-eventually-landing | 0 | 12766553 | import importlib.util
import os.path
import pytest
import shutil
import unittest
from cookiecutter.main import cookiecutter
def load_module_from_path(module_name, path):
    """Import a Python module directly from a file path and return it."""
    spec = importlib.util.spec_from_file_location(module_name, path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
class TestGenerator(unittest.TestCase):
cookiecutter_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def setUp(self):
# Generate test project with Cookiecutter
cookiecutter(
self.cookiecutter_path,
no_input=True,
extra_context={
'title': 'Test project',
'mailjet_apikey_public': 'public_key',
'mailjet_apikey_private': 'private_key',
'mailjet_contactslist_id': 'contactslist_id'
}
)
self.test_project_path = os.path.join(self.cookiecutter_path, 'test_project')
def tearDown(self):
# Remove test project files
shutil.rmtree(self.test_project_path, ignore_errors=True)
def test_project_generated(self):
# Project directory
self.assertTrue(os.path.exists(self.test_project_path))
# Flask app directory
flask_app_path = os.path.join(self.test_project_path, 'test_project')
self.assertTrue(os.path.exists(flask_app_path))
def test_config_values_copied(self):
config = load_module_from_path('test_project.config', os.path.join(self.test_project_path, 'test_project', 'config.py'))
self.assertEqual(config.Config.MJ_APIKEY_PUBLIC, 'public_key')
self.assertEqual(config.Config.MJ_APIKEY_PRIVATE, 'private_key')
self.assertEqual(config.Config.MJ_CONTACTSLIST_ID, 'contactslist_id')
def test_app(self):
# Run the tests generated in test project
return_code = pytest.main([os.path.join(self.test_project_path, 'test_project')])
self.assertEqual(return_code, 0)
| 2.390625 | 2 |
datadef.py | ffee21/socialmap-korea | 0 | 12766554 | import os
from gcloud import datastore
def selectAll(client, kind, order=None):
    """Fetch every entity of the given kind, optionally sorted by `order`."""
    q = client.query(kind=kind)
    if order:
        q.order = order
    return list(q.fetch())
def deleteAll(client, kind):
    """Delete every entity of the given kind in a single batch call."""
    # Fix: the original bound the result to `all`, shadowing the builtin.
    entities = selectAll(client, kind)
    keys = [entity.key for entity in entities]
    client.delete_multi(keys)
def fetchDataDefs():
    """Read data definitions as [code, baseyear, name] rows from the Google sheet."""
    # Imported lazily so the gspread/oauth2client dependency is only required
    # when actually syncing from the spreadsheet.
    import gspread
    from oauth2client.service_account import ServiceAccountCredentials
    scope = ['https://spreadsheets.google.com/feeds']
    # Service-account credentials file is expected next to this script.
    credentials = ServiceAccountCredentials.from_json_keyfile_name('socialmapkorea-credentials.json', scope)
    gc = gspread.authorize(credentials)
    wks = gc.open("socialmapkorea_data").sheet1
    # Columns 1-3 hold code / baseyear / name; drop trailing empty cells so
    # the three lists line up when zipped below.
    code_list = list(filter(lambda x: len(x)>0, wks.col_values(1)))
    baseyear_list = list(filter(lambda x: len(x)>0, wks.col_values(2)))
    name_list = list(filter(lambda x: len(x)>0, wks.col_values(3)))
    data_def_list = list(map(lambda x,y,z: [x,y,z], code_list, baseyear_list, name_list))
    return data_def_list
# -------------------------------------------------
def importDataDefs():
    """Replace all 'Data_Def' entities in Datastore with rows from the sheet."""
    client = datastore.Client(os.environ['GCLOUD_PROJECT'])
    # Full refresh: wipe existing entities before re-inserting.
    deleteAll(client, 'Data_Def')
    data_def_list = fetchDataDefs()
    with client.transaction():
        # Incomplete key (kind only, no id); reused for every entity below.
        incomplete_keys = client.key('Data_Def')
        for item in data_def_list:
            # Skip the spreadsheet header row.
            if item[0] != u'data_code':
                datadefEntity = datastore.Entity(key=incomplete_keys)
                datadefEntity.update({
                    'code': item[0],
                    'baseyear': item[1],
                    'name': item[2]})
                client.put(datadefEntity)
def getAllDataDefs():
client = datastore.Client(os.environ['GCLOUD_PROJECT'])
all = selectAll(client, kind='Data_Def', order='code')
allc = list(map(lambda x: {'code': x[u'code'].encode("utf-8"), 'baseyear': x[u'baseyear'].encode("utf-8"), 'name': x[u'name'].encode("utf-8")}, all))
return allc | 2.5 | 2 |
preprocessing/preprocessing.py | jwheatp/twitter-riots | 0 | 12766555 | <reponame>jwheatp/twitter-riots
# imports
import json
import math
import sys
import nltk
import re
import csv
from datetime import datetime,timedelta
# global variables
lancaster = nltk.LancasterStemmer()
def tokenize(raw) :
    """Tokenizing a string"""
    # Delegates to NLTK's default word tokenizer.
    return nltk.word_tokenize(raw)
def stem(tokens) :
    """Stemming tokens"""
    # `lancaster` is the module-level nltk.LancasterStemmer instance.
    return [lancaster.stem(t) for t in tokens]
# not used at the moment, too long
def lem(tokens) :
    """Lemmatize tokens with NLTK's WordNet lemmatizer (unused: too slow)."""
    # Fix: the original referenced an undefined `wnl` and raised NameError
    # when called. Instantiate the lemmatizer on demand instead.
    wnl = nltk.WordNetLemmatizer()
    return [wnl.lemmatize(t) for t in tokens]
def rmStops(tokens) :
    """Remove English stop words from a list of tokens."""
    # Load the stopword list once per call and use a set for O(1) membership
    # tests; the original re-read the NLTK corpus for every single token.
    stops = set(nltk.corpus.stopwords.words('english'))
    filtered_words = [w for w in tokens if w not in stops]
    return filtered_words
def rmHtml(string):
    """Return the text captured between '>' and '<' as a list of matches.

    Greedy per match: for '<p>hi</p>' the single capture is 'hi'; an input
    with no tags yields an empty list.
    """
    captures = re.findall('>(.*)<', string)
    return captures
def rmPunct(string) :
    """Strip every character that is neither a word character nor whitespace."""
    no_punct = re.sub(r'[^\w\s]', '', string)
    return no_punct
def rmAts(string) :
    """Drop @mention tokens (an '@' followed by word characters)."""
    mention = re.compile(r'@\w+')
    return mention.sub('', string)
def rmHTs(string) :
    """Drop hashtag tokens (a '#' followed by word characters)."""
    hashtag = re.compile(r'#\w+')
    return hashtag.sub('', string)
def rmRTs(tokens) :
    """Filter out the literal token "rt" (lower-cased retweet marker)."""
    return list(filter(lambda tok: tok != "rt", tokens))
def rmLinks(string) :
    """Remove http/https URLs from the string."""
    url = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    return url.sub('', string)
def clean(raw) :
    """Clean a tweet - All the preprocess in one function"""
    # Order matters: links, @mentions and #hashtags are stripped before
    # punctuation removal, otherwise their marker characters would already be
    # gone and their text would leak into the token stream.
    raw = raw.lower()
    raw = rmLinks(raw)
    raw = rmAts(raw)
    raw = rmHTs(raw)
    raw = rmPunct(raw)
    tokens = tokenize(raw)
    tokens = rmStops(tokens)
    tokens = rmRTs(tokens)
    return tokens
def processTweet(tweet) :
    """Parse one raw CSV line, enrich it, and append the result to `outpath`."""
    # NOTE(review): `outfile` is declared global but never assigned or read in
    # this function; only the progress counter `i` is actually rebound.
    global i,outfile
    i = i + 1
    if math.fmod(i,100) == 0 :
        print(i)
    # Split into quoted fields: the negative lookahead stops each field at the
    # '","' delimiter, so embedded commas survive.
    tweet = re.findall(r'"((?:(?!(?:",")).)*)"',tweet)
    # Field 13 presumably holds the HTML "source" cell — TODO confirm against
    # the scraper's CSV layout. rmHtml returns a list; take the first capture
    # or fall back to the literal string 'null'.
    tweet[13] = rmHtml(tweet[13])
    if len(tweet[13]) > 0 :
        tweet[13] = tweet[13][0]
    else :
        tweet[13] = 'null'
    # Field 3 appears to be the tweet text: detect retweets, collect mentions.
    rt = re.findall(r"RT @([a-zA-Z0-9-_]*):? (.*)",tweet[3])
    isRT = len(rt) > 0
    mentions = re.findall(r'@\w+', tweet[3])
    mentions = [s[1:] for s in mentions]
    mentions = ','.join(mentions)
    # Replace the raw text with a comma-joined list of cleaned tokens.
    tweet[3] = clean(tweet[3])
    tweet[3] = ','.join(tweet[3])
    tweet.insert(10,mentions)
    if isRT :
        isRT = '1'
    else :
        isRT = '0'
    tweet.insert(8,isRT)
    # Re-quote every field and append the processed row to the output file.
    tweet = ['"%s"' %s for s in tweet]
    tweet = ','.join(tweet)
    with open(outpath, "a") as file:
        file.write('%s\n' % tweet)
# counter
i = 0
filepath = str(sys.argv[1])
outpath = "%s_p" % filepath
# iterate tweets
with open(filepath) as f:
for tweet in f:
processTweet(tweet)
| 2.9375 | 3 |
setup.py | DHI/fmdap | 1 | 12766556 | <reponame>DHI/fmdap<gh_stars>1-10
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("LICENSE") as fh:
license = fh.read()
setuptools.setup(
name="fmdap",
version="0.1.dev3",
install_requires=[
"numpy",
"pandas",
"matplotlib",
"scipy",
"statsmodels",
"mikeio >= 0.7",
"fmskill >= 0.3.3",
],
extras_require={
"dev": [
"pytest",
"sphinx",
"sphinx-book-theme",
"black==20.8b1",
"shapely",
"plotly >= 4.5",
],
"test": ["pytest", "shapely"],
"notebooks": [
"nbformat",
"nbconvert",
"jupyter",
"plotly",
],
},
author="<NAME>",
author_email="<EMAIL>",
description="MIKE FM Data Assimilation pre- and post-processor.",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DHI/fmdap",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
],
)
| 1.617188 | 2 |
elegantrl/envs/starcraft/smac_maps.py | tnerush71/ElegantRL | 759 | 12766557 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pysc2.maps import lib
from smac.env.starcraft2.maps import smac_maps
map_param_registry = {
"1o_10b_vs_1r": {
"n_agents": 11,
"n_enemies": 1,
"limit": 50,
"a_race": "Z",
"b_race": "Z",
"unit_type_bits": 2,
"map_type": "overload_bane"
},
"1o_2r_vs_4r": {
"n_agents": 3,
"n_enemies": 4,
"limit": 50,
"a_race": "Z",
"b_race": "Z",
"unit_type_bits": 2,
"map_type": "overload_roach"
},
"bane_vs_hM": {
"n_agents": 3,
"n_enemies": 2,
"limit": 30,
"a_race": "Z",
"b_race": "T",
"unit_type_bits": 2,
"map_type": "bZ_hM"
}
}
smac_maps.map_param_registry.update(map_param_registry)
def get_map_params(map_name):
    """Return the parameter dict for a SMAC map by name."""
    # This local deliberately shadows the module-level dict of the same name:
    # the registry fetched from smac_maps already includes the custom maps
    # merged in via smac_maps.map_param_registry.update(...) above.
    map_param_registry = smac_maps.get_smac_map_registry()
    return map_param_registry[map_name]
for name in map_param_registry.keys():
globals()[name] = type(name, (smac_maps.SMACMap,), dict(filename=name))
| 2.3125 | 2 |
PowerScriptast.py | twtg93/PowerScript | 1 | 12766558 | import collections
BOOL = {"BOOLAND", "BOOLOR", "BOOLNOT"}
POW = {"POW"}
MUL_DIV = {"MUL", "DIV"}
ADD_SUB = {"ADD", "SUB"}
SHIFT = {"LSHIFT", "RSHIFT"}
BIN = {"AND", "OR", "XOR"}
CMP = {"EQ", "NEQ", "LEQ", "GEQ", "LT",
"GT", "CONTAINS", "CONTAINED"}
NODES = {
"UnaryExpr": "op val",
"BinExpr": "op left right",
"CmpExpr": "ops vals",
"DotExpr": "val name",
"CallExpr": "func args",
"ColonCallExpr": "expr name args",
"IdentExpr": "ident",
"ListExpr": "vals",
"NumLit": "val",
"BoolLit": "val",
"StrLit": "val",
"ExprLine": "expr",
"SetLine": "name expr",
"IfLine": "cond_codes",
"WhileLine": "cond line",
"FuncLine": "name arg_names line",
"ReturnLine": "val",
"Suite": "lines",
}
for name, fields in NODES.items():
globals()[name] = collections.namedtuple(name, fields) | 2.53125 | 3 |
Examples/charlcd_rpi_mono_simpletest.py | pmartel/CircuitPyDisk | 3 | 12766559 | """Simple test for monochromatic character LCD on Raspberry Pi"""
import time
import board
import digitalio
import adafruit_character_lcd.character_lcd as characterlcd
# Modify this if you have a different sized character LCD
lcd_columns = 16
lcd_rows = 2
# Raspberry Pi Pin Config:
lcd_rs = digitalio.DigitalInOut(board.D26)
lcd_en = digitalio.DigitalInOut(board.D19)
lcd_d7 = digitalio.DigitalInOut(board.D27)
lcd_d6 = digitalio.DigitalInOut(board.D22)
lcd_d5 = digitalio.DigitalInOut(board.D24)
lcd_d4 = digitalio.DigitalInOut(board.D25)
lcd_backlight = digitalio.DigitalInOut(board.D4)
# Initialise the lcd class
lcd = characterlcd.Character_LCD_Mono(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6,
lcd_d7, lcd_columns, lcd_rows, lcd_backlight)
# Turn backlight on
lcd.backlight = True
# Print a two line message
lcd.message = "Hello\nCircuitPython"
# Wait 5s
time.sleep(5)
lcd.clear()
# Print two line message right to left
lcd.text_direction = lcd.RIGHT_TO_LEFT
lcd.message = "Hello\nCircuitPython"
# Wait 5s
time.sleep(5)
# Return text direction to left to right
lcd.text_direction = lcd.LEFT_TO_RIGHT
# Display cursor
lcd.clear()
lcd.cursor = True
lcd.message = "Cursor! "
# Wait 5s
time.sleep(5)
# Display blinking cursor
lcd.clear()
lcd.blink = True
lcd.message = "Blinky Cursor!"
# Wait 5s
time.sleep(5)
lcd.blink = False
lcd.clear()
# Create message to scroll
scroll_msg = '<-- Scroll'
lcd.message = scroll_msg
# Scroll message to the left
for i in range(len(scroll_msg)):
time.sleep(0.5)
lcd.move_left()
lcd.clear()
lcd.message = "Going to sleep\nCya later!"
# Turn backlight off
lcd.backlight = False
time.sleep(2)
| 3.265625 | 3 |
src/nsupdate-interactive.py | perryflynn/nsupdate-interactive | 2 | 12766560 | #!/usr/bin/python3
import os
import sys
import re
import argparse
import subprocess
import shlex
import shutil
import datetime
from zoneutils import zonefile, zonefileformatter, nsupdate, utils
SLUG_RGX = re.compile(r"[^a-zA-Z0-9_]")
def parse_args():
    """ Parse command line arguments """
    parser = argparse.ArgumentParser(description='nsupdate-interactive')
    # Exactly one mode: either run the full interactive update for a zone,
    # or just print the HMAC environment-variable slug for a zone name.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--zone', type=str)
    group.add_argument('--get-zone-slug', type=str)
    # Optional; when omitted, main() discovers the server via the SOA record.
    parser.add_argument('--dnsserver', type=str, required=False)
    return parser.parse_args()
def check_dependencies(editor: str):
    """ Check for binaries which are required for this script """
    binaries = [ editor, 'nsupdate', 'dig', 'diff', 'colordiff', 'named-checkzone' ]
    binarymissing = False
    # Report every missing binary (rather than bailing on the first) so the
    # user can install them all in one go; exit non-zero if any is absent.
    for binary in binaries:
        if shutil.which(binary) is None:
            binarymissing = True
            print("The program '"+binary+"' is required to use this script")
    if binarymissing:
        sys.exit(1)
def domain_slugify(domain: str) -> str:
    """Return an upper-cased identifier slug for a (possibly IDN) domain."""
    # Punycode-encode first so non-ASCII domain names yield stable ASCII
    # slugs; SLUG_RGX then replaces every non [a-zA-Z0-9_] char with '_'.
    idn = domain.encode('idna').decode('utf-8-sig')
    return SLUG_RGX.sub('_', idn).upper().strip()
def press(what: str):
    """Block until the user confirms with ENTER (CTRL+C aborts the script)."""
    input(f"Press ENTER to {what}, CTRL+C to abort.")
def main():
""" Main function of the script"""
# get editor
editor = os.environ.get('EDITOR', 'nano')
# check for dependend programs
check_dependencies(editor)
# parse arguments
args = parse_args()
# print domain slug
if args.get_zone_slug:
print(f"HMAC_{domain_slugify(args.get_zone_slug)}")
sys.exit(0)
# get hmac key
zone_varname = f"HMAC_{domain_slugify(args.zone)}"
hmackey = os.environ.get('HMAC', os.environ.get(zone_varname))
if hmackey is None:
print("Environment variable 'HMAC' is required.")
sys.exit(1)
# find nameserver if no one is defined
if not args.dnsserver:
args.dnsserver = utils.dig_get_authoritative_server(args.zone)
if args.dnsserver:
print(f"Found dns server by SOA record: {args.dnsserver}")
else:
print("There was no '--dnsserver' option defined and we are unable")
print("to find the authoritative name server by SOA record.")
sys.exit(1)
# base filename
ts = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')+'Z'
filename = 'nsupdate_'+args.dnsserver+'_'+args.zone+'_'+ts+'.{0}.db'
# get zone records by calling dig
digstr = utils.dig_zonetransfer(args.dnsserver, hmackey, args.zone)
if digstr[2] == utils.ZonetransferResult.KEYINVALID:
print(digstr[1])
print("Invalid HMAC key provided or HMAC key was denied by DNS server.")
sys.exit(1)
elif digstr[2] == utils.ZonetransferResult.FAILED:
print(digstr[1])
print("Transfer failed.")
print("Maybe a typo in zone name or dns server address?")
print("Or the HMAC doesn't have the permission to access the given dns zone.")
sys.exit(1)
elif digstr[0] == False:
print("dig failed:")
print(digstr[1])
sys.exit(1)
records = zonefile.ZoneFile(digstr[1])
if len(records.records) < 1:
print("Unable to find any records in the DNS zone.")
print("There must be at least a SOA record.")
print("Maybe a typo in the zone name or dns server address?")
print("Or something wrong with your permissions?")
sys.exit(1)
# create zone files for diff and editing
formatter = zonefileformatter.ZoneFileFormatter()
for version in [ 'org', 'new' ]:
formatter.save(filename.format(version), records)
# edit and check syntax
haserrors = True
while haserrors:
# open text editor
subprocess.call([ editor, filename.format('new') ])
# check syntax
checkresult = utils.checkzone(args.zone, filename.format('new'))
if checkresult[0]:
haserrors = False
else:
print("Found syntax errors in zone file:")
print(checkresult[1])
press('correct the zone file')
# show a diff between work copy and original
diffresult = utils.diff(filename.format('org'), filename.format('new'))
if diffresult[0] == False:
print("No changes made. Exit.")
os.remove(filename.format('org'))
os.remove(filename.format('new'))
sys.exit(0)
# update soa serial
originalsoa = zonefile.SoaRecord(next(filter(lambda x: x.dnsType=='SOA', records.records)))
newrecords = zonefile.load(filename.format('new'))
editedsoa = zonefile.SoaRecord(next(filter(lambda x: x.dnsType=='SOA', newrecords.records)))
if originalsoa == editedsoa:
# update serial with the classic date format
editedsoa.apply_default_serialincrease()
# write zone file and redo diff
formatter.save(filename.format('new'), newrecords)
diffresult = utils.diff(filename.format('org'), filename.format('new'))
print(utils.colorize_diff(diffresult[1])[1])
# write diff into a patch file
with open(filename.format('patch'), 'w+') as f:
f.write(diffresult[1])
# ask befort continue with nsupdate
press('send the changes to the nameserver')
# create nsupdate batch file
minidiff = utils.diff_minimal(filename.format('org'), filename.format('new'))[1]
nsupdater = nsupdate.from_diff(minidiff)
nsupdatestr = '\n'.join(list(nsupdater.get_nsupdate_batch(args.dnsserver, args.zone)))
with open(filename.format('batch'), 'w+') as f:
f.write(nsupdatestr)
# execute nsupdate
updateresult = utils.nsupdate(hmackey, filename.format('batch'))
if updateresult[0] == False:
print("nsupdate failed:")
print(updateresult[1])
sys.exit(1)
# start main function
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
# catch exception when script was canceled by CTRL+C
pass
| 2.875 | 3 |
src/momento/simple_cache_client.py | momentohq/client-sdk-python | 0 | 12766561 | import asyncio
from types import TracebackType
from typing import Optional, Mapping, Union, Type
from .aio import simple_cache_client as aio
from ._async_utils import wait_for_coroutine
from .cache_operation_types import (
CacheGetResponse,
CacheSetResponse,
CacheDeleteResponse,
CreateCacheResponse,
CreateSigningKeyResponse,
DeleteCacheResponse,
ListCachesResponse,
CacheMultiGetResponse,
CacheMultiSetResponse,
ListSigningKeysResponse,
RevokeSigningKeyResponse,
)
from ._utilities._data_validation import _validate_request_timeout
class SimpleCacheClient:
def __init__(
self,
auth_token: str,
default_ttl_seconds: int,
data_client_operation_timeout_ms: Optional[int],
):
self._init_loop()
self._momento_async_client = aio.SimpleCacheClient(
auth_token=auth_token,
default_ttl_seconds=default_ttl_seconds,
data_client_operation_timeout_ms=data_client_operation_timeout_ms,
)
def _init_loop(self) -> None:
try:
# If the synchronous client is used inside an async application,
# use the event loop it's running within.
loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
except RuntimeError:
# Currently, we rely on asyncio's module-wide event loop due to the
# way the grpc stubs we've got are hiding the _loop parameter.
# If a separate loop is required, e.g., so you can run Simple Cache
# on a background thread, you'll want to open an issue.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self._loop = loop
def __enter__(self) -> "SimpleCacheClient":
wait_for_coroutine(self._loop, self._momento_async_client.__aenter__())
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
wait_for_coroutine(
self._loop,
self._momento_async_client.__aexit__(exc_type, exc_value, traceback),
)
def create_cache(self, cache_name: str) -> CreateCacheResponse:
"""Creates a new cache in your Momento account.
Args:
cache_name: String used to create cache.
Returns:
CreateCacheResponse
Raises:
InvalidArgumentError: If provided cache_name is None.
BadRequestError: If the cache name provided doesn't follow the naming conventions
AlreadyExistsError: If cache with the given name already exists.
AuthenticationError: If the provided Momento Auth Token is invalid.
ClientSdkError: For any SDK checks that fail.
"""
coroutine = self._momento_async_client.create_cache(cache_name)
return wait_for_coroutine(self._loop, coroutine)
def delete_cache(self, cache_name: str) -> DeleteCacheResponse:
"""Deletes a cache and all the items within it.
Args:
cache_name: String cache name to delete.
Returns:
DeleteCacheResponse
Raises:
InvalidArgumentError: If provided cache_name is None.
BadRequestError: If the cache name provided doesn't follow the naming conventions
NotFoundError: If an attempt is made to delete a MomentoCache that doesn't exist.
AuthenticationError: If the provided Momento Auth Token is invalid.
ClientSdkError: For any SDK checks that fail.
"""
coroutine = self._momento_async_client.delete_cache(cache_name)
return wait_for_coroutine(self._loop, coroutine)
def list_caches(self, next_token: Optional[str] = None) -> ListCachesResponse:
"""Lists all caches.
Args:
next_token: Token to continue paginating through the list. It's used to handle large paginated lists.
Returns:
ListCachesResponse
Raises:
AuthenticationError: If the provided Momento Auth Token is invalid.
"""
coroutine = self._momento_async_client.list_caches(next_token)
return wait_for_coroutine(self._loop, coroutine)
def create_signing_key(self, ttl_minutes: int) -> CreateSigningKeyResponse:
"""Creates a Momento signing key
Args:
ttl_minutes: The key's time-to-live in minutes
Returns:
CreateSigningKeyResponse
Raises:
InvalidArgumentError: If provided ttl minutes is negative.
BadRequestError: If the ttl provided is not accepted
AuthenticationError: If the provided Momento Auth Token is invalid.
ClientSdkError: For any SDK checks that fail.
"""
coroutine = self._momento_async_client.create_signing_key(ttl_minutes)
return wait_for_coroutine(self._loop, coroutine)
def revoke_signing_key(self, key_id: str) -> RevokeSigningKeyResponse:
"""Revokes a Momento signing key, all tokens signed by which will be invalid
Args:
key_id: The id of the Momento signing key to revoke
Returns:
RevokeSigningKeyResponse
Raises:
AuthenticationError: If the provided Momento Auth Token is invalid.
ClientSdkError: For any SDK checks that fail.
"""
coroutine = self._momento_async_client.revoke_signing_key(key_id)
return wait_for_coroutine(self._loop, coroutine)
def list_signing_keys(
self, next_token: Optional[str] = None
) -> ListSigningKeysResponse:
"""Lists all Momento signing keys for the provided auth token.
Args:
next_token: Token to continue paginating through the list. It's used to handle large paginated lists.
Returns:
ListSigningKeysResponse
Raises:
AuthenticationError: If the provided Momento Auth Token is invalid.
ClientSdkError: For any SDK checks that fail.
"""
coroutine = self._momento_async_client.list_signing_keys(next_token)
return wait_for_coroutine(self._loop, coroutine)
def set(
self,
cache_name: str,
key: str,
value: Union[str, bytes],
ttl_seconds: Optional[int] = None,
) -> CacheSetResponse:
"""Stores an item in cache
Args:
cache_name: Name of the cache to store the item in.
key (string or bytes): The key to be used to store item.
value (string or bytes): The value to be stored.
ttl_seconds (Optional): Time to live in cache in seconds. If not provided, then default TTL for the cache
client instance is used.
Returns:
CacheSetResponse
Raises:
InvalidArgumentError: If validation fails for the provided method arguments.
BadRequestError: If the provided inputs are rejected by server because they are invalid
NotFoundError: If the cache with the given name doesn't exist.
AuthenticationError: If the provided Momento Auth Token is invalid.
InternalServerError: If server encountered an unknown error while trying to store the item.
"""
coroutine = self._momento_async_client.set(cache_name, key, value, ttl_seconds)
return wait_for_coroutine(self._loop, coroutine)
def multi_set(
self,
cache_name: str,
items: Union[Mapping[str, str], Mapping[bytes, bytes]],
ttl_seconds: Optional[int] = None,
) -> CacheMultiSetResponse:
"""Store items in the cache.
Args:
cache_name: Name of the cache to store the item in.
items: (Union[Mapping[str, str], Mapping[bytes, bytes]]): The items to store.
ttl_seconds: (Optional[int]): The TTL to apply to each item. Defaults to None.
Returns:
CacheMultiSetResponse
Raises:
InvalidArgumentError: If validation fails for the provided method arguments.
BadRequestError: If the provided inputs are rejected by server because they are invalid
NotFoundError: If the cache with the given name doesn't exist.
AuthenticationError: If the provided Momento Auth Token is invalid.
InternalServerError: If server encountered an unknown error while trying to retrieve the item.
"""
coroutine = self._momento_async_client.multi_set(cache_name, items, ttl_seconds)
return wait_for_coroutine(self._loop, coroutine)
def get(self, cache_name: str, key: str) -> CacheGetResponse:
"""Retrieve an item from the cache
Args:
cache_name: Name of the cache to get the item from
key (string or bytes): The key to be used to retrieve the item.
Returns:
CacheGetResponse
Raises:
InvalidArgumentError: If validation fails for the provided method arguments.
BadRequestError: If the provided inputs are rejected by server because they are invalid
NotFoundError: If the cache with the given name doesn't exist.
AuthenticationError: If the provided Momento Auth Token is invalid.
InternalServerError: If server encountered an unknown error while trying to retrieve the item.
"""
coroutine = self._momento_async_client.get(cache_name, key)
return wait_for_coroutine(self._loop, coroutine)
def multi_get(
self, cache_name: str, *keys: Union[str, bytes]
) -> CacheMultiGetResponse:
"""Retrieve multiple items from the cache.
Args:
cache_name (str): Name of the cache to get the item from.
keys: (Union[str, bytes]): The keys used to retrieve the items.
Returns:
CacheMultiGetResponse
Raises:
InvalidArgumentError: If validation fails for the provided method arguments.
BadRequestError: If the provided inputs are rejected by server because they are invalid
NotFoundError: If the cache with the given name doesn't exist.
AuthenticationError: If the provided Momento Auth Token is invalid.
InternalServerError: If server encountered an unknown error while trying to retrieve the item.
"""
coroutine = self._momento_async_client.multi_get(cache_name, *keys)
return wait_for_coroutine(self._loop, coroutine)
def delete(self, cache_name: str, key: str) -> CacheDeleteResponse:
"""Delete an item from the cache.
Performs a no-op if the item is not in the cache.
Args:
cache_name: Name of the cache to delete the item from.
key (string or bytes): The key to delete.
Returns:
CacheDeleteResponse
Raises:
InvalidArgumentError: If validation fails for provided method arguments.
BadRequestError: If the provided inputs are rejected by server because they are invalid
NotFoundError: If the cache with the given name doesn't exist.
AuthenticationError: If the provided Momento Auth Token is invalid.
InternalServerError: If server encountered an unknown error while trying to delete the item.
"""
coroutine = self._momento_async_client.delete(cache_name, key)
return wait_for_coroutine(self._loop, coroutine)
def init(
auth_token: str,
item_default_ttl_seconds: int,
request_timeout_ms: Optional[int] = None,
) -> SimpleCacheClient:
"""Creates a SimpleCacheClient
Args:
auth_token: Momento Token to authenticate the requests with Simple Cache Service
item_default_ttl_seconds: A default Time To Live in seconds for cache objects created by this client. It is
possible to override this setting when calling the set method.
request_timeout_ms: An optional timeout in milliseconds to allow for Get and Set operations to complete.
Defaults to 5 seconds. The request will be terminated if it takes longer than this value and will result
in TimeoutError.
Returns:
SimpleCacheClient
Raises:
IllegalArgumentError: If method arguments fail validations
"""
_validate_request_timeout(request_timeout_ms)
return SimpleCacheClient(auth_token, item_default_ttl_seconds, request_timeout_ms)
| 2.375 | 2 |
test/test-maps-map.py | bartaelterman/pygbif | 37 | 12766562 | <gh_stars>10-100
"""Tests for maps module - maps"""
import pytest
import unittest
import vcr
import requests
import matplotlib
matplotlib.use("Agg")
import pygbif
class TestMapsClass(unittest.TestCase):
@vcr.use_cassette("test/vcr_cassettes/test_map.yaml")
def test_map(self):
"maps.map - basic test"
res = pygbif.maps.map(taxonKey=2435098)
self.assertIsInstance(res, pygbif.maps.GbifMap)
self.assertIsInstance(res.response, requests.Response)
self.assertIsInstance(res.path, str)
self.assertIsInstance(res.img, matplotlib.image.AxesImage)
def test_map_year_range(self):
"maps.map - year range"
res = pygbif.maps.map(taxonKey=2435098, year=range(2007, 2011 + 1))
self.assertIsInstance(res, pygbif.maps.GbifMap)
self.assertRegex(res.response.request.path_url, "2007%2C2011")
# self.assertIsInstance(res.path, str)
# self.assertIsInstance(res.img, matplotlib.image.AxesImage)
def test_map_basisofrecord_str_class(self):
"maps.map - basisofrecord"
res = pygbif.maps.map(
taxonKey=2480498, year=2010, basisOfRecord="HUMAN_OBSERVATION"
)
self.assertIsInstance(res, pygbif.maps.GbifMap)
self.assertRegex(res.response.request.path_url, "basisOfRecord")
self.assertRegex(res.response.request.path_url, "HUMAN_OBSERVATION")
def test_map_basisofrecord_list_class(self):
"maps.map - basisofrecord"
res = pygbif.maps.map(
taxonKey=2480498,
year=2010,
basisOfRecord=["HUMAN_OBSERVATION", "LIVING_SPECIMEN"],
)
self.assertIsInstance(res, pygbif.maps.GbifMap)
self.assertRegex(res.response.request.path_url, "basisOfRecord")
self.assertRegex(res.response.request.path_url, "HUMAN_OBSERVATION")
self.assertRegex(res.response.request.path_url, "LIVING_SPECIMEN")
def test_maps_fails_well(self):
"maps.map - fails well"
with pytest.raises(ValueError):
pygbif.maps.map(year=2300)
pygbif.maps.map(year="2010")
pygbif.maps.map(basisOfRecord="foobar")
pygbif.maps.map(format="foobar")
pygbif.maps.map(source="foobar")
pygbif.maps.map(srs="foobar")
pygbif.maps.map(bin="foobar")
pygbif.maps.map(style="foobar")
| 2.4375 | 2 |
ddpro_website/base_ui/admin.py | Rolloniel/ddpro_website_backend | 0 | 12766563 | <reponame>Rolloniel/ddpro_website_backend
from django.contrib import admin
from .models import About, TeamMember, Product
@admin.register(About)
class AboutAdmin(admin.ModelAdmin):
list_display = ['heading', 'text_html']
@admin.register(TeamMember)
class TeamMemberAdmin(admin.ModelAdmin):
list_display = ['first_name', 'last_name', 'image', 'priority', 'roles']
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ['title', 'image', 'short_description'] | 1.84375 | 2 |
run.py | Divisibility/l5r-game-master-tool | 2 | 12766564 | #!/usr/bin/env python
from gmt import app
app.run(debug=app.config['DEBUG'], port=app.config['PORT'])
| 1.4375 | 1 |
worker/jobs/register.py | jlbrewe/hub | 30 | 12766565 | import io
import json
import logging
import os
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
import httpx
from jobs.base.job import Job
from jobs.convert import Convert
logger = logging.getLogger(__name__)
class Register(Job):
"""
A job that registers a DOI with a registration agency.
Currently, the only DOI supported in Crossref, although this could be expand
to other agencies in the future.
See https://www.crossref.org/education/member-setup/direct-deposit-xml/https-post/.
"""
name = "register"
def __init__(
self, server: Optional[str] = None, credentials: Optional[str] = None,
):
super().__init__()
self.server = server
self.credentials = credentials
def do(self, node: dict, doi: str, url: str, batch: str, *args, **kwargs) -> dict: # type: ignore
assert node is not None
assert "type" in node and node["type"] in ("Article", "Review")
# Generate Crossref deposit XML
json_str = json.dumps(node).encode("utf-8")
xml = Convert().do(
json_str, "-", {"from": "json", "to": "crossref", "doi": doi, "url": url}, # type: ignore
)
if not xml:
raise RuntimeError("Failed to convert node to Crossref XML")
# Replace batch id and email
xml = re.sub(
r"<doi_batch_id>[^<]*</doi_batch_id>",
f"<doi_batch_id>{batch}</doi_batch_id>",
xml,
)
xml = re.sub(
r"<email_address>[^<]*</email_address>",
r"<email_address><EMAIL></email_address>",
xml,
)
server = self.server or os.getenv("CROSSREF_DEPOSIT_SERVER")
if not server:
# If no server explicitly defined then use test server.
# Do not fallback to production server to avoid inadvertent
# use during testing.
server = "https://test.crossref.org/servlet/deposit"
credentials = self.credentials or os.getenv("CROSSREF_DEPOSIT_CREDENTIALS")
if not credentials:
# If no credentials were available for the registration agency
# then log a warning and return an empty dictionary.
# This allows testing during development without having to have
# credentials
logger.warning("Credentials for DOI registrar are not available")
return dict()
# Deposit XML
username, password = credentials.split(":")
deposited = datetime.utcnow().isoformat()
response = httpx.post(
server,
data=dict(login_id=username, login_passwd=password),
files=dict(fname=io.BytesIO(xml.encode())),
)
# Crossref returns 200 response with an error message for bad login credentials
# so we need to check for 'SUCCESS' in the response body
deposit_success = response.status_code == 200 and "SUCCESS" in response.text
if not deposit_success:
logger.error("Unexpected response from Crossref")
# Return details of this job
return dict(
deposited=deposited,
deposit_request=dict(body=xml),
deposit_response=dict(
status=dict(code=response.status_code),
headers=dict(response.headers),
body=response.text,
),
deposit_success=deposit_success,
)
| 2.28125 | 2 |
ingenico/connect/sdk/log/sys_out_communicator_logger.py | Ingenico-ePayments/connect-sdk-python2 | 2 | 12766566 | from __future__ import print_function
from datetime import datetime
from threading import Lock
from ingenico.connect.sdk.log.python_communicator_logger import \
CommunicatorLogger
class SysOutCommunicatorLogger(CommunicatorLogger):
    """
    A communicator logger that prints its messages to sys.stdout.

    Each message is prefixed with a timestamp in yyyy-MM-ddTHH:mm:ss format
    in the system time zone. Printing is serialized through a class-level
    lock so concurrent log calls do not interleave their output.
    """
    def __init__(self):
        CommunicatorLogger.__init__(self)
    # Shared by all instances: one lock guards stdout, and the print function
    # is captured once at class-definition time so a later rebinding of
    # ``print`` does not affect this logger.
    _global_lock = Lock()
    _old_print = print
    @staticmethod
    def INSTANCE():
        # Returns the shared module-level singleton defined below the class.
        return _SYS_OUT_COMMUNICATOR_LOGGER_INSTANCE
    def __print(self, *a):
        # Serialize printing so messages from different threads do not mix.
        with self._global_lock:
            self._old_print(*a)
    def log(self, message, thrown=None):
        # Make sure the same object is used for locking and printing
        self.__print(self.__get_date_prefix() + message)
        if thrown:
            # The optional exception is printed on its own line, after the
            # timestamped message.
            self.__print(str(thrown))
    def __get_date_prefix(self):
        # Local (system time zone) timestamp with a trailing space separator.
        return datetime.now().strftime("%Y-%m-%dT%H:%M:%S ")
_SYS_OUT_COMMUNICATOR_LOGGER_INSTANCE = SysOutCommunicatorLogger()
| 2.84375 | 3 |
MoireTest/dealjpg.py | YuLingFengSCNU2017/MoireFitting | 1 | 12766567 | <gh_stars>1-10
"""=========================================================================================
Date : 2020.03.20
Author : 玩具反斗星
WeChat : SCNU20172321114
Note : 1、本程序用于莫尔条纹的精确检测与计数。
2、请确保运行环境正确,在运行前请先测试图像是否清晰。
3、本程序针对的对象为与水平成-45°≤α≤45°的莫尔条纹的竖直运动。
4、莫尔条纹与水平面成倾斜角越接近45°越好(防止数据溢出),但是不能超过45°。
5、坐标转换说明: (x,y) = (j,-i) 。条纹位于(x,y)系的第四象限内。
How To Set Parameters:
1.按下1、2、3或4分别进入参数1、2、3、4的设置
(去噪等级,自适应区间范围,自适应偏移量)
2.进入设置后按下+或-调整参数大小
(控制台会有提示)
3.随时可以按Esc退出。当图片大小合适后,按回车完成。
========================================================================================="""
'''设置头文件和参数'''
import numpy as np
from numpy import array
import cv2
jpgfile = '1.jpg'
nl, rgs, ecs, kewd = 4, 37, 13, 0
# 去噪等级noise level,自适应区间范围ranges,自适应偏移量excursion,开运算核宽kernel width
rsp = 3 # 拟合预留像素reserved pixel
'''======================================================================================'''
'''定义函数'''
'''1,转化为灰度图;2,去椒盐噪声。'''
def gg(filename): # get gray (remove salt-and-pepper noise)
    """
    Read the image file, convert it to grayscale, and remove salt-and-pepper
    noise with a median blur. The blur kernel size (2 * nl + 1) is kept odd,
    as required by cv2.medianBlur. Returns the denoised grayscale image.
    """
    im = cv2.imread(filename) # read the image file
    im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY) # convert to grayscale
    im = cv2.medianBlur(im, 2 * nl + 1) # median blur removes salt-and-pepper noise
    return im
'''3,直方图均衡化;4,图像二值化。'''
def ib(im, Get_equalizeHist=0): # image binarization
    """
    Equalize the histogram of the input image and binarize it with a locally
    adaptive threshold, returning the binarized image.

    If ``Get_equalizeHist`` is non-zero, additionally binarize the *raw*
    (non-equalized) input the same way and return both images as a tuple
    (equalized-binarized, raw-binarized).
    """
    ibim1 = cv2.equalizeHist(im)
    # im = cv2.threshold(im,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) # non-adaptive (global) alternative
    ibim1 = cv2.adaptiveThreshold(
        ibim1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, rgs * 2 + 1, ecs)
    # Note: 2nd/3rd parameters are the max value and the thresholding mode;
    # rgs * 2 + 1 guarantees an odd neighborhood size, ecs is the offset.
    kernel = np.ones((2 * kewd + 1, 2 * kewd + 1), np.uint8) # kernel for the opening operation
    ibim1 = cv2.morphologyEx(ibim1, cv2.MORPH_OPEN, kernel) # opening (erode then dilate) removes specks
    if Get_equalizeHist == 0:
        return ibim1
    else:
        ibim2 = cv2.adaptiveThreshold(
            im, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, rgs * 2 + 1, ecs)
        ibim2 = cv2.morphologyEx(ibim2, cv2.MORPH_OPEN, kernel) # opening
        return ibim1, ibim2
'''5,图像细化。''' # Zhang-Suen算法
def ts(im, pixel=2, kewd2=kewd): # thinning step
    """
    Extract the skeleton of the Moire fringes from a binary image using the
    Zhang-Suen thinning algorithm, after a dilation pass. Returns the central
    part of the skeleton image, cropped by ``pixel`` pixels on each border.
    """
    kernel2 = np.ones((2 * kewd2 + 1, 2 * kewd2 + 1), np.uint8) # dilation kernel
    im = cv2.dilate(im, kernel2) # dilate to close small gaps before thinning
    delstep = [1, 1] # per-subiteration "something was deleted" flags; [0, 0] ends the loop
    step = 1 # indicates whether the current pass is Zhang-Suen step 1 or step 2
    # im[0,:]=im[im.shape[0]-1,:]=im[:,0]=im[:,im.shape[1]-1]=255 # would add a border skeleton if cdt1 allowed 1
    while (delstep != [0, 0]): # stop only when both subiterations delete nothing
        # for ii9 in range(200):
        step = 3 - step # alternate 1 <-> 2 between passes
        delstep[step - 1] = 0 # clear this subiteration's flag
        im0 = im / 255 # im0 is the read-only decision board; deletions are marked on im
        countdelpoint = 0 # deleted-pixel counter (diagnostic)
        countpixel = 0
        jump = 0
        for i in range(im0.shape[0]):
            if (i * (i - im0.shape[0] + 1)) == 0:
                continue # skip the first and last rows (no full 3x3 neighborhood)
            for j in range(im0.shape[1]):
                countpixel += 1
                if (j * (j - im0.shape[1] + 1)) * (im0[i][j]) == 0:
                    jump += 1
                    continue # skip border columns and background pixels
                '''Evaluate the four Zhang-Suen conditions: condition1~4'''
                [[p9, p2, p3], [p8, p1, p4], [p7, p6, p5]] = im0[i - 1:i + 2, j - 1:j + 2]
                pdata = [p2, p3, p4, p5, p6, p7, p8, p9] # the 8 neighbors, clockwise from north
                cdt1 = 1 if 1 <= (sum(pdata)) <= 6 else 0
                cdt2 = 1 if (sum((1 - pdata[x - 1]) * pdata[x] for x in range(8))) == 1 else 0
                cdt3 = 1 - (p2 * p4 * pdata[2 + 2 * step])
                cdt4 = 1 - (p6 * p8 * pdata[4 - 2 * step])
                '''Delete this pixel?'''
                cdt0 = cdt1 * cdt2 * cdt3 * cdt4 # pixel is marked for deletion when all conditions hold
                if cdt0 == 1:
                    countdelpoint += 1
                    im[i][j] = 0 # delete on the marking board
                    delstep = [1, 1] # something changed: keep iterating
        # NOTE(review): the imshow/waitKey below look like leftover debug code;
        # waitKey(0) blocks on a keypress after every thinning pass — confirm
        # whether this is intentional before running unattended.
        cv2.imshow('pic0', im0)
        cv2.waitKey(0)
    im0 = im[pixel - 1:im.shape[0] - 2 * pixel - 1, pixel - 1:im.shape[1] - 2 * pixel - 1] # crop the border
    return im0
'''6,图像选点;7,最小二乘法拟合。'''
def cf(im, totalfit=3): # catch points and fit lines
    """
    Trace pixels of the skeleton image and fit straight lines to them.

    ``totalfit`` is the number of fringes to fit (default 3, at most 5).
    Coordinates use (x, y) = (j, -i), i.e. j to the right and i downward.
    The slope must exist (fringes must not be vertical).
    Returns [mean slope, [intercept of each line]].
    """
    linedata = [] # all traced lines; each element is one perline list
    perline = [] # points of one line, each point is [x, y] = [j, -i]
    point1 = [] # row indices of skeleton pixels in the first column (line start points)
    for p1i in range(im.shape[0] - 1):
        if im[p1i, 0] != 0: point1.append(p1i)
    ## di = [0,1,-1] # points must be contiguous
    i, j, k = 0, 0, 0 # index variables
    midline = int(len(point1) / 2) # index of the middle line
    # NOTE(review): the two ±1 offsets are each listed twice below; entries 3
    # and 4 look like they were meant to be midline-2 / midline+2 — confirm.
    lines = [point1[midline], point1[midline - 1], point1[midline + 1],
             point1[midline - 1], point1[midline + 1]]
    lines = lines[0:totalfit - 1] # keep the central few lines
    for i0 in lines: # trace each selected line
        j, i = 0, i0 # the first point of the line is (row i0, column 0)
        perline = [] # reset the per-line point list
        while (0 <= i < im.shape[0] - rsp) and (0 <= j < im.shape[1] - rsp): # while inside the image (minus the reserve)
            perline.append([j, -i]) # record the point as (x, y)
            # Follow the skeleton rightwards, preferring diagonal moves.
            if im[i - 1, j + 1] == 1:
                i, j = i - 1, j + 1
            elif im[i + 1, j + 1] == 1:
                i, j = i + 1, j + 1
            elif im[i, j + 1] == 1:
                j = j + 1
            elif im[i - 1, j] == 1:
                # Vertical run above: advance one column, then climb until a
                # skeleton pixel is found again.
                j = j + 1
                while (im[i, j] == 0):
                    if (0 <= i - 1 < im.shape[0] - rsp):
                        i = i - 1 # still inside the image
                    else:
                        print('err - out range')
                        exit(0)
            elif im[i + 1, j] == 1:
                # Vertical run below: advance one column, then descend.
                j = j + 1
                while (im[i, j] == 0):
                    if (0 <= i + 1 < im.shape[0] - rsp):
                        i = i + 1 # still inside the image
                    else:
                        print('err - out range')
                        exit(0)
            else:
                print('all zero, return error!')
                exit(0)
        linedata.append(perline) # linedata now holds the traced points of each line
    '''Fit each traced line with a degree-1 least-squares polynomial (np.polyfit)
    to obtain its slope and intercept.'''
    linesKB_list = []
    for oneline in linedata: # fit every line separately
        oneline_array = array(oneline)
        linesKB_list.append(np.polyfit(oneline_array[:, 0], oneline_array[:, 1], 1))
    linesKB = array(linesKB_list) # as a numpy array so slope/intercept columns are easy to slice
    slope = sum(list(linesKB[:, 0])) / len(list(linesKB[:, 0])) # mean slope
    intercept = linesKB[:, 1] # intercept of each line
    return [slope, list(intercept)]
'''8,平均斜率与条纹间距;9,两个图之间的最小位移。'''
def dtit(pic1, pic2): # distance & interval
    """
    Compute fringe displacement and spacing from two fitted-line results.

    Both arguments have the form [slope, [intercept, ...]] and must contain
    the same number of lines. Returns [minD, MoireD, aveK] where minD is the
    mean of the smallest line-to-line distances between the two images,
    MoireD is the mean Moire fringe spacing, and aveK is the average slope.
    """
    aveK = 0.5 * pic1[0] + 0.5 * pic2[0]
    # Every pairwise intercept gap between the two images (candidate shifts).
    shift_gaps = [abs(b1 - b2) for b1 in pic1[1] for b2 in pic2[1]]
    # Adjacent-intercept gaps within each image (fringe-spacing samples).
    spacing_gaps = []
    for idx in range(len(pic1[1]) - 1):
        spacing_gaps.append(abs(pic1[1][idx + 1] - pic1[1][idx]))
        spacing_gaps.append(abs(pic2[1][idx + 1] - pic2[1][idx]))
    # The smallest few gaps correspond to matching lines (the displacement).
    smallest = array(sorted(shift_gaps)[0:(len(pic1[1]) - 1)])
    slc = 1 / np.sqrt(1 + aveK ** 2) # tilt factor 1/sqrt(1 + k^2)
    minD = np.average(smallest) * slc # line distance = intercept gap * slc
    MoireD = np.average(spacing_gaps) * slc # fringe spacing = adjacent gap * slc
    return [minD, MoireD, aveK]
'''调整参数'''
def set_parameters(filename, param_weight=[1, 1, 2, 1]):
    # NOTE: the mutable default is safe here because param_weight is only read.
    """
    Interactively tune the global processing parameters on the given image.

    Keys 1-4 select a parameter (noise level, adaptive range, adaptive
    offset, kernel width); +/- adjust the selected parameter by its weight;
    Enter saves the current image and accepts; Esc aborts. Mutates the
    module-level globals directly, so there is no return value.
    """
    global nl, rgs, ecs, kewd
    img = gg(filename) # grayscale + denoise
    img = ib(img) # equalize + binarize
    params = [nl, rgs, ecs, kewd]
    print('originparams=[{},{},{},{}]'.format(nl, rgs, ecs, kewd))
    OKset = 0
    while (OKset == 0):
        cv2.imshow('image', img)
        k = cv2.waitKey(0)
        if k == 27: # wait for ESC key to exit
            cv2.destroyAllWindows()
            OKset = 1
            break
        elif k == 13: # Set parameters: OK
            cv2.imwrite('{}.jpg'.format(params), img)
            OKset = 1
            cv2.destroyAllWindows()
            print('Last parameters:', params)
            break
        elif k in [49, 50, 51, 52]: # ord('1','2','3','4')
            numpara = k - 49
            paramtext = [' NoiseLevel', '   Ranges  ', ' Excursion ', 'KernelWidth']
            print('====set===', paramtext[numpara], '====')
            continue
        elif k in [43, 45]: # ord('+','-')
            # NOTE(review): if +/- is pressed before any of keys 1-4,
            # ``numpara`` is unbound and this raises NameError — confirm
            # whether a default selection should be added.
            dvalue = 44 - k # -1 or 1
            params[numpara] = params[numpara] + dvalue * param_weight[numpara]
            nl, rgs, ecs, kewd = params # write the adjusted values back to the globals
            img = gg(filename) # grayscale + denoise with the new parameters
            img = ib(img) # equalize + binarize with the new parameters
            print('parameters:', params)
        else:
            continue
'''======================================================================================'''
'主函数'
def main():
    """
    Entry point: interactively tune the parameters on the source image, then
    run the pipeline (grayscale/denoise -> binarize -> thin) and write the
    intermediate results to disk.
    """
    set_parameters(jpgfile)
    im1 = gg(jpgfile)
    im2 = ib(im1)
    cv2.imwrite('2-01.jpg', im2)
    im2 = ts(im2, 0)
    cv2.imwrite('2-02.jpg', im2)
    # cv2.imwrite('2-1.jpg',im2[1])
    # set_parameters(im1,[1,1,1])
# set_parameters(im1,[1,1,1])
'主程序'
if __name__ == '__main__':
main()
| 2.1875 | 2 |
lectures/code/mpl_wire.py | naskoch/python_course | 4 | 12766568 | from mpl_toolkits.mplot3d import axes3d
ax = plt.subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.1)
ax.plot_wireframe(X, Y, Z, linewidth=0.1)
plt.savefig('wire.pdf')
| 2.203125 | 2 |
ABInBev Bot/databaseconn.py | Aryamaan23/IVR-chatbot | 33 | 12766569 | import mysql.connector
def DataUpdate(Name, Email, Contact):
    """
    Insert one signup record (name, email, contact) into the
    ``mail_signups.CustomersInfo`` table and commit the transaction.

    The values are bound as query parameters instead of being interpolated
    into the SQL string, which prevents SQL injection and quoting bugs.
    """
    mydb = mysql.connector.connect(
        host="localhost",
        user="root",
        passwd="<PASSWORD>",
        database="mail_signups",
        auth_plugin='mysql_native_password'
    )
    mycursor = mydb.cursor()
    #sql="CREATE TABLE CustomersInfo (name VARCHAR(255), email VARCHAR(255), contact VARCHAR(255));"
    # Parameterized query: the connector escapes the values safely (the
    # original built the statement with str.format, which was injectable).
    sql = 'INSERT INTO CustomersInfo (name, email, contact) VALUES (%s, %s, %s);'
    mycursor.execute(sql, (Name, Email, Contact))
    mydb.commit()
    print(mycursor.rowcount, "record inserted.")
if __name__=="__main__":
DataUpdate("Aryamaan", "<EMAIL>","7355949951")
| 3.078125 | 3 |
tests/cyclic/bar.py | topwebmaster/factory_boy | 1,932 | 12766570 | # Copyright: See the LICENSE file.
"""Helper to test circular factory dependencies."""
import factory
class Bar:
    """Plain container pairing a related ``foo`` object with a value ``y``."""
    def __init__(self, foo, y):
        # Store both constructor arguments verbatim as public attributes.
        self.y = y
        self.foo = foo
class BarFactory(factory.Factory):
    """Factory for :class:`Bar`.

    ``foo`` is declared as a SubFactory by dotted import path (a string)
    rather than a direct reference, which breaks the circular import between
    this module and ``cyclic.foo``.
    """
    class Meta:
        model = Bar
    y = 13
    foo = factory.SubFactory('cyclic.foo.FooFactory')
| 2.828125 | 3 |
django_todos/todos/migrations/0003_auto_20180418_2216.py | squadran2003/django-todos | 0 | 12766571 | # Generated by Django 2.0.4 on 2018-04-18 22:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; restricts the ``status``
    # field of the ``Status`` model to the two allowed choices with a
    # default of 'NOT DONE'. Do not edit by hand.
    dependencies = [
        ('todos', '0002_auto_20180418_2213'),
    ]
    operations = [
        migrations.AlterField(
            model_name='status',
            name='status',
            field=models.CharField(choices=[('NOT DONE', 'NOT DONE'), ('DONE', 'DONE')], default='NOT DONE', max_length=255),
        ),
    ]
| 1.625 | 2 |
Pynq-ZU/base/notebooks/rpi/SenseHat/sensehat/lps25h.py | Zacarhay/PYNQ-ZU | 6 | 12766572 | <filename>Pynq-ZU/base/notebooks/rpi/SenseHat/sensehat/lps25h.py
# Copyright (c) 2020, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .i2c_device import *
# LPS25H I2C Slave Addresses
_LPS25H_ADDRESS0 = 0x5c
_LPS25H_ADDRESS1 = 0x5d
_LPS25H_REG_ID = 0x0f
_LPS25H_ID = 0xbd
# Register map
_LPS25H_REF_P_XL = 0x08
_LPS25H_REF_P_XH = 0x09
_LPS25H_RES_CONF = 0x10
_LPS25H_CTRL_REG_1 = 0x20
_LPS25H_CTRL_REG_2 = 0x21
_LPS25H_CTRL_REG_3 = 0x22
_LPS25H_CTRL_REG_4 = 0x23
_LPS25H_INT_CFG = 0x24
_LPS25H_INT_SOURCE = 0x25
_LPS25H_STATUS_REG = 0x27
_LPS25H_PRESS_OUT_XL = 0x28
_LPS25H_PRESS_OUT_L = 0x29
_LPS25H_PRESS_OUT_H = 0x2a
_LPS25H_TEMP_OUT_L = 0x2b
_LPS25H_TEMP_OUT_H = 0x2c
_LPS25H_FIFO_CTRL = 0x2e
_LPS25H_FIFO_STATUS = 0x2f
_LPS25H_THS_P_L = 0x30
_LPS25H_THS_P_H = 0x31
_LPS25H_RPDS_L = 0x39
_LPS25H_RPDS_H = 0x3a
class LPS25H:
    """
    Transport-agnostic driver core for the ST LPS25H pressure/temperature
    sensor. Subclasses (e.g. ``LPS25H_I2C``) supply the raw register access
    by overriding ``_read_u8`` / ``_read_bytes`` / ``_write_u8``.
    """
    # Scratch buffer shared by all register transactions (class-level, so it
    # is shared across instances as well).
    _BUFFER = bytearray(6)
    def __init__(self):
        # Power up the device and select the output data rate (CTRL_REG_1),
        # set pressure/temperature resolution (RES_CONF), configure the FIFO
        # (FIFO_CTRL) and enable it (CTRL_REG_2). Values match the SenseHat
        # default configuration.
        self._write_u8(_LPS25H_CTRL_REG_1, 0xc4)
        self._write_u8(_LPS25H_RES_CONF, 0x05)
        self._write_u8(_LPS25H_FIFO_CTRL, 0xc0)
        self._write_u8(_LPS25H_CTRL_REG_2, 0x40)
    @property
    def pressure(self):
        # Pressure reading in hPa (raw 24-bit value divided by 4096 LSB/hPa).
        press = self.read_press_raw()
        return press
    def read_press_raw(self):
        # Read the three pressure output registers in one burst; 0x80 sets
        # the register auto-increment bit.
        self._read_bytes(_LPS25H_PRESS_OUT_XL + 0x80, 3, self._BUFFER)
        press = (((self._BUFFER[2] & 0xFF) << 16) |((self._BUFFER[1] & 0xFF) << 8) | (self._BUFFER[0] & 0xFF))/4096
        return press
    @property
    def temperature(self):
        # Temperature reading in degrees Celsius.
        temp = self.read_temp_raw()
        return temp
    def read_temp_raw(self):
        # Read the two temperature output registers (auto-increment via 0x80).
        self._read_bytes(_LPS25H_TEMP_OUT_L + 0x80, 2, self._BUFFER)
        temp = ((self._BUFFER[1] & 0xFF) << 8) | (self._BUFFER[0] & 0xFF)
        # Sign-extend the 16-bit two's-complement value, then apply the
        # datasheet conversion: T(degC) = 42.5 + raw / 480.
        temp = ((temp & 0x7FFF) - (temp & 0x8000)) / 480 + 42.5
        return temp
    def _read_u8(self, address):
        # Transport hook: read one byte from a register.
        raise NotImplementedError()
    def _read_bytes(self, address, count, buf):
        # Transport hook: read ``count`` bytes starting at ``address`` into ``buf``.
        raise NotImplementedError()
    def _write_u8(self, address, val):
        # Transport hook: write one byte to a register.
        raise NotImplementedError()
raise NotImplementedError()
class LPS25H_I2C(LPS25H):
    """I2C transport for the LPS25H driver (slave address 0x5c)."""
    def __init__(self, i2c):
        self._device = I2CDevice(i2c, _LPS25H_ADDRESS0)
        super().__init__()
    def _read_u8(self, address):
        device = self._device
        with device as i2c:
            # Write the register address (repeated start, no stop), then
            # read back one byte.
            self._BUFFER[0] = address & 0xFF
            i2c.write(self._BUFFER, end=1, stop=False)
            i2c.readinto(self._BUFFER, end=1)
        return self._BUFFER[0]
    def _read_bytes(self, address, count, buf):
        device = self._device
        with device as i2c:
            # Write the (auto-increment) start address, then burst-read
            # ``count`` bytes into the caller's buffer.
            buf[0] = address & 0xFF
            i2c.write(buf, end=1, stop=False)
            i2c.readinto(buf, end=count)
    def _write_u8(self, address, val):
        device = self._device
        with device as i2c:
            # Single register write: [address, value].
            self._BUFFER[0] = address & 0xFF
            self._BUFFER[1] = val & 0xFF
            i2c.write(self._BUFFER, end=2)
src/view/widgets/field_lookup.py | gustavosaquetta/FrameworkSSQt-Vocatus | 0 | 12766573 | import os, sys
sys.path.append(os.getcwd())
from PyQt5.QtWidgets import QApplication, QWidget
from src.controller.lib.ssqt import SSQt
SSQt.load_uifile(True, 'src/view/widgets/ui/field_lookup.ui')
from src.view.widgets.ui.field_lookup import Ui_FieldLookup
class FieldLookupView(QWidget, Ui_FieldLookup, SSQt):
    """Widget wrapping the ``field_lookup`` form generated from the .ui file."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)  # build the widget tree defined by Qt Designer
        print('FieldLookup')
if __name__ == "__main__":
app = QApplication(sys.argv)
w = FieldLookupView()
w.show()
sys.exit(app.exec_())
| 2.578125 | 3 |
utils_ucsi.py | iofthetiger/ucsi | 1 | 12766574 | <gh_stars>1-10
import os
import cv2
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import albumentations as albu
from albumentations import pytorch as AT
import torchvision
import torchvision.transforms as transforms
import torch
from torch import nn
import segmentation_models_pytorch as smp
from torch.utils.data import TensorDataset, DataLoader,Dataset
from pathlib import Path
HOME = Path(os.environ["HOME"])
path = '/data2/nelson_projs/ucsi'
INPUT_SIZE = (704,1006)
class CloudDataset(Dataset):
    """
    PyTorch dataset yielding (image, 4-channel mask) pairs for the cloud
    segmentation task. Images are read from the train/test folder depending
    on ``datatype``; masks are decoded from the RLE dataframe ``df``.
    """
    def __init__(self, df: pd.DataFrame = None, datatype: str = 'train', img_ids: np.array = None,
                 transforms = albu.Compose([albu.HorizontalFlip(),AT.ToTensor()]),
                preprocessing=None):
        # df: RLE table with 'im_id' and 'EncodedPixels' columns (not needed
        #     for test data without labels).
        # img_ids: image file names served by this dataset split.
        # transforms: albumentations pipeline applied to image and mask.
        # preprocessing: optional encoder-specific normalization pipeline.
        self.df = df
        if datatype != 'test':
            self.data_folder = f"{path}/train_images"
        else:
            self.data_folder = f"{path}/test_images"
        self.img_ids = img_ids
        self.transforms = transforms
        self.preprocessing = preprocessing
    def __getitem__(self, idx):
        image_name = self.img_ids[idx]
        mask = make_mask(self.df, image_name)
        image_path = os.path.join(self.data_folder, image_name)
        img = cv2.imread(image_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
        # Augment image and mask together so they stay aligned.
        augmented = self.transforms(image=img, mask=mask)
        img = augmented['image']
        mask = augmented['mask']
        if self.preprocessing:
            preprocessed = self.preprocessing(image=img, mask=mask)
            img = preprocessed['image']
            mask = preprocessed['mask']
        # return img.half()if FP16 else img, mask.half() if FP16 else mask
        return img,mask
    def __len__(self):
        return len(self.img_ids)
def get_img(x, folder: str='train_images'):
    """
    Return the RGB image named ``x`` from the given folder under the
    dataset root ``path``.
    """
    data_folder = f"{path}/{folder}"
    image_path = os.path.join(data_folder, x)
    img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
    return img
def rle_decode(mask_rle: str = '', shape: tuple = (1400, 2100)):
    '''
    Decode a run-length-encoded mask.

    :param mask_rle: run-length string of (1-based start, length) pairs
    :param shape: (height, width) of the array to return
    Returns a uint8 numpy array with 1 for mask and 0 for background.
    '''
    numbers = [int(token) for token in mask_rle.split()]
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    # Mark each (start, length) run; starts are 1-based in the encoding.
    for start, length in zip(numbers[0::2], numbers[1::2]):
        begin = start - 1
        flat[begin:begin + length] = 1
    # The competition's RLE is column-major, hence the Fortran ordering.
    return flat.reshape(shape, order='F')
def make_mask(df: pd.DataFrame, image_name: str='img.jpg', shape: tuple = (1400, 2100)):
    """
    Build the (H, W, 4) float32 mask stack for one image from the RLE table.

    ``df`` must contain an ``im_id`` column and an ``EncodedPixels`` column
    with one row per class in class order (Fish, Flower, Gravel, Sugar).
    Rows whose encoding is NaN leave the corresponding channel all zeros.
    """
    encoded_masks = df.loc[df['im_id'] == image_name, 'EncodedPixels']
    masks = np.zeros((shape[0], shape[1], 4), dtype=np.float32)
    for idx, label in enumerate(encoded_masks.values):
        if label is not np.nan:
            # Bug fix: forward the requested shape. Previously the default
            # (1400, 2100) was always used, which broke (raised a broadcast
            # error) whenever a non-default ``shape`` was passed.
            mask = rle_decode(label, shape)
            masks[:, :, idx] = mask
    return masks
def to_tensor(x, **kwargs):
    """
    Reorder an image or mask from HWC to CHW and cast to float32, as
    expected by PyTorch models. Extra keyword arguments are accepted (and
    ignored) so this can be used as an albumentations Lambda callback.
    """
    chw = np.transpose(x, (2, 0, 1))
    return chw.astype(np.float32)
def mask2rle(img):
    '''
    Convert a binary mask to its run-length encoding.

    img: numpy array, 1 - mask, 0 - background
    Returns the run-length string of (1-based start, length) pairs, scanned
    in column-major order.
    '''
    # Pad with zeros on both ends so every run has a detectable start and end.
    padded = np.concatenate([[0], img.T.flatten(), [0]])
    # Indices (1-based) where the value changes mark run boundaries.
    edges = np.where(padded[1:] != padded[:-1])[0] + 1
    edges[1::2] -= edges[::2]  # turn (start, end) pairs into (start, length)
    return ' '.join(str(value) for value in edges)
def visualize(image, mask, original_image=None, original_mask=None):
    """
    Plot an image next to its four class masks.

    If ``original_image``/``original_mask`` are also given, show two rows:
    the originals on top and the transformed pair below.
    """
    fontsize = 14
    class_dict = {0: 'Fish', 1: 'Flower', 2: 'Gravel', 3: 'Sugar'}
    if original_image is None and original_mask is None:
        # Single row: image followed by the four per-class masks.
        f, ax = plt.subplots(1, 5, figsize=(24, 24))
        ax[0].imshow(image)
        for i in range(4):
            ax[i + 1].imshow(mask[:, :, i])
            ax[i + 1].set_title(f'Mask {class_dict[i]}', fontsize=fontsize)
    else:
        # Two rows: originals on top, transformed pair below.
        f, ax = plt.subplots(2, 5, figsize=(24, 12))
        ax[0, 0].imshow(original_image)
        ax[0, 0].set_title('Original image', fontsize=fontsize)
        for i in range(4):
            ax[0, i + 1].imshow(original_mask[:, :, i])
            ax[0, i + 1].set_title(f'Original mask {class_dict[i]}', fontsize=fontsize)
        ax[1, 0].imshow(image)
        ax[1, 0].set_title('Transformed image', fontsize=fontsize)
        for i in range(4):
            ax[1, i + 1].imshow(mask[:, :, i])
            ax[1, i + 1].set_title(f'Transformed mask {class_dict[i]}', fontsize=fontsize)
def visualize_with_raw(image, mask, original_image=None, original_mask=None, raw_image=None, raw_mask=None):
    """
    Plot three rows of image + four class masks: the original pair, the raw
    model prediction, and the post-processed prediction.
    """
    fontsize = 14
    class_dict = {0: 'Fish', 1: 'Flower', 2: 'Gravel', 3: 'Sugar'}
    f, ax = plt.subplots(3, 5, figsize=(24, 12))
    # Row 0: ground truth.
    ax[0, 0].imshow(original_image)
    ax[0, 0].set_title('Original image', fontsize=fontsize)
    for i in range(4):
        ax[0, i + 1].imshow(original_mask[:, :, i])
        ax[0, i + 1].set_title(f'Original mask {class_dict[i]}', fontsize=fontsize)
    # Row 1: raw (unthresholded) prediction.
    ax[1, 0].imshow(raw_image)
    ax[1, 0].set_title('Original image', fontsize=fontsize)
    for i in range(4):
        ax[1, i + 1].imshow(raw_mask[:, :, i])
        ax[1, i + 1].set_title(f'Raw predicted mask {class_dict[i]}', fontsize=fontsize)
    # Row 2: prediction after post-processing.
    ax[2, 0].imshow(image)
    ax[2, 0].set_title('Transformed image', fontsize=fontsize)
    for i in range(4):
        ax[2, i + 1].imshow(mask[:, :, i])
        ax[2, i + 1].set_title(f'Predicted mask with processing {class_dict[i]}', fontsize=fontsize)
def plot_with_augmentation(image, mask, augment):
    """
    Apply the given albumentations transform to the image/mask pair and
    plot the augmented pair next to the originals (wrapper around
    :func:`visualize`).
    """
    augmented = augment(image=image, mask=mask)
    image_flipped = augmented['image']
    mask_flipped = augmented['mask']
    visualize(image_flipped, mask_flipped, original_image=image, original_mask=mask)
sigmoid = lambda x: 1 / (1 + np.exp(-x))
def post_process(probability, threshold, min_size):
    """
    Threshold one predicted probability mask and drop small blobs.

    Connected components with fewer pixels than ``min_size`` are ignored.
    Returns the cleaned (350, 525) binary mask and the number of surviving
    components.
    """
    # Binarize, then label the 4/8-connected components of the foreground.
    mask = cv2.threshold(np.float32(probability), threshold, 1, cv2.THRESH_BINARY)[1]
    num_component, component = cv2.connectedComponents(mask.astype(np.uint8))
    predictions = np.zeros((350, 525), np.float32)
    num = 0
    for c in range(1, num_component):
        p = (component == c)
        if p.sum() > min_size:
            # Keep only components strictly larger than min_size.
            predictions[p] = 1
            num += 1
    return predictions, num
def get_training_augmentation():
    """
    Training-time augmentation pipeline: blur, flips, brightness/contrast
    jitter, geometric distortions, then resize to the model input size.
    """
    train_transform = [
        albu.Blur(p=0.5),
        albu.Flip(p=0.5),
        albu.RandomBrightness(p=0.5),
        albu.RandomContrast(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=0.5, border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        albu.Resize(*INPUT_SIZE)
    ]
    return albu.Compose(train_transform)
def get_validation_augmentation():
    """Validation-time pipeline: only resize to the model input size
    (INPUT_SIZE is chosen so both dimensions are divisible by 32)."""
    test_transform = [
        albu.Resize(*INPUT_SIZE)
    ]
    return albu.Compose(test_transform)
def get_preprocessing(preprocessing_fn):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose — normalizes the image, then
        reorders both image and mask to CHW float32 via ``to_tensor``.
    """
    _transform = [
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(_transform)
def dice(img1, img2):
    """
    Dice coefficient between two binary masks: 2*|A & B| / (|A| + |B|).

    Inputs are converted to boolean arrays first. Note: when both masks are
    empty this divides 0 by 0 (NaN with a runtime warning), matching the
    original behavior.
    """
    # Use the builtin ``bool``: the ``np.bool`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so the original crashed on modern NumPy.
    img1 = np.asarray(img1).astype(bool)
    img2 = np.asarray(img2).astype(bool)
    intersection = np.logical_and(img1, img2)
    return 2. * intersection.sum() / (img1.sum() + img2.sum())
def loadModel(path, encoder):
    """
    Build an FPN segmentation model with the named encoder (no pretrained
    weights) and load its parameters from the checkpoint at ``path``. The
    checkpoint is expected to contain a ``model_state_dict`` entry.
    """
    print("loading %s from path '%s'"%(encoder,path))
    model = smp.FPN(encoder_name=encoder, encoder_weights=None, classes=4, activation=None,)
    model.load_state_dict(torch.load(path)['model_state_dict'])
    return model
| 2.4375 | 2 |
setup.py | bhalevy/scylla-stress-orchestrator | 3 | 12766575 | <filename>setup.py
from setuptools import setup, find_packages
# Packaging metadata for the ``scyllaso`` distribution. Installing it exposes
# the console-script entry points listed below as command-line tools.
setup(
    name='scyllaso',
    version='0.1.dev3',
    author='<NAME>',
    author_email='<EMAIL>',
    description='The Scylla Stress Orchestrator is Python 3 based framework for running various '
                'benchmark tools including cassandra-stress and scylla-bench.',
    long_description='The Scylla Stress Orchestrator is Python 3 based framework for running various '
                     'benchmark tools including cassandra-stress and scylla-bench.',
    long_description_content_type='text/markdown',
    url='https://github.com/scylladb/scylla-stress-orchestrator',
    packages=find_packages(),
    python_requires='>=3.7',
    project_urls={
        'Bug Tracker': 'https://github.com/scylladb/scylla-stress-orchestrator/issues',
    },
    license_files=('LICENSE.txt',),
    classifiers=[
        'Programming Language :: Python :: 3',
        'Operating System :: POSIX :: Linux',
    ],
    include_package_data=True,
    # Each entry point maps a shell command to a ``cli`` function in the
    # corresponding scyllaso.bin module.
    entry_points={
        'console_scripts': [
            'make_key = scyllaso.bin.make_key:cli',
            'scylla_monitoring_start = scyllaso.bin.scylla_monitoring_start:cli',
            'scylla_monitoring_stop = scyllaso.bin.scylla_monitoring_stop:cli',
            'make_cpu_config = scyllaso.bin.make_cpu_config:cli',
            'kill_load_generators = scyllaso.bin.kill_load_generators:cli',
            'generate_benchmark = scyllaso.bin.generate_benchmark:cli',
            'flamegraph_cpu = scyllaso.bin.flamegraph_cpu:cli',
            'provision_terraform = scyllaso.bin.provision_terraform:provision',
            'unprovision_terraform = scyllaso.bin.provision_terraform:unprovision',
        ],
    }
)
| 1.242188 | 1 |
trainQ.py | aaronworry/two-arms-climb | 6 | 12766576 | from kinematicEnv import KinematicEnv
from QL import QL
from upDDPG import DDPG as uDDPG
import tensorflow as tf
from bottomDDPG import DDPG as bDDPG
import numpy as np
env = KinematicEnv()
s_dim = env.state_dim
a_dim = env.action_dim
a_bound = env.action_bound
# Each pretrained network lives in its own TensorFlow graph + session so
# that the variables of the three models (upper-arm DDPG, lower-arm DDPG,
# Q-network) do not collide when restoring from separate checkpoints.
g1 = tf.Graph()
isess1 = tf.Session(graph=g1)
with g1.as_default():
    isess1.run(tf.global_variables_initializer())
    uddpg = uDDPG(a_dim, s_dim, a_bound)
    uddpg.restore()
g2 = tf.Graph()
isess2 = tf.Session(graph=g2)
with g2.as_default():
    isess2.run(tf.global_variables_initializer())
    bddpg = bDDPG(a_dim, s_dim, a_bound)
    bddpg.restore()
g3 = tf.Graph()
isess3 = tf.Session(graph=g3)
with g3.as_default():
    isess3.run(tf.global_variables_initializer())
    Q = QL(2, s_dim)
def initial():
    """Reset the environment to one of the three start configurations
    (up / down / on) chosen uniformly at random, and return the initial
    state vector."""
    tt = np.random.randint(0, 3)
    if tt == 0:
        s = env.initialUp()
    elif tt == 1:
        s = env.initialDown()
    else:
        s = env.initialOn()
    return s
def train():
    """
    Train the Q-network that arbitrates between the two pretrained DDPG arm
    policies. Per step, the Q-network chooses which policy (up or down) to
    execute; a hand-crafted reward rates whether that choice matches the
    current pose.
    """
    step = 0
    for i_episode in range(6000):
        s = initial()
        j = 0  # counts steps where the Q-network picked the "right" policy
        for i in range(300):
            #env.render()
            a0 = Q.choose_action(s)
            if a0 == 0:
                k = uddpg.choose_action(s)
                s_, _, _ = env.stepUp(k)
            else:
                k = bddpg.choose_action(s)
                s_, _, _ = env.stepDown(k)
            # Reward shaping based on the pose: label1/label2 are grip
            # offsets, label3 the vertical separation (see kinematicEnv).
            # NOTE(review): ``s`` is never advanced to ``s_`` inside the
            # episode, so the reward is always computed on the initial state —
            # confirm whether ``s = s_`` is missing by design.
            label1, label2, label3 = s[0], s[8], s[9] - s[1]
            if -20.<label1<20. and -20.<label2<20.:
                if label3 < 150.:
                    if a0 == 0: reward = 1
                    else: reward = -1
                else:
                    if a0 == 0: reward = -1
                    else: reward = 1
            elif -20.<label1<20. and abs(label2) >= 20.:
                if a0 == 0: reward = 1
                else: reward = -2
            elif abs(label1) >= 20. and -20.<label2<20.:
                if a0 == 0: reward = -2
                else: reward = 1
            else:
                # Bug fix: previously ``reward`` was left unbound when both
                # grips were out of range, raising NameError below. Treat it
                # as a failure, consistent with the other -2 penalties that
                # also terminate the episode.
                reward = -2
            Q.store_transition(s, a0, reward, s_)
            if step > 300 and step % 50 == 0:
                Q.learn()
            step+=1
            if reward == 1:
                j += 1
            if reward == -2 or i == 299:
                print('Ep: %i | accuracy: %.2f | step: %i' % (i_episode, 1.*j/(i+1)*100, i))
                break
#多个计算图训练时,怎么分别存储模型
train() | 2.140625 | 2 |
internal/config.py | BhasherBEL/LDMv2 | 15 | 12766577 | <filename>internal/config.py
#!/usr/local/bin/python
# coding: utf-8
VERSION = '2.1.1'
# 0: CRITICAL, 1: INFO, 2: DEBUG
VERBOSE_LEVEL = 1
# 0: CONSOLE, 1: FILE, 2: CONSOLE & FILE
LOG_TYPE = 0
LOG_DIR = 'output'
HTML = False
DEBUG = True
| 1.421875 | 1 |
examples/cross.py | danzat/3dpp | 17 | 12766578 | from scene import *
from geometry import *
from observers import *
scene = Scene3D()
# Vertical axis and evenly spaced horizontal reference segments.
scene.line(Vector3(0, -1, 0), Vector3(0, 8, 0), style='->, dashed')
scene.line(Vector3(-2, 0, 0), Vector3(2, 0, 0), style='<->, dashed')
scene.line(Vector3(-2, 2, 0), Vector3(2, 2, 0), style='<->, dashed')
scene.line(Vector3(-2, 4, 0), Vector3(2, 4, 0), style='<->, dashed')
scene.line(Vector3(-2, 6, 0), Vector3(2, 6, 0), style='<->, dashed')
# Each scenario is (r, a, b): a reference point r and a segment a -> b.
scenarios = [
    (Vector3(0, 0.7, 0.3), Vector3(0, 0, 0), Vector3(0, 1, 0)),
    (Vector3(0, 1.8, 0.5), Vector3(0, 2, 0), Vector3(0, 3, 0)),
    (Vector3(0, 3.7, -0.5), Vector3(0, 4, 0), Vector3(0, 5, 0)),
    (Vector3(0, 6.5, -0.2), Vector3(0, 6, 0), Vector3(0, 7, 0))
]
for r, a, b in scenarios:
    scene.line(a, b, style='->, very thick')
    # ``^`` is the cross product here: (r - a) x (b - a) is perpendicular to
    # both vectors; scaled by 2 for visibility.
    n = 2 * ((r - a) ^ (b - a))
    scene.line(a, r, style='->, very thick, blue')
    scene.line(a, a + n, style='->, very thick, red')
    scene.right_angle(a, b - a, n, 0.2)
with Scene2D(scene, YZ) as scene2d:
    # Framing rectangle drawn in the YZ projection.
    scene2d.polyline([(0.5, -1.5), (-7.5, -1.5), (-7.5, 1.5), (0.5, 1.5)], closed=True)
scene.render_latex(SphericalCamera(15, deg2rad(10), deg2rad(30), 4))
| 2.546875 | 3 |
pyfastrand/pcg32.py | Mukundan314/python-fastrand | 2 | 12766579 | <filename>pyfastrand/pcg32.py
import os
__all__ = ['pcg32', 'pcg32bounded', 'pcg32inc', 'pcg32_seed']
state = int.from_bytes(os.urandom(8), 'big')
inc = 0xda3e39cb94b95bdb
def pcg32():
    """Generate a random (32 bit) integer using PCG (XSH-RR variant)."""
    global state
    # The output is derived from the *current* 64-bit state; the state is
    # then advanced with the LCG using multiplier 0x5851f42d4c957f2d and
    # increment ``inc``.
    xorshifted = (((state >> 18) ^ state) >> 27) & 0xffffffff
    rot = state >> 59  # top 5 bits select the output rotation
    state = (state * 0x5851f42d4c957f2d + inc) & 0xffffffffffffffff
    # Right-rotate ``xorshifted`` by ``rot`` within 32 bits.
    return (xorshifted >> rot) | ((xorshifted << (-rot & 31)) & 0xffffffff)
def pcg32bounded(bound):
    """Generate a uniform random integer in the half-open interval
    [0, bound) using Lemire's multiply-shift method with debiasing."""
    random32bits = pcg32()
    multiresult = random32bits * bound
    if (multiresult & 0xffffffff) < bound:
        # The low 32 bits fall in the possibly-biased zone: reject and
        # redraw until they clear the threshold (2**32 mod bound).
        threshold = ((bound - 1) ^ 0xffffffff) % bound
        while (multiresult & 0xffffffff) < threshold:
            random32bits = pcg32()
            multiresult = random32bits * bound
    return multiresult >> 32
def pcg32inc(new_inc):
    "Change the increment parameter of the PCG generator."
    global inc
    inc = new_inc | 1  # the increment must be odd for a full-period LCG
def pcg32_seed(cls, new_seed):
    """Reseed the PCG generator with ``new_seed``.

    ``cls`` is unused; it is kept only so existing callers that pass it keep
    working.
    """
    global state
    # Bug fix: the original declared ``global seed`` (a name that does not
    # exist in this module) and then bound ``state`` as a *local*, so
    # reseeding silently had no effect on the generator.
    state = new_seed
| 2.875 | 3 |
test/gym_wrappers_test.py | Data-Science-in-Mechanical-Engineering/edge | 0 | 12766580 | import unittest
import warnings
import numpy as np
# import safety_gym
# from safety_gym.envs.engine import Engine
import gym
import gym.spaces as spaces
from gym.envs.registration import register
from edge.gym_wrappers import BoxWrapper, DiscreteWrapper, GymEnvironmentWrapper
from edge.agent import RandomAgent
class SpaceWrappers(unittest.TestCase):
    """Unit tests for the edge wrappers around gym's Box and Discrete spaces."""
    def test_box_wrapper(self):
        # Silence deprecation warnings emitted while creating gym spaces.
        warnings.filterwarnings('ignore')
        # A (2, 2) box should be flattened to a 4-element discretized space.
        gb = spaces.Box(0, 1, (2,2))
        eb = BoxWrapper(gb, (10,10,10,10))
        eelem = eb.sample()
        gelem = gb.sample()
        self.assertEqual(eelem.shape, (4,))
        self.assertEqual(eb.to_gym((2,3,4,5)).shape, gb.shape)
        self.assertEqual(eb.from_gym(gelem).shape, (4,))
        # Per-dimension bounds given as arrays.
        gb = spaces.Box(np.array([0,1]), np.array([2,3]))
        eb = BoxWrapper(gb, (10, 10))
        eelem = eb.sample()
        gelem = gb.sample()
        self.assertEqual(eelem.shape, (2,))
        self.assertEqual(eb.to_gym((2,3)).shape, gb.shape)
        self.assertEqual(eb.from_gym(gelem).shape, (2,))
        # Unbounded boxes must be clipped to the configured inf_ceiling.
        gb = spaces.Box(-np.inf, np.inf, (1,))
        eb = BoxWrapper(gb, (10, ), inf_ceiling=5)
        for t in range(100):
            eelem = eb.sample()
            self.assertTrue(np.abs(eelem)[0] <= 5)
            self.assertTrue(eelem in eb)
    def test_discrete_wrapper(self):
        # Round-tripping values through the wrapper must be the identity.
        gd = spaces.Discrete(10)
        ed = DiscreteWrapper(gd)
        g = gd.sample()
        e = ed.sample()
        self.assertEqual(ed.to_gym(e), int(e))
        self.assertEqual(ed.from_gym(g), g)
# class SafetyGymEnvironmentWrappers(unittest.TestCase):
# def test_safety_gym_environment_creation(self):
# senv = gym.make('Safexp-PointGoal1-v0')
# env = GymEnvironmentWrapper(senv)
#
# config = {
# 'robot_base': 'xmls/car.xml',
# 'task': 'push',
# 'observe_goal_lidar': True,
# 'observe_box_lidar': True,
# 'observe_hazards': True,
# 'observe_vases': True,
# 'constrain_hazards': True,
# 'lidar_max_dist': 3,
# 'lidar_num_bins': 16,
# 'hazards_num': 4,
# 'vases_num': 4
# }
#
# senv = Engine(config)
# register(id='SafexpTestEnvironment-v0',
# entry_point='safety_gym.envs.mujoco:Engine',
# kwargs={'config': config})
# env = GymEnvironmentWrapper(senv, failure_critical=True)
#
# def test_safety_gym_random_agent(self):
# senv = gym.make('Safexp-PointGoal1-v0')
# env = GymEnvironmentWrapper(senv)
# random_agent = RandomAgent(env)
#
# ep_ret, ep_cost = 0, 0
# for t in range(1000):
# new_state, reward, failed = random_agent.step()
# ep_ret += reward
# ep_cost += env.info.get('cost', 0)
# env.gym_env.render()
# if env.done:
# print('Episode Return: %.3f \t Episode Cost: %.3f' % (ep_ret, ep_cost))
# ep_ret, ep_cost = 0, 0
# random_agent.reset()
class GymEnvironmentWrappers(unittest.TestCase):
    """Smoke tests driving a RandomAgent through wrapped gym environments."""
    def test_gym_environment_creation(self):
        # Wrapping should succeed with and without failure_critical.
        gymenv = gym.make('LunarLander-v2')
        env = GymEnvironmentWrapper(gymenv)
        env = GymEnvironmentWrapper(gymenv, failure_critical=True)
        self.assertTrue(True)
    def test_gym_random_agent(self):
        # Run a random agent for 100 steps, accumulating return and cost,
        # and reset whenever an episode terminates.
        gymenv = gym.make('LunarLander-v2')
        env = GymEnvironmentWrapper(gymenv)
        random_agent = RandomAgent(env)
        ep_ret, ep_cost = 0, 0
        for t in range(100):
            new_state, reward, failed, _ = random_agent.step()
            ep_ret += reward
            ep_cost += env.info.get('cost', 0)
            # env.gym_env.render()
            if env.done:
                print('Episode Return: %.3f \t Episode Cost: %.3f' % (
                    ep_ret, ep_cost))
                ep_ret, ep_cost = 0, 0
                random_agent.reset()
    def test_gym_control_frequency(self):
        # Same smoke test, but with the wrapper repeating each action
        # (control_frequency=2).
        gymenv = gym.make('CartPole-v1')
        env = GymEnvironmentWrapper(gymenv, control_frequency=2)
        random_agent = RandomAgent(env)
        ep_ret, ep_cost = 0, 0
        for t in range(100):
            new_state, reward, failed, _ = random_agent.step()
            ep_ret += reward
            ep_cost += env.info.get('cost', 0)
            # env.gym_env.render()
            if env.done:
                print('Episode Return: %.3f \t Episode Cost: %.3f' % (ep_ret, ep_cost))
                ep_ret, ep_cost = 0, 0
                random_agent.reset()
| 2.265625 | 2 |
djvideomem/payment/views.py | mmg-3/dj-video-membership | 26 | 12766581 | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib import messages
from django.http import JsonResponse, HttpResponse
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, redirect, render
from rest_framework.views import APIView
from rest_framework.response import Response
import stripe
from djvideomem.content.models import Pricing
User = get_user_model()
stripe.api_key = settings.STRIPE_SECRET_KEY
@csrf_exempt
def webhook(request):
    """Stripe webhook endpoint.

    Verifies the event signature, then keeps the local ``Subscription``
    record in sync with Stripe on the relevant subscription lifecycle
    events. For the event catalogue see https://stripe.com/docs/webhooks.
    """
    webhook_secret = settings.STRIPE_WEBHOOK_SECRET
    payload = request.body
    # Retrieve the event by verifying the signature using the raw body and
    # the endpoint's signing secret.
    signature = request.META["HTTP_STRIPE_SIGNATURE"]
    try:
        event = stripe.Webhook.construct_event(
            payload=payload, sig_header=signature, secret=webhook_secret)
        data = event['data']
    except Exception:
        # Invalid payload or signature: answer 400 so Stripe records the
        # delivery as failed and retries. (Previously the exception object
        # itself was returned, which is not a valid Django response.)
        return HttpResponse(status=400)
    # Type of webhook event sent - decides how local state is updated.
    event_type = event['type']
    if event_type == 'invoice.paid':
        # Payment succeeded (e.g. after the trial ends): provision the
        # service by pointing the user's subscription at the paid tier and
        # storing the Stripe status locally.
        webhook_object = data["object"]
        stripe_customer_id = webhook_object["customer"]
        stripe_sub = stripe.Subscription.retrieve(webhook_object["subscription"])
        stripe_price_id = stripe_sub["plan"]["id"]
        pricing = Pricing.objects.get(stripe_price_id=stripe_price_id)
        user = User.objects.get(stripe_customer_id=stripe_customer_id)
        user.subscription.status = stripe_sub["status"]
        user.subscription.stripe_subscription_id = webhook_object["subscription"]
        user.subscription.pricing = pricing
        user.subscription.save()
    if event_type == 'invoice.finalized':
        # Hook for manually sending/storing invoices; currently just logged.
        print(data)
    if event_type == 'customer.subscription.deleted':
        # Subscription cancelled (by the user or automatically): record the
        # new status locally so access checks see it.
        webhook_object = data["object"]
        stripe_customer_id = webhook_object["customer"]
        stripe_sub = stripe.Subscription.retrieve(webhook_object["id"])
        user = User.objects.get(stripe_customer_id=stripe_customer_id)
        user.subscription.status = stripe_sub["status"]
        user.subscription.save()
    if event_type == 'customer.subscription.trial_will_end':
        # Hook for notifying the user their trial is about to end.
        print(data)
    if event_type == 'customer.subscription.updated':
        print(data)
    return HttpResponse()
class EnrollView(generic.TemplateView):
    """Static page listing the pricing tiers the user can enrol in."""
    template_name = "payment/enroll.html"
def PaymentView(request, slug):
    """Show the checkout (or plan-change) page for the pricing tier *slug*.

    Redirects back to the enrol page when the user already holds an active
    subscription for that exact tier.
    """
    pricing = get_object_or_404(Pricing, slug=slug)
    subscription = request.user.subscription
    if subscription.pricing == pricing and subscription.is_active:
        messages.info(request, "You are already enrolled for this package")
        return redirect("payment:enroll")
    context = {
        "pricing_tier": pricing,
        "STRIPE_PUBLIC_KEY": settings.STRIPE_PUBLIC_KEY
    }
    # A paying subscriber switching tiers gets the "change plan" flow;
    # everyone else (including free-trial users) goes through checkout.
    switching_plans = (subscription.is_active
                       and subscription.pricing.stripe_price_id != "django-free-trial")
    template = "payment/change.html" if switching_plans else "payment/checkout.html"
    return render(request, template, context)
class CreateSubscriptionView(APIView):
    """Create a Stripe subscription for the authenticated user."""

    def post(self, request, *args, **kwargs):
        # Expects 'paymentMethodId' and 'priceId' in the request body.
        data = request.data
        customer_id = request.user.stripe_customer_id
        try:
            # Attach the payment method to the customer first - the
            # subscription below will charge it.
            stripe.PaymentMethod.attach(
                data['paymentMethodId'],
                customer=customer_id,
            )
            # Set the default payment method on the customer so renewals
            # use the same card.
            stripe.Customer.modify(
                customer_id,
                invoice_settings={
                    'default_payment_method': data['paymentMethodId'],
                },
            )
            # Create the subscription; expand the latest invoice's payment
            # intent so the client can confirm 3DS/authentication if needed.
            subscription = stripe.Subscription.create(
                customer=customer_id,
                items=[{'price': data["priceId"]}],
                expand=['latest_invoice.payment_intent'],
            )
            data = {}
            data.update(subscription)
            return Response(data)
        except Exception as e:
            # Surface the Stripe error message to the client.
            return Response({
                "error": {'message': str(e)}
            })
class RetryInvoiceView(APIView):
    """Retry a failed invoice payment with a new payment method."""

    def post(self, request, *args, **kwargs):
        # Expects 'paymentMethodId' and 'invoiceId' in the request body.
        data = request.data
        customer_id = request.user.stripe_customer_id
        try:
            # Attach the replacement payment method to the customer.
            stripe.PaymentMethod.attach(
                data['paymentMethodId'],
                customer=customer_id,
            )
            # Set the default payment method on the customer so Stripe's
            # automatic retry uses the new card.
            stripe.Customer.modify(
                customer_id,
                invoice_settings={
                    'default_payment_method': data['paymentMethodId'],
                },
            )
            # Re-fetch the invoice with its payment intent expanded so the
            # client can complete any required authentication.
            invoice = stripe.Invoice.retrieve(
                data['invoiceId'],
                expand=['payment_intent'],
            )
            data = {}
            data.update(invoice)
            return Response(data)
        except Exception as e:
            # Surface the Stripe error message to the client.
            return Response({
                "error": {'message': str(e)}
            })
class ChangeSubscriptionView(APIView):
    """Switch the user's existing Stripe subscription to a new price."""

    def post(self, request, *args, **kwargs):
        """Move the subscription to ``request.data['priceId']``.

        Any pending cancellation is revoked and the price difference is
        invoiced immediately (``proration_behavior="always_invoice"``).
        """
        # (A debug `print(request.data)` was removed here: it leaked the
        # raw payment payload into the server logs.)
        subscription_id = request.user.subscription.stripe_subscription_id
        subscription = stripe.Subscription.retrieve(subscription_id)
        try:
            updatedSubscription = stripe.Subscription.modify(
                subscription_id,
                cancel_at_period_end=False,
                items=[{
                    # Replace the single existing subscription item in place.
                    'id': subscription['items']['data'][0].id,
                    'price': request.data["priceId"],
                }],
                proration_behavior="always_invoice"
            )
            data = {}
            data.update(updatedSubscription)
            return Response(data)
        except Exception as e:
            # Surface the Stripe error message to the client.
            return Response({
                "error": {'message': str(e)}
            })
| 2.046875 | 2 |
cellphonedb/src/api_endpoints/terminal_api/database_terminal_api_endpoints/database_terminal_commands.py | BioTuring-Notebooks/CellphoneDB | 35 | 12766582 | import os
from datetime import datetime
from typing import Optional
import click
from click import Context
from cellphonedb.src.api_endpoints.terminal_api.tools_terminal_api_endpoints.tools_terminal_commands import \
generate_proteins, generate_complex, _set_paths, generate_interactions, filter_all, generate_genes
from cellphonedb.src.app.cellphonedb_app import output_dir
from cellphonedb.src.database.manager import DatabaseVersionManager
from cellphonedb.src.database.manager.DatabaseVersionManager import collect_database
from cellphonedb.utils.utils import set_paths
@click.command("collect")
@click.option('--database', default='cellphone_custom_{}.db'.format(datetime.now().strftime("%Y-%m-%d-%H_%M")),
              help='output file name [cellphone_custom_<current date_time>.db]')
@click.option('--result-path', default='', help='output folder for the collected database')
def collect(database, result_path):
    """Collect the curated input files into a single database file.

    Note: the default database name is computed once, when this module is
    imported, not per invocation.
    """
    output_path = set_paths(output_dir, result_path)
    DatabaseVersionManager.collect_database(database, output_path)
@click.command("download")
@click.option('--version', type=str, default='latest')
def download(version: str):
    """Download the given released database version ('latest' by default)."""
    DatabaseVersionManager.download_database(version)
@click.command("list_remote")
def list_remote():
    """Print the database versions available for download."""
    DatabaseVersionManager.list_remote_database_versions()
@click.command("list_local")
def list_local():
    """Print the database versions already present on this machine."""
    DatabaseVersionManager.list_local_database_versions()
@click.command("generate")
@click.option('--user-protein', type=click.Path(file_okay=True, exists=True, dir_okay=False))
@click.option('--user-gene', type=click.Path(file_okay=True, exists=True, dir_okay=False))
@click.option('--user-complex', type=click.Path(file_okay=True, exists=True, dir_okay=False))
@click.option('--user-interactions', type=click.Path(file_okay=True, exists=True, dir_okay=False))
@click.option('--user-interactions-only', is_flag=True)
@click.option('--fetch', is_flag=True)
@click.option('--result-path', type=str, default=None)
@click.option('--log-file', type=str, default='log.txt')
@click.option('--project-name', type=str, default=None)
@click.pass_context
def generate(ctx: Context,
             user_protein: Optional[str],
             user_gene: Optional[str],
             user_complex: Optional[str],
             user_interactions: Optional[str],
             user_interactions_only: Optional[str],
             fetch: bool,
             result_path: Optional[str],
             log_file: str,
             project_name: str
             ):
    """Run the whole database-generation pipeline and collect a .db file.

    Order matters: proteins and genes are generated first, complexes next,
    then interactions (which read the three csv files produced before),
    then everything is filtered and collected into a timestamped database.
    --fetch pulls fresh data from the remote sources (UniProt, Ensembl,
    IMEx, IUPHAR) instead of the bundled snapshots.
    """
    # 1) Proteins (optionally merged with a user-supplied list).
    ctx.invoke(generate_proteins,
               user_protein=user_protein,
               fetch_uniprot=fetch,
               result_path=result_path,
               log_file=log_file,
               project_name=project_name
               )
    # 2) Genes.
    ctx.invoke(generate_genes,
               user_gene=user_gene,
               fetch_uniprot=fetch,
               fetch_ensembl=fetch,
               result_path=result_path,
               project_name=project_name
               )
    # 3) Complexes.
    ctx.invoke(generate_complex,
               user_complex=user_complex,
               result_path=result_path,
               log_file=log_file,
               project_name=project_name
               )
    # 4) Interactions, built from the files produced by the steps above.
    output_path = _set_paths(result_path, project_name)
    proteins_file = os.path.join(output_path, 'protein_generated.csv')
    genes_file = os.path.join(output_path, 'gene_generated.csv')
    complex_file = os.path.join(output_path, 'complex_generated.csv')
    ctx.invoke(generate_interactions,
               proteins=proteins_file,
               genes=genes_file,
               complex=complex_file,
               user_interactions=user_interactions,
               user_interactions_only=user_interactions_only,
               result_path=result_path,
               fetch_imex=fetch,
               fetch_iuphar=fetch,
               project_name=project_name
               )
    # 5) Filter everything down to the final *_input.csv files.
    ctx.invoke(filter_all, input_path=output_path, result_path=result_path)
    # 6) Collect the filtered csvs into a timestamped sqlite database.
    db_name = 'cellphonedb_user_{}.db'.format(datetime.now().strftime("%Y-%m-%d-%H_%M"))
    collect_database(db_name, output_path,
                     protein_filename='protein_input.csv',
                     gene_filename='gene_input.csv',
                     complex_filename='complex_input.csv',
                     interaction_filename='interaction_input.csv',
                     data_path=output_path)
@click.command("collect_generated")
@click.argument('path', type=str)
@click.option('--result-path', type=str, default=None)
@click.option('--project-name', type=str, default=None)
def collect_generated(path: str, result_path: Optional[str], project_name: str):
    """Collect previously generated input csvs from *path* into a database.

    Expects protein/gene/complex/interaction `_input.csv` files inside
    *path*; the resulting .db file is written to the resolved output path.
    """
    # Timestamped name so successive builds never overwrite each other.
    db_name = 'cellphonedb_user_{}.db'.format(datetime.now().strftime("%Y-%m-%d-%H_%M"))
    output_path = _set_paths(result_path, project_name)
    # os.path.join is portable; the previous '{}/file'.format(path) form
    # hard-coded '/' as the separator.
    collect_database(db_name,
                     output_path,
                     protein_filename=os.path.join(path, 'protein_input.csv'),
                     gene_filename=os.path.join(path, 'gene_input.csv'),
                     complex_filename=os.path.join(path, 'complex_input.csv'),
                     interaction_filename=os.path.join(path, 'interaction_input.csv'),
                     data_path=output_path)
| 2.125 | 2 |
OOP/classes.py | AndreiHustiuc/IT_Factory_Course | 0 | 12766583 | <filename>OOP/classes.py
class Human:
    """A person with a name and an age, exposed through simple accessors."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def greeting(self):
        """Print a short hello message addressed to this person."""
        message = f"Hello {self.name}!"
        print(message)

    def get_name(self):
        """Return the person's name."""
        return self.name

    def set_age(self, age):
        """Replace the stored age with *age*."""
        self.age = age

    def get_age(self):
        """Return the person's current age."""
        return self.age
# Quick demo exercising the Human class.
# NOTE(review): these statements run on import; consider guarding them with
# `if __name__ == "__main__":`.
h = Human("Andrei", 31)
h.greeting()
print(h.get_name())
h.set_age(48)
print(h.get_age())
| 3.640625 | 4 |
ormar/decorators/__init__.py | paolodina/ormar | 0 | 12766584 | <filename>ormar/decorators/__init__.py
"""
Module with all decorators that are exposed for users.
Currently only:
* property_field - exposing @property like function as field in Model.dict()
* predefined signals decorators (pre/post + save/update/delete)
"""
from ormar.decorators.property_field import property_field
from ormar.decorators.signals import (
post_delete,
post_save,
post_update,
pre_delete,
pre_save,
pre_update,
)
# Public API re-exported by `ormar.decorators`.
__all__ = [
    "property_field",
    "post_delete",
    "post_save",
    "post_update",
    "pre_delete",
    "pre_save",
    "pre_update",
]
| 2.25 | 2 |
olive/scripts/calibration/__init__.py | liuyenting/olive-core | 0 | 12766585 | <filename>olive/scripts/calibration/__init__.py
from .aotf import *
| 1.1875 | 1 |
python/238_product_of_array_except_self.py | liaison/LeetCode | 17 | 12766586 |
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list whose i-th entry is the product of all nums except nums[i].

        Two sweeps over a single output array: a forward pass storing the
        running product of everything left of i, then a backward pass
        multiplying in the running product of everything right of i.
        O(n) time, O(1) extra space beyond the result; no division used.
        """
        n = len(nums)
        answer = [1] * n
        left_running = 1
        for i in range(n):
            answer[i] = left_running
            left_running *= nums[i]
        right_running = 1
        for i in range(n - 1, -1, -1):
            answer[i] *= right_running
            right_running *= nums[i]
        return answer
| 3.03125 | 3 |
GestureDetection/GestureDetecttion.py | sainikhilgoud10/ComputerVision-ImageProcessing | 0 | 12766587 | import cv2
import numpy as np
import math
# Hand-gesture finger counter: thresholds a fixed ROI of the webcam feed,
# finds the largest contour (assumed to be the hand) and counts fingers from
# convexity defects between them.
# NOTE(review): Python 2 syntax (`print "error"`) and the 3-value OpenCV 3
# cv2.findContours() signature - will not run unmodified on Python 3/OpenCV 4.
cap = cv2.VideoCapture(0)  # default webcam
while(cap.isOpened()):
    ret, img = cap.read()
    # Fixed 200x200 region of interest in which the hand must be shown.
    cv2.rectangle(img,(300,300),(100,100),(0,255,0),0)
    crop_img = img[100:300,100:300]
    grey = cv2.cvtColor(crop_img,cv2.COLOR_BGR2GRAY)
    value = (35,35)  # Gaussian kernel size
    blurred = cv2.GaussianBlur(grey,value,0)
    # Otsu's method picks the threshold automatically (the 127 is ignored);
    # inverted so the hand comes out white.
    _,thresh1 = cv2.threshold(blurred,127,255,
                              cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    cv2.imshow('Thresholded',thresh1)
    _,contours,hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_NONE)
    # Pick the largest contour, assumed to be the hand.
    # NOTE(review): if no contour is found, `ci` stays unbound and
    # `contours[ci]` below raises NameError.
    max_area = -1
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if(area>max_area):
            max_area = area
            ci = i
    cnt = contours[ci]
    x,y,w,h = cv2.boundingRect(cnt)
    cv2.rectangle(crop_img,(x,y),(x+w,y+h),(0,0,255),0)
    hull = cv2.convexHull(cnt)
    drawing = np.zeros(crop_img.shape,np.uint8)
    cv2.drawContours(drawing,[cnt],0,(0,255,0),0)
    cv2.drawContours(drawing,[hull],0,(0,0,255),0)
    try:
        # Count convexity defects whose inner angle is < 90 degrees; each
        # such valley corresponds to a gap between two extended fingers.
        hull = cv2.convexHull(cnt,returnPoints = False)
        defects = cv2.convexityDefects(cnt,hull)
        count_defects = 0
        cv2.drawContours(thresh1,contours,-1,(0,255,0),3)
        for i in range(defects.shape[0]):
            s,e,f,d = defects[i,0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            # Side lengths of the (start, end, far) triangle, then the angle
            # at `far` via the law of cosines; *57 converts radians to
            # (approximate) degrees.
            a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
            b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
            c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
            angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
            if angle <= 90:
                count_defects += 1
                cv2.circle(crop_img,far,1,[0,0,255],-1)
            cv2.line(crop_img,start,end,[0,255,0],2)
    except:
        # NOTE(review): bare except hides real errors; and when it fires
        # before `count_defects = 0` runs (e.g. defects is None), the
        # comparisons below raise NameError on the first frame.
        print "error"
    # count_defects gaps between fingers => count_defects + 1 fingers shown.
    if count_defects == 1:
        cv2.putText(img,"this is one",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
    elif count_defects == 2:
        cv2.putText(img,"this is two",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
    elif count_defects == 3:
        cv2.putText(img,"this is three",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
    elif count_defects == 4:
        cv2.putText(img,"this is four",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
    else:
        cv2.putText(img,"this is five",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
    cv2.imshow('Gesture',img)
    all_img = np.hstack((drawing,crop_img))
    cv2.imshow('contours',all_img)
    k = cv2.waitKey(10)
    if k== 27:  # ESC exits
        break
cap.release()
cv2.destroyAllWindows()
| 2.734375 | 3 |
src/zope/keyreference/testing.py | zopefoundation/zope.keyreference | 1 | 12766588 | <filename>src/zope/keyreference/testing.py<gh_stars>1-10
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Testing components.
"""
import zope.interface
import zope.component
import zope.keyreference.interfaces
@zope.component.adapter(zope.interface.Interface)
@zope.interface.implementer(zope.keyreference.interfaces.IKeyReference)
class SimpleKeyReference(object):
    """An IKeyReference for all objects, intended for tests.

    This implementation is *not* ZODB safe: it holds a direct reference to
    the object and hashes/compares the object itself.
    """

    # Identifies this reference flavour; references of different flavours
    # order by this id so heterogeneous collections still sort totally.
    key_type_id = 'zope.app.keyreference.simple'

    def __init__(self, object):
        self.object = object

    def __call__(self):
        # Dereference: return the wrapped object itself.
        return self.object

    def __hash__(self):
        return hash(self.object)

    def _get_cmp_keys(self, other):
        # Same flavour: compare by the wrapped objects' hashes (hash(other)
        # delegates to other.__hash__, i.e. the other wrapped object).
        # Different flavours: fall back to comparing the type ids.
        if self.key_type_id == other.key_type_id:
            return hash(self.object), hash(other)
        return self.key_type_id, other.key_type_id

    def __eq__(self, other):
        a, b = self._get_cmp_keys(other)
        return a == b

    def __lt__(self, other):
        a, b = self._get_cmp_keys(other)
        return a < b

    def __ne__(self, other):
        a, b = self._get_cmp_keys(other)
        return a != b

    def __gt__(self, other):
        a, b = self._get_cmp_keys(other)
        return a > b

    def __le__(self, other):
        a, b = self._get_cmp_keys(other)
        return a <= b

    def __ge__(self, other):
        a, b = self._get_cmp_keys(other)
        return a >= b
| 2.03125 | 2 |
laser_escape/laser_escape.py | andrewzwicky/Laser-Escape | 0 | 12766589 | <gh_stars>0
#!/usr/bin/python
import csv
import datetime
import os
import time
from enum import Enum
import threading
import RPi.GPIO as GPIO
from Adafruit_CharLCD import Adafruit_CharLCDPlate
from getch import getch
from gpiozero import LightSensor
# Pins
from laser_beam_detection import laser_beam_penalties, LASER_BREAK_BOUNCE_TIME
# GPIO pin numbers (BCM numbering - see setup()).
BUZZER_PIN = 22
LDR_PINS = [18, 24, 12, 19, 5, 16, 23, 26, 13]
TIMER_BUTTON_PIN = 20
NAME_ENTRY_BUTTON_PIN = 21
# Wire colour of each light-dependent resistor, index-aligned with LDR_PINS.
LDR_WIRE_COLORS = ['WHITE', 'BROWN', 'GRAY', 'GREEN', 'RED', 'YELLOW', 'PURPLE', 'BLUE', 'ORANGE']
LDR_COLOR_DICT = {pin: color for pin, color in zip(LDR_PINS, LDR_WIRE_COLORS)}
"""
Wiring:
LDR - GPIO (LDR_PIN) to 3.3V (direction doesn't matter)
CAPACITOR - GPIO (LDR_PIN) (long leg), GND (short leg)
BUZZER - GPIO (OUTPUT_PIN) (long leg), GND (short leg)
"""
# LCD backlight colours as on/off RGB triples for the Adafruit char LCD.
RED = (1, 0, 0)
GREEN = (0, 1, 0)
BLUE = (0, 0, 1)
WHITE = (1, 1, 1)
PURPLE = (1, 0, 1)
YELLOW = (1, 1, 0)
# Colours cycled through while celebrating a new best time.
RECORD_COLORS = [RED, GREEN, BLUE, PURPLE]
RECORD_TIME_COLOR_DWELL = 0.5  # seconds each record colour is shown
# LCD cursor positions as (column, row).
START_TOP_ROW = (0, 0)
START_BOTTOM_ROW = (0, 1)
# Thresholds & Timers
TRIP_TIME_PENALTY = 5  # seconds added per penalised beam break
LDR_QUERY_DELAY = 0.005  # seconds between light-sensor polls
STARTUP_DELAY = 0.4  # grace period after start before beam breaks alarm
# CSV file the run results are appended to (lives next to this script).
RESULTS_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'times.csv')
class ProgramState(Enum):
    """States of the timer's main state machine (see logic_loop)."""
    IDLE = 0            # waiting for the first runner of the day
    NAME_ENTRY = 1      # runner is typing their name on the keyboard
    READY_TO_GO = 2     # name entered, waiting for the start button
    TIMING = 3          # run in progress, clock counting up
    JUST_FINISHED = 4   # run over, result (and any record) displayed
    NEW_RECORD = 5      # NOTE(review): not referenced by logic_loop
NEW_RECORD = 5
# Latched button flags: set True by the GPIO interrupt callbacks below and
# consumed (reset to False) once per pass of logic_loop().
TIMER_BUTTON_PRESSED = False
NAME_BUTTON_PRESSED = False


def name_entry_press_loop(_):
    """GPIO edge callback: latch a name-entry button press."""
    global NAME_BUTTON_PRESSED
    NAME_BUTTON_PRESSED = True


def timer_button_press_loop(_):
    """GPIO edge callback: latch a start/stop (timer) button press."""
    global TIMER_BUTTON_PRESSED
    TIMER_BUTTON_PRESSED = True
def setup():
    """Configure GPIO pins and button interrupts; return the LDR sensors.

    Returns a list of gpiozero.LightSensor objects, one per laser beam,
    index-aligned with LDR_PINS.
    """
    GPIO.setmode(GPIO.BCM)
    # Buttons pull the pin low when pressed (internal pull-ups enabled).
    GPIO.setup(TIMER_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(NAME_ENTRY_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(BUZZER_PIN, GPIO.OUT)
    # Timer button fires on both edges (press and release), debounced 250ms.
    GPIO.add_event_detect(TIMER_BUTTON_PIN,
                          GPIO.BOTH,
                          callback=timer_button_press_loop,
                          bouncetime=250)
    GPIO.add_event_detect(NAME_ENTRY_BUTTON_PIN,
                          GPIO.FALLING,
                          callback=name_entry_press_loop,
                          bouncetime=250)
    light_sensors = [LightSensor(pin) for pin in LDR_PINS]
    return light_sensors
def get_best_record():
    """Return the fastest recorded run time in seconds, or None.

    Reads RESULTS_FILE (rows written by write_attempt_to_file: timestamp,
    name, total time, ...). Returns None when the file does not exist or
    holds no rows.
    """
    try:
        with open(RESULTS_FILE, 'r') as times_file:
            run_reader = csv.reader(times_file)
            # Convert before taking the minimum: min() over the raw strings
            # is lexicographic (e.g. "10.5" < "9.2"), which picked the wrong
            # "record" once runs crossed a digit boundary.
            times = [float(row[2]) for row in run_reader]  # third item is the time
            return min(times) if times else None
    except FileNotFoundError:
        return None
def high_level_loop():
    """Entry point: run logic_loop on a worker thread, clean up GPIO on exit."""
    try:
        # logic is done in a thread so that
        # the buttons can be added on interrupts
        threading.Thread(target=logic_loop).start()
        while True:
            time.sleep(100)  # keep the main thread alive
    finally:
        # Always release the GPIO pins, even on Ctrl-C.
        GPIO.cleanup()
def name_entry(lcd):
    """Prompt for the runner's name on the LCD and read it from the keyboard.

    Enter ('\\r') or Ctrl-C ('\\x03') finishes entry; backspace ('\\x7f')
    deletes the last character. Returns the entered name as a string.
    """
    lcd.clear()
    lcd.set_color(*WHITE)
    lcd.message("NAME?")
    lcd.set_cursor(*START_BOTTOM_ROW)
    runner_name = ''
    while True:
        last_key = getch()
        if last_key == '\r' or last_key == '\x03':
            break
        if last_key == '\x7f':
            # Backspace: blank the character on screen, then drop it from
            # the buffer (cursor never moves left of column 0).
            cursor_spot = max(len(runner_name) - 1, 0)
            lcd.set_cursor(cursor_spot, 1)
            lcd.message(' ')
            lcd.set_cursor(cursor_spot, 1)
            if runner_name:
                runner_name = runner_name[:-1]
        else:
            # Echo the typed character and append it to the buffer.
            lcd.message(last_key)
            runner_name += last_key
    return runner_name
def set_name_and_time(lcd, color, runner_name, display_time):
    """Show *display_time* (seconds) on the top LCD row and the runner's
    name on the bottom row, with the given backlight *color* triple."""
    lcd.clear()
    lcd.set_color(*color)
    lcd.set_cursor(*START_TOP_ROW)
    lcd.message(format_time(display_time))
    lcd.set_cursor(*START_BOTTOM_ROW)
    lcd.message(runner_name)
def logic_loop():
    """Main state machine: IDLE -> NAME_ENTRY -> READY_TO_GO -> TIMING ->
    JUST_FINISHED -> (NAME_ENTRY ...), driving the LCD, buzzer and lasers.

    Runs forever on a worker thread (see high_level_loop). The two button
    flags are latched by the GPIO callbacks and cleared at the end of every
    pass so each press is consumed exactly once.
    """
    global TIMER_BUTTON_PRESSED
    global NAME_BUTTON_PRESSED
    light_sensors = setup()
    lcd = Adafruit_CharLCDPlate()
    program_state = ProgramState.IDLE
    next_state = ProgramState.IDLE
    previous_state = None
    runner_name = ''
    start_time = None
    raw_duration = 0   # wall-clock run time, without penalties
    duration = 0       # run time including beam-break penalties
    penalties = 0      # number of penalised beam breaks
    # Per-beam timestamp of the last registered break (for debouncing).
    laser_times = [0 for _ in range(len(light_sensors))]
    while True:
        if program_state != previous_state:
            # Trace state transitions on the console for debugging.
            print("{0}->{1}".format(previous_state, program_state))
        if program_state == ProgramState.IDLE:
            if previous_state != ProgramState.IDLE:
                # One-time screen refresh on entering IDLE.
                lcd.set_color(*WHITE)
                lcd.set_cursor(*START_TOP_ROW)
                lcd.message("READY FOR")
                lcd.set_cursor(*START_BOTTOM_ROW)
                lcd.message("FIRST RUNNER!")
            if NAME_BUTTON_PRESSED:
                next_state = ProgramState.NAME_ENTRY
        elif program_state == ProgramState.NAME_ENTRY:
            # Blocks until the runner finishes typing their name.
            runner_name = name_entry(lcd)
            next_state = ProgramState.READY_TO_GO
        elif program_state == ProgramState.READY_TO_GO:
            if previous_state != ProgramState.READY_TO_GO:
                record = get_best_record()
                new_record = False
                set_name_and_time(lcd, YELLOW, runner_name, 0)
            # start executing lasers early, so there's not a big time penalty at the beginning
            beams_broken, penalties, laser_times = laser_beam_penalties(laser_times,
                                                                        light_sensors,
                                                                        penalties,
                                                                        time.time())
            # This is important to make sure the LDRs don't get messed up
            time.sleep(LDR_QUERY_DELAY)
            if TIMER_BUTTON_PRESSED:
                next_state = ProgramState.TIMING
        elif program_state == ProgramState.TIMING:
            if previous_state != ProgramState.TIMING:
                lcd.set_color(*GREEN)
                start_time = time.time()
                # Back-date the per-beam timestamps past the debounce window
                # so a break right at the start still registers.
                laser_times = [start_time - 2 * LASER_BREAK_BOUNCE_TIME for _ in
                               range(len(light_sensors))]
                penalties = 0
            current_time = time.time()
            beams_broken, penalties, laser_times = laser_beam_penalties(laser_times,
                                                                        light_sensors,
                                                                        penalties,
                                                                        time.time())
            # Red light + buzzer while any beam is broken, after the short
            # startup grace period.
            if any(beams_broken) and (current_time - start_time) >= STARTUP_DELAY:
                lcd.set_color(*RED)
                GPIO.output(BUZZER_PIN, True)
            else:
                lcd.set_color(*GREEN)
                GPIO.output(BUZZER_PIN, False)
            raw_duration = current_time - start_time
            duration = raw_duration + (penalties * TRIP_TIME_PENALTY)
            lcd.set_cursor(*START_TOP_ROW)
            lcd.message(format_time(duration))
            if TIMER_BUTTON_PRESSED:
                next_state = ProgramState.JUST_FINISHED
            else:
                time.sleep(LDR_QUERY_DELAY)
        elif program_state == ProgramState.JUST_FINISHED:
            if previous_state != ProgramState.JUST_FINISHED:
                set_name_and_time(lcd, WHITE, runner_name, duration)
                write_attempt_to_file(runner_name,
                                      duration,
                                      raw_duration,
                                      penalties,
                                      TRIP_TIME_PENALTY)
                # NOTE(review): `record`/`new_record` are assigned when
                # entering READY_TO_GO; the state graph guarantees this
                # state is only reached after passing through it.
                if record is None or (duration < record):
                    new_record = True
                    color_index = 0
                    record_color_time_swap = time.time()
            # Celebrate a record by cycling the backlight colours.
            if new_record and (record_color_time_swap < time.time()):
                lcd.set_color(*RECORD_COLORS[color_index])
                record_color_time_swap = time.time() + RECORD_TIME_COLOR_DWELL
                color_index = (color_index + 1) % len(RECORD_COLORS)
            if NAME_BUTTON_PRESSED:
                next_state = ProgramState.NAME_ENTRY
        previous_state = program_state
        program_state = next_state
        # Consume the latched button presses for this pass.
        TIMER_BUTTON_PRESSED = False
        NAME_BUTTON_PRESSED = False
def format_time(duration: float):
    """Render elapsed seconds as "MM:SS.f" for the 16-character LCD."""
    clock = str(datetime.timedelta(seconds=duration))
    # timedelta renders as "H:MM:SS[.ffffff]"; drop the leading "H:" and
    # keep seven characters: minutes, seconds and one fractional digit.
    return clock[2:9]
def write_attempt_to_file(*items):
    """Append one run record to RESULTS_FILE: a timestamp column followed
    by *items* (name, duration, raw duration, penalties, penalty size)."""
    with open(RESULTS_FILE, 'a') as times_file:
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        csv.writer(times_file).writerow([stamp, *items])
# Run the full timer application when executed as a script.
if __name__ == "__main__":
    high_level_loop()
| 2.5 | 2 |
PK.py | pablovin/P-AffMem | 7 | 12766590 | """
Implementation of the model.
Parts of the code are inherited from the official CAAE implementation (https://arxiv.org/abs/1702.08423, https://github.com/ZZUTK/Face-Aging-CAAE).
"""
import os
import sys
import time
from glob import glob
import numpy as np
import tensorflow as tf
from scipy.io import loadmat, savemat
from PK_Utils.PK_config import *
from PK_Utils.PK_image_ops import *
from PK_Utils.PK_subnetworks import encoder, generator, d_img, d_prior, d_em
from PK_Utils.PK_vgg_face import face_embedding
from metrics import concordance_cc
class Model(object):
"""
Implementation of the model used.
"""
    def __init__(self, session, useEmotion=False):
        """Build the full TF1 graph: placeholders, encoder/generator, the
        discriminators, the combined loss and a checkpoint saver.

        :param session: the tf.compat.v1.Session the graph will run in.
        :param useEmotion: if True, also build the arousal/valence regressor
            on the latent code and add its MSE loss to the total.

        NOTE(review): block nesting reconstructed from a whitespace-mangled
        source - confirm the loss ops are meant to live inside the
        tf.device scope.
        """
        self.useEmotion = useEmotion
        self.session = session
        # Pretrained VGG-Face weights (.mat) used for the perceptual loss.
        self.vgg_weights = loadmat(vggMat)

        # -- INPUT PLACEHOLDERS -----------------------------------------------------------
        # ---------------------------------------------------------------------------------
        self.input_image = tf.compat.v1.placeholder(
            tf.float32,
            [size_batch, size_image, size_image, 3],
            name='input_images'
        )
        self.valence = tf.compat.v1.placeholder(
            tf.float32,
            [size_batch, 1],
            name='valence_labels'
        )
        self.arousal = tf.compat.v1.placeholder(
            tf.float32,
            [size_batch, 1],
            name='arousal_labels'
        )
        # Samples from the prior distribution over the latent code z.
        self.z_prior = tf.compat.v1.placeholder(
            tf.float32,
            [size_batch, num_z_channels],
            name='z_prior'
        )

        # -- GRAPH ------------------------------------------------------------------------
        # ---------------------------------------------------------------------------------
        print('\n\t SETTING UP THE GRAPH')
        with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()):
            with tf.device(device):
                # -- NETWORKS -------------------------------------------------------------
                # encoder: input image --> latent code z
                self.z = encoder(self.input_image)
                # generator: z + arousal + valence --> generated image
                self.G = generator(self.z,
                                   valence=self.valence,
                                   arousal=self.arousal)
                # Discriminator on z: encoded image vs. prior sample.
                self.Dz, self.Dz_logits = d_prior(self.z)  # Discriminator_Z on encoded image
                self.Dz_prior, self.Dz_prior_logits = d_prior(self.z_prior,
                                                              reuse_variables=True)  # Discriminator_Z on prior sample
                # Discriminator on images, conditioned on arousal/valence.
                self.Dimg_G, self.Dimg_G_logits = d_img(self.G,
                                                        valence=self.valence,
                                                        arousal=self.arousal)  # on generated image
                self.Dimg_Original, self.Dimg_Original_logits = d_img(self.input_image,
                                                                      valence=self.valence,
                                                                      arousal=self.arousal,
                                                                      reuse_variables=True)  # on original image
                if self.useEmotion:
                    # Regressor predicting arousal/valence from the latent code.
                    self.D_emArousal, self.D_emValence, self.D_em_arousal_logits, self.D_em_valence_logits = d_em(self.z, reuse_variables=True)

                # -- LOSSES ---------------------------------------------------------------
                # ---- VGG perceptual (identity) loss, inherited from the
                # official ExprGAN implementation
                # (https://github.com/HuiDingUMD/ExprGAN): L1 distance between
                # VGG-Face feature maps of the real and generated image, each
                # normalised by its spatial resolution.
                real_conv1_2, real_conv2_2, real_conv3_2, real_conv4_2, real_conv5_2 = face_embedding(self.vgg_weights, self.input_image)
                fake_conv1_2, fake_conv2_2, fake_conv3_2, fake_conv4_2, fake_conv5_2 = face_embedding(self.vgg_weights, self.G)
                conv1_2_loss = tf.reduce_mean(tf.abs(real_conv1_2 - fake_conv1_2)) / 224. / 224.
                conv2_2_loss = tf.reduce_mean(tf.abs(real_conv2_2 - fake_conv2_2)) / 112. / 112.
                conv3_2_loss = tf.reduce_mean(tf.abs(real_conv3_2 - fake_conv3_2)) / 56. / 56.
                conv4_2_loss = tf.reduce_mean(tf.abs(real_conv4_2 - fake_conv4_2)) / 28. / 28.
                conv5_2_loss = tf.reduce_mean(tf.abs(real_conv5_2 - fake_conv5_2)) / 14. / 14.

                # loss function of discriminator on z
                self.D_z_loss_z = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(logits=self.Dz_logits, labels=tf.ones_like(self.Dz_logits))
                )
                # NOTE(review): unusual formulation - the prior sample's
                # logits are negated (1 - logits) and labelled fake; verify
                # against the reference CAAE implementation.
                self.D_z_loss_prior = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(logits=1 - self.Dz_prior_logits,
                                                            labels=tf.zeros_like(self.Dz_prior_logits))
                )
                # loss function of discriminator on images
                self.D_img_loss_input = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(logits=self.Dimg_Original_logits, labels=tf.ones_like(self.Dimg_Original_logits))
                )
                self.D_img_loss_G = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(logits=1 - self.Dimg_G_logits, labels=tf.zeros_like(self.Dimg_G_logits))
                )
                if self.useEmotion:
                    # MSE between predicted and ground-truth arousal/valence.
                    self.D_em_arousal_loss = tf.compat.v1.losses.mean_squared_error(predictions=self.D_em_arousal_logits, labels=self.arousal)
                    self.D_em_valence_loss = tf.compat.v1.losses.mean_squared_error(predictions=self.D_em_valence_logits,
                                                                                    labels=self.valence)
                # NOTE(review): commented-out alternatives (adversarial
                # E_z/G_img losses, a combined d_em MSE loss and CCC metrics
                # for arousal/valence) were removed here for readability -
                # recover them from version control if needed.

                # ---------------------------------------------------------------------------------
                # Loss combination (weights as in the original experiments).
                # ---------------------------------------------------------------------------------
                self.loss_rec = tf.reduce_mean(tf.abs(self.input_image - self.G))  # L1 loss
                self.loss_Iden = conv1_2_loss + conv2_2_loss + conv3_2_loss + conv4_2_loss + conv5_2_loss
                self.loss_Lz = self.D_z_loss_prior + self.D_z_loss_z
                self.loss_Di = self.D_img_loss_input + self.D_img_loss_G
                if self.useEmotion:
                    self.loss_Dem = self.D_em_arousal_loss + self.D_em_valence_loss
                    self.loss_Total = self.loss_rec + self.loss_Iden * 0.3 + self.loss_Lz * 0.01 + self.loss_Di * 0.01 + self.loss_Dem*0.001
                else:
                    self.loss_Total = self.loss_rec + self.loss_Iden * 0.3 + self.loss_Lz * 0.01 + self.loss_Di * 0.01

        # -- TRAINABLE VARIABLES ----------------------------------------------------------
        # Variables are grouped per sub-network by the name prefixes used in
        # PK_subnetworks, for the per-network optimizers built in train().
        trainable_variables = tf.compat.v1.trainable_variables()
        # variables of encoder
        self.E_variables = [var for var in trainable_variables if 'E_' in var.name]
        # variables of generator
        self.G_variables = [var for var in trainable_variables if 'G_' in var.name]
        # variables of discriminator on the prior / latent code
        self.D_z_variables = [var for var in trainable_variables if 'D_prior_' in var.name]
        # variables of discriminator on real images
        self.D_img_variables = [var for var in trainable_variables if 'D_img_' in var.name]

        # -- SUMMARY ----------------------------------------------------------------------
        # NOTE(review): a large block of commented-out TensorBoard summary
        # definitions (histograms/scalars for z, the discriminator logits
        # and all loss terms) lived here; removed for readability - restore
        # from version control if summaries are reinstated.

        # for saving the graph and variables
        self.saver = tf.compat.v1.train.Saver(max_to_keep=10)
    def train(self,
              num_epochs=2,  # number of epochs
              learning_rate=0.0002,  # learning rate of optimizer
              beta1=0.5,  # parameter for Adam optimizer
              decay_rate=1.0,  # learning rate decay (0, 1], 1 means no decay
              use_trained_model=False,  # used the saved checkpoint to initialize the model
              ):
        """Run the main training loop for the encoder/generator network.

        Loads training file names from the module-level ``data_path`` and
        validation file names from ``validation_path``, balances the classes
        via :py:meth:`fill_up_equally`, then iterates over epochs/batches,
        logging per-batch losses to ``<save_dir>Log.txt``.  Every 500 batches
        it saves sample and test images; after every epoch it validates and
        writes a checkpoint.

        :param num_epochs: number of passes over the training data.
        :param learning_rate: initial learning rate for the Adam optimizer.
        :param beta1: Adam beta1 momentum parameter.
        :param decay_rate: exponential LR decay factor in (0, 1]; 1 disables decay.
        :param use_trained_model: restore the latest checkpoint before training.
        """
        enable_shuffle = True
        # set learning rate decay
        with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()):
            with tf.device('/device:CPU:0'):
                self.EG_global_step = tf.Variable(0, trainable=False, name='global_step')
        # -- LOAD FILE NAMES --------------------------------------------------------------
        # ---------------------------------------------------------------------------------
        # ---- TRAINING DATA
        file_names = [data_path + x for x in os.listdir(data_path)]
        # Oversample under-represented classes so all 8 appear equally often.
        file_names = self.fill_up_equally(file_names)
        size_data = len(file_names)
        np.random.shuffle(file_names)
        # ---- VALIDATION DATA
        self.validation_files = [validation_path + v for v in os.listdir(validation_path)]
        # -- OPTIMIZERS -------------------------------------------------------------------
        # ---------------------------------------------------------------------------------
        with tf.device(device):
        # with tf.device('/device:GPU:0'):
            EG_learning_rate = tf.compat.v1.train.exponential_decay(
                learning_rate=learning_rate,
                global_step=self.EG_global_step,
                decay_steps=size_data / size_batch * 2,
                decay_rate=decay_rate,
                staircase=True
            )
            # optimizer for encoder + generator
            self.EG_optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=EG_learning_rate,
                beta1=beta1
            ).minimize(
                loss=self.loss_Total,
                global_step=self.EG_global_step,
                var_list=self.E_variables + self.G_variables
            )
            # # optimizer for discriminator on z
            # self.D_z_optimizer = tf.compat.v1.train.AdamOptimizer(
            #     learning_rate=EG_learning_rate,
            #     beta1=beta1
            # ).minimize(
            #     loss=self.loss_Lz,
            #     var_list=self.D_z_variables
            # )
            #
            # # optimizer for discriminator on image
            # self.D_img_optimizer = tf.compat.v1.train.AdamOptimizer(
            #     learning_rate=EG_learning_rate,
            #     beta1=beta1
            # ).minimize(
            #     loss=self.loss_Di,
            #     var_list=self.D_img_variables
            # )
            # # optimizer for emotion
            # self.D_em_optimizer = tf.compat.v1.train.AdamOptimizer(
            #     learning_rate=EG_learning_rate,
            #     beta1=beta1
            # ).minimize(
            #     loss=self.D_em_loss,
            #     var_list=self.D_em_variables
            # )
            # # -- TENSORBOARD WRITER ----------------------------------------------------------
            # # ---------------------------------------------------------------------------------
            # self.writer = tf.summary.create_file_writer(save_dir)
        # -- TENSORBOARD SUMMARY ----------------------------------------------------------
        # ---------------------------------------------------------------------------------
        # with tf.device('/device:CPU:0'):
        #     self.EG_learning_rate_summary = tf.summary.scalar('EG_learning_rate', EG_learning_rate)
        #     self.summary = tf.compat.v1.summary.merge([
        #         self.z_summary, self.z_prior_summary,
        #         self.D_z_loss_z_summary, self.D_z_loss_prior_summary,
        #         self.D_z_logits_summary, self.D_z_prior_logits_summary,
        #         self.EG_loss_summary, self.E_z_loss_summary,
        #         self.D_img_loss_input_summary, self.D_img_loss_G_summary,
        #         self.G_img_loss_summary, self.EG_learning_rate_summary,
        #         self.D_G_logits_summary, self.D_input_logits_summary,
        #         self.vgg_loss_summary, self.D_em_arousal_logits_summary, self.D_em_valence_logits_summary, self.D_em_loss_summary, self.D_em_ccc_arousal_summary, self.D_em_ccc_valence_summary
        #     ])
        # self.writer = tf.summary.FileWriter(os.path.join(save_dir, 'summary'), self.session.graph)
        # ************* get some random samples as testing data to visualize the learning process *********************
        sample_files = file_names[0:size_batch]
        # Remove the visualization samples from the training pool.
        file_names[0:size_batch] = []
        sample = [load_image(
            image_path=sample_file,
            image_size=size_image,
            image_value_range=image_value_range,
            is_gray=False,
        ) for sample_file in sample_files]
        sample_images = np.array(sample).astype(np.float32)
        # NOTE(review): here field [2] of the '__'-separated file name is read
        # as arousal and [3] as valence, but the per-batch labels further down
        # use the opposite mapping ([2]=valence, [3]=arousal).  One of the two
        # looks swapped -- confirm against the file-naming convention.
        sample_label_arousal = np.asarray([[float(x.split('__')[2])] for x in sample_files])
        sample_label_valence = np.asarray([[float(x.split('__')[3][0:-4])] for x in sample_files])
        # ******************************************* training *******************************************************
        print('\n\tPreparing for training ...')
        # initialize the graph
        tf.compat.v1.global_variables_initializer().run()
        # load check point
        if use_trained_model:
            if self.load_checkpoint():
                print("\tSUCCESS ^_^")
            else:
                print("\tFAILED >_<!")
        # epoch iteration
        num_batches = len(file_names) // size_batch
        for epoch in range(num_epochs):
            if enable_shuffle:
                np.random.shuffle(file_names)
            for ind_batch in range(num_batches):
                start_time = time.time()
                # read batch images and labels
                batch_files = file_names[ind_batch*size_batch:(ind_batch+1)*size_batch]
                batch = [load_image(
                    image_path=batch_file,
                    image_size=size_image,
                    image_value_range=image_value_range,
                    is_gray=False,
                ) for batch_file in batch_files]
                batch_images = np.array(batch).astype(np.float32)
                batch_label_valence = np.asarray([[float(x.split('__')[2])] for x in batch_files])
                batch_label_arousal = np.asarray([[float(x.split('__')[3][0:-4])] for x in batch_files])
                # prior distribution on the prior of z
                # NOTE(review): the z prior is sampled uniformly over
                # image_value_range -- presumably an intentional reuse of the
                # same range for the latent prior; confirm.
                batch_z_prior = np.random.uniform(
                    image_value_range[0],
                    image_value_range[-1],
                    [size_batch, num_z_channels]
                ).astype(np.float32)
                # # update
                # _, _, _, EG_err, Ez_err, Dz_err, Dzp_err, Gi_err, DiG_err, Di_err, vgg, em, arousalCCC, valenceCCC = self.session.run(
                #     fetches = [
                #         self.EG_optimizer,
                #         self.D_z_optimizer,
                #         self.D_img_optimizer,
                #         self.EG_loss,
                #         self.E_z_loss,
                #         self.D_z_loss_z,
                #         self.D_z_loss_prior,
                #         self.G_img_loss,
                #         self.D_img_loss_G,
                #         self.D_img_loss_input,
                #         # self.tv_loss,
                #         self.vgg_loss,
                #         self.D_em_loss,
                #         self.D_em_ccc_arousal,
                #         self.D_em_ccc_valence
                #     ],
                #     feed_dict={
                #         self.input_image: batch_images,
                #         self.valence: batch_label_valence,
                #         self.arousal: batch_label_arousal,
                #         self.z_prior: batch_z_prior
                #     }
                # )
                # update
                # _, _, _, EG_err, Ez_err, Dz_err, Dzp_err, Gi_err, DiG_err, Di_err, vgg = self.session.run(
                #     fetches=[
                #         self.EG_optimizer,
                #         self.D_z_optimizer,
                #         self.D_img_optimizer,
                #         self.loss_rec,
                #         self.E_z_loss,
                #         self.D_z_loss_z,
                #         self.D_z_loss_prior,
                #         self.G_img_loss,
                #         self.D_img_loss_G,
                #         self.D_img_loss_input,
                #         # self.tv_loss,
                #         self.loss_Iden
                #     ],
                #     feed_dict={
                #         self.input_image: batch_images,
                #         self.valence: batch_label_valence,
                #         self.arousal: batch_label_arousal,
                #         self.z_prior: batch_z_prior
                #     }
                # )
                # print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tEG_err=%.4f\tVGG=%.4f" %
                #       (epoch + 1, num_epochs, ind_batch + 1, num_batches, EG_err, vgg))
                # print("\tEz=%.4f\tDz=%.4f\tDzp=%.4f" % (Ez_err, Dz_err, Dzp_err))
                # print("\tGi=%.4f\tDi=%.4f\tDiG=%.4f" % (Gi_err, Di_err, DiG_err))
                #
                #
                # update
                if self.useEmotion:
                    # Single optimizer step; all losses are fetched only for logging.
                    _, lossTotal, lossRec, lossIden, lossLz, lossLzPrior, lossLzOriginal, lossDimg, lossDimgInput, lossDimgGenerated, lossDem, lossDemArousal, lossDemValence = self.session.run(
                        fetches=[
                            self.EG_optimizer,
                            self.loss_Total,
                            self.loss_rec,
                            self.loss_Iden,
                            self.loss_Lz,
                            self.D_z_loss_prior,
                            self.D_z_loss_z,
                            self.loss_Di,
                            self.D_img_loss_input,
                            self.D_img_loss_G,
                            self.loss_Dem,
                            self.D_em_arousal_loss,
                            self.D_em_valence_loss
                        ],
                        feed_dict={
                            self.input_image: batch_images,
                            self.valence: batch_label_valence,
                            self.arousal: batch_label_arousal,
                            self.z_prior: batch_z_prior
                        }
                    )
                    print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tLoss_Total=%.4f" %
                          (epoch + 1, num_epochs, ind_batch + 1, num_batches, lossTotal), file=open(save_dir+"Log.txt", "a"))
                    print("\tL_rec=%.4f\tL_Iden=%.4f\tL_Z=%.4f\tL_Img=%.4f\tL_em=%.4f" % (lossRec, lossIden, lossLz, lossDimg,lossDem), file=open(save_dir+"Log.txt", "a"))
                    print("\tL_Z_Prior=%.4f\tL_Z_original=%.4f" % (lossLzPrior, lossLzOriginal), file=open(save_dir+"Log.txt", "a"))
                    print("\tL_Img_Input=%.4f\tL_Img_Generated=%.4f" % (lossDimgInput, lossDimgGenerated), file=open(save_dir+"Log.txt", "a"))
                    print("\tL_Dem_Arousal=%.4f\tL_Dem_Valence=%.4f" % (lossDemArousal, lossDemValence), file=open(save_dir+"Log.txt", "a"))
                else:
                    _, lossTotal, lossRec, lossIden, lossLz, lossLzPrior, lossLzOriginal, lossDimg, lossDimgInput, lossDimgGenerated = self.session.run(
                        fetches=[
                            self.EG_optimizer,
                            self.loss_Total,
                            self.loss_rec,
                            self.loss_Iden,
                            self.loss_Lz,
                            self.D_z_loss_prior,
                            self.D_z_loss_z,
                            self.loss_Di,
                            self.D_img_loss_input,
                            self.D_img_loss_G,
                        ],
                        feed_dict={
                            self.input_image: batch_images,
                            self.valence: batch_label_valence,
                            self.arousal: batch_label_arousal,
                            self.z_prior: batch_z_prior
                        }
                    )
                    print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tLoss_Total=%.4f"%
                          (epoch + 1, num_epochs, ind_batch + 1, num_batches, lossTotal), file=open(save_dir+"Log.txt", "a"))
                    print("\tL_rec=%.4f\tL_Iden=%.4f\tL_Z=%.4f\tL_Img=%.4f" % (lossRec, lossIden, lossLz,lossDimg), file=open(save_dir+"Log.txt", "a"))
                    print("\tL_Z_Prior=%.4f\tL_Z_original=%.4f" % (lossLzPrior, lossLzOriginal), file=open(save_dir+"Log.txt", "a"))
                    print("\tL_Img_Input=%.4f\tL_Img_Generated=%.4f" % (lossDimgInput, lossDimgGenerated), file=open(save_dir+"Log.txt", "a"))
                # print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tEG_err=%.4f\tVGG=%.4f\tEm=%.4f" %
                #       (epoch+1, num_epochs, ind_batch+1, num_batches, EG_err, vgg, em))
                # print("\tArousalCCC=%.4f\tValenceCCC=%.4f" % (arousalCCC, valenceCCC))
                # estimate left run time
                elapse = time.time() - start_time
                time_left = ((num_epochs - epoch - 1) * num_batches + (num_batches - ind_batch - 1)) * elapse
                print("\tTime left: %02d:%02d:%02d" %
                      (int(time_left / 3600), int(time_left % 3600 / 60), time_left % 60))
                # # add to summary
                # summary = self.summary.eval(
                #     feed_dict={
                #         self.input_image: batch_images,
                #         self.valence: batch_label_valence,
                #         self.arousal: batch_label_arousal,
                #         self.z_prior: batch_z_prior
                #     }
                # )
                # self.writer.add_summary(summary, self.EG_global_step.eval())
                # Periodically dump visual progress (every 500 batches).
                if ind_batch%500 == 0:
                    # save sample images for each epoch
                    name = '{:02d}_{:02d}'.format(epoch+1, ind_batch)
                    self.sample(sample_images, sample_label_valence, sample_label_arousal, name+'.png')
                    # TEST
                    test_dir = os.path.join(save_dir, 'test')
                    if not os.path.exists(test_dir):
                        os.makedirs(test_dir)
                    self.test(sample_images, test_dir, name+'.png')
            # save checkpoint for each epoch
            # VALIDATE
            name = '{:02d}_model'.format(epoch+1)
            self.validate(name)
            self.save_checkpoint(name=name)
def save_checkpoint(self, name=''):
checkpoint_dir = os.path.join(save_dir, 'checkpoint')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(
sess=self.session,
save_path=os.path.join(checkpoint_dir, name)
)
def load_checkpoint(self):
print("\n\tLoading pre-trained model ...")
checkpoint_dir = os.path.join(save_dir, 'checkpoint')
checkpoints = tf.train.get_checkpoint_state(checkpoint_dir)
if checkpoints and checkpoints.model_checkpoint_path:
checkpoints_name = os.path.basename(checkpoints.model_checkpoint_path)
self.saver.restore(self.session, os.path.join(checkpoint_dir, checkpoints_name))
return True
else:
return False
def sample(self, images, valence, arousal, name):
sample_dir = os.path.join(save_dir, 'samples')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
z, G = self.session.run(
[self.z, self.G],
feed_dict={
self.input_image: images,
self.valence: valence,
self.arousal: arousal
}
)
size_frame = int(np.sqrt(size_batch))+1
save_batch_images(
batch_images=G,
save_path=os.path.join(sample_dir, name),
image_value_range=image_value_range,
size_frame=[size_frame, size_frame]
)
save_batch_images(
batch_images=images,
save_path=os.path.join(sample_dir, "input.png"),
image_value_range=image_value_range,
size_frame=[size_frame, size_frame]
)
def validate(self, name):
# Create Validation Directory if needed
val_dir = os.path.join(save_dir, 'validation')
if not os.path.exists(val_dir):
os.makedirs(val_dir)
# Create Name Directory if needed
name_dir = os.path.join(val_dir, name)
if not os.path.exists(name_dir):
os.makedirs(name_dir)
# validate
testFile = self.validation_files[0:10]
for image_path in testFile:
n = image_path.split("/")[-1]+".png"
self.test(np.array([load_image(image_path, image_size=96)]), name_dir, n)
def test(self, images, test_dir, name):
images = images[:1, :, :, :]
# valence
if size_batch == 25:
valence = np.arange(0.75, -0.751, -0.375)
valence = np.repeat(valence, 5).reshape((25, 1))
# valence = np.repeat(valence, 7, axis=0)
# arousal
arousal = [np.arange(0.75, -0.751, -0.375)]
arousal = np.repeat(arousal, 5).reshape((25, 1))
arousal = np.asarray([item for sublist in arousal for item in sublist]).reshape((25, 1))
query_images = np.tile(images, (25, 1, 1, 1))
size_frame = (6,7)
elif size_batch == 49:
valence = np.arange(0.75, -0.751, -0.25)
valence = np.repeat(valence, 7).reshape((49, 1))
# valence = np.repeat(valence, 7, axis=0)
# arousal
arousal = [np.arange(0.75, -0.751, -0.25)]
arousal = np.repeat(arousal, 7).reshape((49, 1))
arousal = np.asarray([item for sublist in arousal for item in sublist]).reshape((49, 1))
query_images = np.tile(images, (49, 1, 1, 1))
size_frame = (8, 9)
z, G = self.session.run(
[self.z, self.G],
feed_dict={
self.input_image: query_images,
self.valence: valence,
self.arousal: arousal
}
)
save_output(
input_image=images,
output=G,
path=os.path.join(test_dir, name),
image_value_range = image_value_range, size_frame = size_frame
)
def fill_up_equally(self, X):
# print ("Value:", X[0])
# print ("Value:", X[0].split("s"))
# input("here")
sorted_samples = [[x for x in X if int(x.split('__')[1]) == r] for r in range(8)]
amounts = [len(x) for x in sorted_samples]
differences = [max(amounts) - a for a in amounts]
for i, d in enumerate(differences):
samples = sorted_samples[i]
added = [samples[x] for x in np.random.choice(range(len(samples)), d)]
sorted_samples[i] = sorted_samples[i] + added
sorted_samples_flat = [item for sublist in sorted_samples for item in sublist]
np.random.seed = 1234567
np.random.shuffle(sorted_samples_flat)
return sorted_samples_flat
class Logger(object):
def __init__(self, output_file):
self.terminal = sys.stdout
self.log = open(output_file, "a")
def write(self, message):
self.terminal.write(message)
if not self.log.closed:
self.log.write(message)
def close(self):
self.log.close()
def flush(self):
self.close()
# needed for python 3 compatibility
pass
| 2.34375 | 2 |
common/messages.py | ne0h/battleship- | 0 | 12766591 | <reponame>ne0h/battleship-
# Message-type identifiers used on the wire; each constant maps a symbolic
# name to its string representation in the protocol.
CREATE_GAME = 'game_create'
JOIN_GAME = 'game_join'
LEAVE_GAME = 'game_abort'
SET_NICK = 'nickname_set'
INIT_BOARD = 'board_init'
FIRE = 'attack'
NUKE = 'special_attack'
MOVE = 'move'
SURRENDER = 'surrender'
CHAT_SEND = 'chat_send'
| 1.296875 | 1 |
fix_osm.py | MatheusMaciel/wrangle-openstreetmaps-data | 0 | 12766592 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains functions used to fix a osm file.
Attributes:
postcode_re (:re obj:): Regex compiler to USA postal codes.
"""
import xml.etree.cElementTree as ET # Use cElementTree or lxml if too slow
import pprint
import re
# Matches a trailing US ZIP code: 5 digits with an optional "-1234" ZIP+4 suffix.
#postcode regex https://stackoverflow.com/questions/7425860/regular-expression-get-us-zip-code
postcode_re = re.compile(r'.*(\d{5}(\-\d{4})?)$', re.IGNORECASE)
# Only 'US' occurs on ways, so that is the standard we normalize to.
def fix_country(tag):
    """Normalize the country value of an addr:country tag to 'US'."""
    corrections = {'USA': 'US'}
    value = tag.get('v')
    if value in corrections:
        tag.attrib['v'] = corrections[value]
# The standard spellings are 'Florida' or 'FL'.
def fix_state(tag):
    """Normalize common misspellings of the state value to 'Florida'."""
    corrections = {
        'florida': 'Florida',
        'F': 'Florida',
        'fl': 'Florida',
        'Fl': 'Florida',
        'FL.': 'Florida',
    }
    value = tag.get('v')
    if value in corrections:
        tag.attrib['v'] = corrections[value]
# http://mentalfloss.com/article/53384/what%E2%80%99s-deal-those-last-4-digits-zip-codes
def fix_postcode(tag):
    """Trim the postcode value down to the trailing ZIP / ZIP+4 part,
    or wipe the tag entirely when no ZIP code can be found."""
    match = postcode_re.search(tag.get('v'))
    if match:
        tag.attrib['v'] = match.groups()[0]
    else:
        tag.clear()
def fix_node(element):
    """ Function that fixes NODE tags.

        Args:
            element (:obj:): cElementTree object. In this case, a NODE tag.
    """
    for tag in element.iter("tag"):
        if tag.get('k') == 'addr:country':
            fix_country(tag)
        elif tag.get('k') == 'addr:state':
            fix_state(tag)
        # 'elif' (previously a separate 'if') for consistency with fix_way;
        # a tag has a single 'k' value, so behavior is unchanged.
        elif tag.get('k') == 'addr:postcode':
            fix_postcode(tag)
def fix_way(element):
    """ Function that fixes WAY tags.

        Args:
            element (:obj:): cElementTree object. In this case, a WAY tag.
    """
    for tag in element.iter("tag"):
        key = tag.get('k')
        if key == 'addr:country':
            fix_country(tag)
        elif key == 'addr:state':
            fix_state(tag)
        elif key == 'addr:postcode':
            fix_postcode(tag)
def fix_data(osm, output='miami_florida_v1.osm'):
    """Main function used to fix the osm data. This function calls all other
    specific fix functions and saves the result to a file.

        Args:
            osm (string): Path to the input osm file.
            output (string): Path where the fixed osm file is written.
                Defaults to 'miami_florida_v1.osm' for backward compatibility
                with the previous hard-coded name.
    """
    NODE_TAG = 'node'
    WAY_TAG = 'way'
    context = ET.iterparse(osm, events=("start",))
    for event, elem in context:
        if elem.tag == NODE_TAG:
            fix_node(elem)
        if elem.tag == WAY_TAG:
            fix_way(elem)
    # iterparse exposes the fully-built tree via .root once iteration is done.
    ET.ElementTree(context.root).write(output)
algo/src/stack.py | ssavinash1/Algorithm_stanford | 24 | 12766593 | # -*- coding: utf-8 -*-
class Stack(object):
    """ A minimal LIFO stack backed by a singly linked chain of dict nodes.

    Attrs:
        top: dict|None, the most recently pushed node.
        count: int, the number of stored elements.
    """
    def __init__(self):
        self.top = None
        self.count = 0

    def __len__(self):
        return self.count

    def is_empty(self):
        return self.count == 0

    def pop(self):
        """ Remove and return the top value, or None when the stack is empty. """
        node = self.top
        if node is None:
            return None
        self.top = node['prev']
        self.count -= 1
        return node['value']

    def push(self, value):
        """ Place a new value on top of the stack. """
        # The new node simply points at whatever was on top before
        # (which may be None for an empty stack).
        self.top = {'value': value, 'prev': self.top}
        self.count += 1

    def peek(self):
        """ Return the top value without removing it, or None when empty. """
        node = self.top
        return None if node is None else node['value']
| 4.125 | 4 |
code/21.py | nonword1/LiuBoss.LeetCode | 0 | 12766594 | <reponame>nonword1/LiuBoss.LeetCode
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Merge two sorted linked lists into a single sorted list.

        Builds a fresh node for the merged prefix and recurses; once either
        input runs out, the remaining list is returned as-is.
        """
        if not l1 or not l2:
            # One side exhausted: the other (possibly None) is the answer.
            return l1 or l2
        node = ListNode(-1)
        if l1.val < l2.val:
            node.val = l1.val
            node.next = self.mergeTwoLists(l1.next, l2)
        else:
            node.val = l2.val
            node.next = self.mergeTwoLists(l1, l2.next)
        return node
prac/ch1/nand_gate.py | soyeonjangg/deep-learning-from-scratch | 0 | 12766595 | <reponame>soyeonjangg/deep-learning-from-scratch<filename>prac/ch1/nand_gate.py<gh_stars>0
import numpy as np
def NAND(x1, x2):
    """NAND gate as a single perceptron: returns 1 unless both inputs are 1.

    :param x1: first binary input (0 or 1).
    :param x2: second binary input (0 or 1).
    :return: 0 if w.x + b <= 0, else 1.
    """
    x = np.array([x1, x2])
    w = np.array(
        [-0.5, -0.5]
    )  # the only diff between AND is that weight and bias are different
    b = 0.7
    # Bug fix: np.sum(x, w) passed the weight array as the `axis` argument.
    # The weighted sum is sum(w * x) + b.
    tmp = np.sum(w * x) + b
    if tmp <= 0:
        return 0
    else:
        return 1
| 3.0625 | 3 |
LeetCode/491 Increasing Subsequences.py | gesuwen/Algorithms | 0 | 12766596 | <gh_stars>0
# Depth-first Search
# Given an integer array, your task is to find all the different possible increasing subsequences of the given array, and the length of an increasing subsequence should be at least 2 .
#
# Example:
# Input: [4, 6, 7, 7]
# Output: [[4, 6], [4, 7], [4, 6, 7], [4, 6, 7, 7], [6, 7], [6, 7, 7], [7,7], [4,7,7]]
# Note:
# The length of the given array will not exceed 15.
# The range of integer in the given array is [-100,100].
# The given array may contain duplicates, and two equal integers should also be considered as a special case of increasing sequence.
class Solution:
    def findSubsequences(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        found = []
        self.subsets(nums, 0, [], found)
        return found

    def subsets(self, nums, index, temp, res):
        # Record the current non-decreasing prefix once it has length >= 2.
        if len(temp) >= 2 and len(nums) >= index:
            res.append(list(temp))
        # Values already branched on at this depth; skipping them avoids
        # emitting the same subsequence twice.
        seen = set()
        for pos in range(index, len(nums)):
            candidate = nums[pos]
            if temp and candidate < temp[-1]:
                continue
            if candidate in seen:
                continue
            seen.add(candidate)
            temp.append(candidate)
            self.subsets(nums, pos + 1, temp, res)
            temp.pop()
| 3.8125 | 4 |
examples/plot_sine_wave_2d.py | ktanishqk/py-earth | 360 | 12766597 | """
==================================
Plotting two simple sine functions
==================================
A simple example plotting a fit of two sine functions.
"""
import numpy
import matplotlib.pyplot as plt
from pyearth import Earth
# Create some fake data
numpy.random.seed(2)
m = 10000
n = 10
X = 80 * numpy.random.uniform(size=(m, n)) - 40
y1 = 100 * \
    numpy.abs(numpy.sin((X[:, 6]) / 10) - 4.0) + \
    10 * numpy.random.normal(size=m)
y2 = 100 * \
    numpy.abs(numpy.sin((X[:, 6]) / 2) - 8.0) + \
    5 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth(max_degree=3, minspan_alpha=.5)
# Stack the two targets column-wise and fit both in a single call.
y_mix = numpy.concatenate((y1[:, numpy.newaxis], y2[:, numpy.newaxis]), axis=1)
model.fit(X, y_mix)
# Print the model
print(model.trace())
print(model.summary())
# Plot the model
y_hat = model.predict(X)
# One subplot per target: data in red, model predictions in blue.
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.plot(X[:, 6], y_mix[:, 0], 'r.')
ax.plot(X[:, 6], model.predict(X)[:, 0], 'b.')
ax = fig.add_subplot(1, 2, 2)
ax.plot(X[:, 6], y_mix[:, 1], 'r.')
ax.plot(X[:, 6], model.predict(X)[:, 1], 'b.')
plt.show()
| 3.703125 | 4 |
pandera/external_config.py | matthiashuschle/pandera | 1 | 12766598 | <reponame>matthiashuschle/pandera<filename>pandera/external_config.py
"""Configuration for external packages."""
import os
try:
    # try importing koalas to see if it exists. This is important because the
    # pandera.typing module defines a Series type that inherits from
    # pandas.Series, and koalas v1+ injects a __getitem__ method to pandas
    # Series and DataFrames to support type hinting:
    # https://koalas.readthedocs.io/en/latest/user_guide/typehints.html#type-hinting-with-names
    # pylint: disable=unused-import
    # Remember which variables *we* set, so a failed import only undoes our
    # own changes instead of clobbering values provided by the user.
    _set_spark_local_ip = False
    _set_pyarrow_ignore_timezone = False
    if os.getenv("SPARK_LOCAL_IP") is None:
        os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
        _set_spark_local_ip = True
    if os.getenv("PYARROW_IGNORE_TIMEZONE") is None:
        # This can be overriden by the user
        os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
        _set_pyarrow_ignore_timezone = True
    import databricks.koalas as ks
except ImportError:
    # Bug fix: these pops used to be unconditional (and without a default),
    # which removed values the user had set themselves.
    if _set_spark_local_ip:
        os.environ.pop("SPARK_LOCAL_IP", None)
    if _set_pyarrow_ignore_timezone:
        os.environ.pop("PYARROW_IGNORE_TIMEZONE", None)
| 2.03125 | 2 |
setup.py | walles/px | 149 | 12766599 | #!/usr/bin/env python
import os
import re
import shutil
import filecmp
import tempfile
import subprocess
from setuptools import setup
VERSIONFILE = "px/version.py"
# Derive the version string from git; "--dirty" marks uncommitted changes.
git_version = (
    subprocess.check_output(["git", "describe", "--dirty"]).decode("utf-8").strip()
)
# Write the candidate version file to a temp file first so we can compare it
# to the existing one and avoid needlessly touching its timestamp (see below).
with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as tmp:
    tmp.write(b"# NOTE: Auto generated by setup.py, no touchie!\n")
    tmp.write(b'VERSION = "%s"\n' % bytearray(git_version, "utf_8"))
    # Flushing is required for filecmp.cmp() to work (below)
    tmp.flush()
if not os.path.isfile(VERSIONFILE):
    # No version file found
    shutil.move(tmp.name, VERSIONFILE)
elif not filecmp.cmp(tmp.name, VERSIONFILE):
    # Version file needs updating
    shutil.move(tmp.name, VERSIONFILE)
else:
    # VERSIONFILE was already up to date. If we touch it in this
    # case, it will have its file timestamp updated, which will
    # force the slow px_integration_test.py tests to get rerun.
    #
    # Just clean up our tempfile and be merry.
    os.remove(tmp.name)
requirements = None
with open("requirements.txt") as reqsfile:
    requirements = reqsfile.readlines()
with open(os.path.join(os.path.dirname(__file__), "README.rst")) as fp:
    LONG_DESCRIPTION = fp.read()
if not re.match(r"^[0-9]+\.[0-9]+\.[0-9]+$", git_version):
    # Setuptools wants nice version numbers
    git_version = "0.0.0"
setup(
    name="pxpx",
    version=git_version,
    description="ps and top for Human Beings",
    long_description=LONG_DESCRIPTION,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/walles/px",
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: System :: Monitoring",
        "Topic :: System :: Systems Administration",
        "Topic :: Utilities",
    ],
    packages=["px"],
    install_requires=requirements,
    # See: http://setuptools.readthedocs.io/en/latest/setuptools.html#setting-the-zip-safe-flag
    zip_safe=True,
    setup_requires=[
        "pytest-runner",
    ],
    tests_require=[
        "pytest",
    ],
    entry_points={
        "console_scripts": ["px = px.px:main", "ptop = px.px:main"],
    }
    # Note that we're by design *not* installing man pages here.
    # Using "data_files=" only puts the man pages in the egg file,
    # and installing that egg doesn't put them on the destination
    # system.
    #
    # After trying to figure this out for a bit, my conclusion is
    # that "pip install" simply isn't meant for installing any man
    # pages.
    #
    # /<EMAIL> 2018aug27
)
| 1.84375 | 2 |
zproc/context.py | pycampers/zproc | 106 | 12766600 | import atexit
import multiprocessing
import pprint
import signal
import time
from contextlib import suppress
from typing import Callable, Union, Any, List, Mapping, Sequence, Tuple, cast
from . import util
from .consts import DEFAULT_NAMESPACE
from .process import Process
from .server import tools
from .state.state import State
from .task.map_plus import map_plus
from .task.swarm import Swarm
class ProcessList(list):
    """A ``list`` of :py:class:`Process` objects with bulk helpers to
    start, wait for, and stop all of them at once."""

    def __str__(self):
        return ProcessList.__qualname__ + ": " + pprint.pformat(list(self))

    def __repr__(self):
        return "<" + self.__str__() + ">"

    @staticmethod
    def _wait_or_catch_exc(
        process: Process, timeout: Union[int, float] = None
    ) -> Union[Exception, Any]:
        # Like Process.wait(), but returns the exception instead of raising it.
        try:
            return process.wait(timeout)
        except Exception as e:
            return e

    def wait(
        self, timeout: Union[int, float] = None, safe: bool = False
    ) -> List[Union[Any, Exception]]:
        """
        Call :py:meth:`~Process.wait()` on all the Processes in this list.

        :param timeout:
            Same as :py:meth:`~Process.wait()`.

            This parameter controls the timeout for all the Processes combined,
            not a single :py:meth:`~Process.wait()` call.
        :param safe:
            Suppress any errors that occur while waiting for a Process.

            The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
        :return:
            A ``list`` containing the values returned by child Processes of this Context.
        """
        if safe:
            _wait = self._wait_or_catch_exc
        else:
            _wait = Process.wait

        if timeout is None:
            return [_wait(process) for process in self]
        else:
            # Shared deadline: each successive wait only gets what's left.
            final = time.time() + timeout
            return [_wait(process, final - time.time()) for process in self]

    def start(self):
        """
        Call :py:meth:`~Process.start()` on all the child processes of this Context

        Ignores if a Process is already started, unlike :py:meth:`~Process.start()`,
        which throws an ``AssertionError``.
        """
        # Bug fix: suppress() used to wrap the *whole* loop, so the first
        # already-started Process raised inside it and silently aborted
        # starting every remaining Process.  Suppress per iteration instead,
        # matching the documented "ignores if already started" behavior.
        for process in self:
            with suppress(AssertionError):
                process.start()

    def stop(self):
        """
        Call :py:meth:`~Process.stop()` on all the Processes in this list.

        Retains the same order as ``Context.process_list``.

        :return:
            A ``list`` containing the exitcodes of the child Processes of this Context.
        """
        return [proc.stop() for proc in self]
class Context:
    #: The :py:class:`multiprocessing.Process` object for the server.
    server_process: multiprocessing.Process
    def __init__(
        self,
        server_address: str = None,
        *,
        start_server: bool = True,
        backend: Callable = multiprocessing.Process,
        wait: bool = False,
        cleanup: bool = True,
        namespace: str = DEFAULT_NAMESPACE,
        **process_kwargs
    ) -> None:
        r"""
        Provides a high level interface to :py:class:`State` and :py:class:`Process`.
        Primarily used to manage and launch processes.
        All processes launched using a Context, share the same state.
        Don't share a Context object between Processes / Threads.
        A Context object is not thread-safe.
        :param server_address:
            The address of the server.
            If this is set to ``None``, a random address will be generated.
        :param start_server:
            Whether to start the ZProc server.
            It is started automatically by default.
            If this is set to ``None``, then you must either -
            - Start a server using a different Context object.
            - Start one manually, using :py:func:`start_server`.
            In both cases,
            it the user's responsibility to make sure that the ``server_address`` argument
            is satisfied.
            .. note::
                If the server is not started before-hand,
                the Context object will block infinitely, waiting for the server to respond.
                In case you want to play around,
                the :py:func:`ping` function is handy,
                since it let's you *detect* the presence of a server at a given address.
        :param backend:
            .. include:: /api/snippets/backend.rst
        :param wait:
            Wait for all running process to finish their work before exiting.
            Alternative to manually calling :py:meth:`~Context.wait` at exit.
        :param cleanup:
            Whether to cleanup the process tree before exiting.
            Registers a signal handler for ``SIGTERM``, and an ``atexit`` handler.
        :param \*\*process_kwargs:
            Keyword arguments that :py:class:`~Process` takes,
            except ``server_address`` and ``target``.
            If provided,
            these will be used while creating processes using this Context.
        """
        #: A :py:class:`ProcessList` object containing all Processes created under this Context.
        self.process_list = ProcessList()
        #: Passed on from the constructor. This is read-only.
        self.backend = backend
        #: Passed on from the constructor. This is read-only.
        self.namespace = namespace
        #: Passed on from the constructor.
        self.process_kwargs = process_kwargs
        # Make this Context's namespace/backend the defaults for every
        # Process it spawns (caller-provided kwargs still win).
        self.process_kwargs.setdefault("namespace", self.namespace)
        self.process_kwargs.setdefault("backend", self.backend)
        self.server_address = cast(str, server_address)
        """The server's address.
        This holds the address this Context is connected to,
        not necessarily the value provided in the constructor.
        This is read-only."""
        if start_server:
            self.start_server()
        assert self.server_address is not None, (
            "Couldn't determine the server address. "
            "Hint: Either provide the `server_address` parameter, "
            "or pass `start_server=True`."
        )
        # register cleanup before wait, so that wait runs before cleanup.
        # (order of execution is reversed)
        if cleanup:
            atexit.register(util.clean_process_tree)
            if util.is_main_thread():
                signal.signal(signal.SIGTERM, util.clean_process_tree)
        if wait:
            atexit.register(self.wait)
    def __str__(self):
        return "%s - server: %r at %#x" % (
            self.__class__.__qualname__,
            self.server_address,
            id(self),
        )
    def __repr__(self):
        return util.enclose_in_brackets(self.__str__())
    def create_state(self, value: dict = None, *, namespace: str = None):
        """
        Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
        :param value:
            If provided, call ``state.update(value)``.
        :param namespace:
            Use this as the namespace for the :py:class:`State` object,
            instead of this :py:class:`Context`\ 's namespace.
        :return:
            A :py:class:`State` object.
        """
        if namespace is None:
            namespace = self.namespace
        state = State(self.server_address, namespace=namespace)
        if value is not None:
            state.update(value)
        return state
    def create_swarm(self, count: int = None):
        """
        Creates a :py:class:`Swarm` bound to this Context's server and
        namespace, and starts ``count`` workers on it.

        :param count: Passed on to :py:meth:`Swarm.start`.
        :return: The started :py:class:`Swarm` object.
        """
        swarm = Swarm(self.server_address, namespace=self.namespace)
        swarm.start(count)
        return swarm
    def start_server(self) -> Tuple[multiprocessing.Process, str]:
        """
        Starts the zproc server and records its process and (possibly
        generated) address on this Context.

        :return: A ``(server_process, server_address)`` tuple.
        """
        out = tools.start_server(self.server_address, backend=self.backend)
        self.server_process, self.server_address = out
        return out
    def _process(
        self, target: Callable = None, **process_kwargs
    ) -> Union[Process, Callable]:
        r"""
        Produce a child process bound to this context.
        Can be used both as a function and decorator:
        .. code-block:: python
            :caption: Usage
            @zproc.process(pass_context=True)  # you may pass some arguments here
            def p1(ctx):
                print('hello', ctx)
            @zproc.process  # or not...
            def p2(state):
                print('hello', state)
            def p3(state):
                print('hello', state)
            zproc.process(p3)  # or just use as a good ol' function
        :param target:
            Passed on to the :py:class:`Process` constructor.
            *Must be omitted when using this as a decorator.*
        :param \*\*process_kwargs:
            .. include:: /api/context/params/process_kwargs.rst
        :return: The :py:class:`Process` instance produced.
        """
        process = Process(
            self.server_address, target, **{**self.process_kwargs, **process_kwargs}
        )
        # Track it so the bulk wait/start/stop helpers can reach it later.
        self.process_list.append(process)
        return process
    def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
        r"""
        Produce one or many child process(s) bound to this context.
        :param \*targets:
            Passed on to the :py:class:`Process` constructor, one at a time.
        :param count:
            The number of processes to spawn for each item in ``targets``.
        :param \*\*process_kwargs:
            .. include:: /api/context/params/process_kwargs.rst
        :return:
            A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
        """
        if not targets:
            # No target given: act as a parametrized decorator.
            def wrapper(target: Callable):
                return self.spawn(target, count=count, **process_kwargs)
            return wrapper
        if len(targets) * count == 1:
            return self._process(targets[0], **process_kwargs)
        return ProcessList(
            self._process(target, **process_kwargs)
            for _ in range(count)
            for target in targets
        )
    def spawn_map(
        self,
        target: Callable,
        map_iter: Sequence[Any] = None,
        *,
        map_args: Sequence[Sequence[Any]] = None,
        args: Sequence = None,
        map_kwargs: Sequence[Mapping[str, Any]] = None,
        kwargs: Mapping = None,
        **process_kwargs
    ):
        """
        Spawn one child Process per argument combination produced by
        :py:func:`map_plus` over the given iterables/mappings.

        :return: A ``ProcessList`` of the spawned Processes.
        """
        return ProcessList(
            map_plus(
                lambda *args, **kwargs: self._process(
                    target, args=args, kwargs=kwargs, **process_kwargs
                ),
                map_iter,
                map_args,
                args,
                map_kwargs,
                kwargs,
            )
        )
    def wait(
        self, timeout: Union[int, float] = None, safe: bool = False
    ) -> List[Union[Any, Exception]]:
        """
        alias for :py:meth:`ProcessList.wait()`
        """
        return self.process_list.wait(timeout, safe)
    def start_all(self):
        """
        alias for :py:meth:`ProcessList.start_all()`
        """
        return self.process_list.start()
    def stop_all(self):
        """
        alias for :py:meth:`ProcessList.stop_all()`
        """
        return self.process_list.stop()
    def ping(self, **kwargs):
        r"""
        Ping the zproc server.
        :param \*\*kwargs: Keyword arguments that :py:func:`ping` takes, except ``server_address``.
        :return: Same as :py:func:`ping`
        """
        return tools.ping(self.server_address, **kwargs)
| 2.53125 | 3 |
HeadFirstPython/code/chapter3/setTest.py | BlossomRain/Read | 0 | 12766601 | <reponame>BlossomRain/Read<filename>HeadFirstPython/code/chapter3/setTest.py
# Demonstrates the three basic set operations: union, intersection, difference.
odds = {1, 3, 5, 7}
evens = set([0, 2, 4, 6, 8])
# Union: every number that appears in either set.
print(odds.union(evens))
# Bug fix: intersection()/difference() were previously called with no
# arguments and their results discarded (no-ops); compare against `evens`
# and print the results like the union above.
print(odds.intersection(evens))
print(odds.difference(evens))
mindsdb/interfaces/storage/db.py | yarenty/mindsdb | 0 | 12766602 | <filename>mindsdb/interfaces/storage/db.py
import os
import json
import datetime
import numpy as np
from sqlalchemy import create_engine, orm, types, UniqueConstraint
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Boolean, Index
from sqlalchemy.sql.expression import null
from sqlalchemy.sql.schema import ForeignKey
# Connection string comes from the environment (raises KeyError when unset).
# SQLite gets a plain engine; any other backend gets an enlarged connection pool.
if os.environ['MINDSDB_DB_CON'].startswith('sqlite:'):
    engine = create_engine(os.environ['MINDSDB_DB_CON'], echo=False)
else:
    # NOTE(review): `convert_unicode` was deprecated and later removed in
    # SQLAlchemy 1.4+ — confirm the pinned SQLAlchemy version before upgrading.
    engine = create_engine(os.environ['MINDSDB_DB_CON'], convert_unicode=True, pool_size=30, max_overflow=200, echo=False)
Base = declarative_base()
# Thread-local session factory; `Model.query` becomes a shortcut for
# `session.query(Model)` via the query_property below.
session = scoped_session(sessionmaker(bind=engine, autoflush=True))
Base.query = session.query_property()
# Source: https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy types.

    Converts np.integer -> int, np.floating -> float and np.ndarray -> list
    so values containing NumPy data can be serialized with json.dumps.
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Anything else falls back to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
class Array(types.TypeDecorator):
    """Stores a list of strings inside a single String column.

    Items are joined with the ',|,|,' sentinel on the way into the database
    and split on it on the way out; plain str and None pass through unchanged.
    """
    impl = types.String

    def process_bind_param(self, value, dialect):  # insert
        if value is None or isinstance(value, str):
            return value
        return ',|,|,'.join(value)

    def process_result_value(self, value, dialect):  # select
        if value is None:
            return None
        return value.split(',|,|,')
class Json(types.TypeDecorator):
    """Stores an arbitrary JSON-serializable object in a String column.

    Serialization goes through NumpyEncoder so NumPy values survive the
    round-trip; None maps to SQL NULL in both directions.
    """
    impl = types.String

    def process_bind_param(self, value, dialect):  # insert
        if value is None:
            return None
        return json.dumps(value, cls=NumpyEncoder)

    def process_result_value(self, value, dialect):  # select
        if value is None:
            return None
        return json.loads(value)
class Semaphor(Base):
    """Cross-process lock record: which `action` currently holds which entity."""
    __tablename__ = 'semaphor'
    # Bug fix: the UniqueConstraint used to be assigned to a plain class
    # attribute ("uniq_const"), which SQLAlchemy's declarative mapper ignores,
    # so the constraint was never emitted. It must live in __table_args__.
    __table_args__ = (
        UniqueConstraint('entity_type', 'entity_id'),
    )
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    entity_type = Column('entity_type', String)
    entity_id = Column('entity_id', Integer)
    action = Column(String)
    company_id = Column(Integer)
class Datasource(Base):
    """A stored datasource: its origin, creation info and cached analysis."""
    __tablename__ = 'datasource'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String)
    data = Column(String)  # Including, e.g. the query used to create it and even the connection info when there's no integration associated with it -- A JSON
    creation_info = Column(String)
    analysis = Column(String)  # A JSON
    company_id = Column(Integer)
    mindsdb_version = Column(String)
    datasources_version = Column(String)
    integration_id = Column(Integer)  # NOTE(review): plain Integer, no ForeignKey — presumably references Integration.id; confirm
class Predictor(Base):
    """A trained (or in-training) model and its associated metadata."""
    __tablename__ = 'predictor'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String)
    data = Column(Json)  # A JSON -- should be everything returned by `get_model_data`, I think
    to_predict = Column(Array)  # target column names, stored via the Array type
    company_id = Column(Integer)
    mindsdb_version = Column(String)
    native_version = Column(String)
    datasource_id = Column(Integer)
    is_custom = Column(Boolean)  # to del
    learn_args = Column(Json)
    update_status = Column(String, default='up_to_date')
    json_ai = Column(Json, nullable=True)
    code = Column(String, nullable=True)
    lightwood_version = Column(String, nullable=True)
    dtype_dict = Column(Json, nullable=True)
class AITable(Base):
    """A virtual table that routes an integration query through a predictor."""
    __tablename__ = 'ai_table'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String)
    integration_name = Column(String)
    integration_query = Column(String)  # the SQL sent to the integration
    query_fields = Column(Json)
    predictor_name = Column(String)
    predictor_columns = Column(Json)
    company_id = Column(Integer)
class Log(Base):
    """Application log entry persisted to the database."""
    __tablename__ = 'log'
    # Bug fix: the index previously targeted a non-existent column
    # ("created_at_index") and was assigned to a plain class attribute,
    # which the declarative mapper ignores. Declare it in __table_args__
    # against the real "created_at" column so time-based lookups are indexed.
    __table_args__ = (
        Index('some_index', 'created_at'),
    )
    id = Column(Integer, primary_key=True)
    created_at = Column(DateTime, default=datetime.datetime.now)
    log_type = Column(String)  # log, info, warning, traceback etc
    source = Column(String)  # file + line
    company_id = Column(Integer)
    payload = Column(String)
class Integration(Base):
    """Connection settings for an external data integration."""
    __tablename__ = 'integration'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String, nullable=False)
    data = Column(Json)  # connection parameters as a JSON blob
    company_id = Column(Integer)
class Stream(Base):
    """Configuration of a streaming-prediction pipeline."""
    __tablename__ = 'stream'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    stream_in = Column(String, nullable=False)
    stream_out = Column(String, nullable=False)
    anomaly_stream = Column(String)
    integration = Column(String)
    predictor = Column(String, nullable=False)
    company_id = Column(Integer)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    type = Column(String, default='unknown')
    # Bug fix: `default={}` shares ONE dict object across every insert; any
    # accidental mutation of that dict would leak into later rows. Passing
    # the callable `dict` makes SQLAlchemy build a fresh dict per insert.
    connection_info = Column(Json, default=dict)
    learning_params = Column(Json, default=dict)
    learning_threshold = Column(Integer, default=0)
# DDL is changing through migrations
# Base.metadata.create_all(engine)
# orm.configure_mappers()
| 2.46875 | 2 |
chart_studio/presentation_objs/presentation_objs.py | piyush1301/plotly.py | 6 | 12766603 | <filename>chart_studio/presentation_objs/presentation_objs.py
"""
presentation_objs
=================
A module for creating and manipulating spectacle-presentation dashboards.
"""
import copy
import random
import re
import string
import warnings
import _plotly_utils.exceptions
from chart_studio import exceptions
from chart_studio.config import get_config
# Slide canvas dimensions in pixels.
HEIGHT = 700.0
WIDTH = 1000.0
# Valid values for the user-facing options below.
CODEPANE_THEMES = ['tomorrow', 'tomorrowNight']
VALID_LANGUAGES = ['cpp', 'cs', 'css', 'fsharp', 'go', 'haskell', 'java',
                   'javascript', 'jsx', 'julia', 'xml', 'matlab', 'php',
                   'python', 'r', 'ruby', 'scala', 'sql', 'yaml']
VALID_TRANSITIONS = ['slide', 'zoom', 'fade', 'spin']
PRES_THEMES = ['moods', 'martik']
VALID_GROUPTYPES = [
    'leftgroup_v', 'rightgroup_v', 'middle', 'checkerboard_topleft',
    'checkerboard_topright'
]
# Maps a human-readable weight name to its CSS fontWeight/fontStyle props.
fontWeight_dict = {
    'Thin': {'fontWeight': 100},
    'Thin Italic': {'fontWeight': 100, 'fontStyle': 'italic'},
    'Light': {'fontWeight': 300},
    'Light Italic': {'fontWeight': 300, 'fontStyle': 'italic'},
    'Regular': {'fontWeight': 400},
    'Regular Italic': {'fontWeight': 400, 'fontStyle': 'italic'},
    'Medium': {'fontWeight': 500},
    'Medium Italic': {'fontWeight': 500, 'fontStyle': 'italic'},
    'Bold': {'fontWeight': 700},
    'Bold Italic': {'fontWeight': 700, 'fontStyle': 'italic'},
    'Black': {'fontWeight': 900},
    'Black Italic': {'fontWeight': 900, 'fontStyle': 'italic'},
}
def list_of_options(iterable, conj='and', period=True):
    """
    Join the items of *iterable* into an English listing.

    ['foo', 'bar', 'baz'] becomes 'foo, bar and baz.' with conj='and' and a
    trailing period when *period* is True. Requires at least 2 items.
    """
    if len(iterable) < 2:
        raise _plotly_utils.exceptions.PlotlyError(
            'Your list or tuple must contain at least 2 items.'
        )
    items = list(iterable)
    # one '{}, ' placeholder for every item except the final pair
    head = ''.join('{}, ' for _ in items[:-2])
    template = head + '{} ' + conj + ' {}'
    if period:
        template += '.'
    return template.format(*items)
# Error Messages
# Pre-built strings reused by the validation code further down the module.
STYLE_ERROR = "Your presentation style must be {}".format(
    list_of_options(PRES_THEMES, conj='or', period=True)
)
CODE_ENV_ERROR = (
    "If you are putting a block of code into your markdown "
    "presentation, make sure your denote the start and end "
    "of the code environment with the '```' characters. For "
    "example, your markdown string would include something "
    "like:\n\n```python\nx = 2\ny = 1\nprint x\n```\n\n"
    "Notice how the language that you want the code to be "
    "displayed in is immediately to the right of first "
    "entering '```', i.e. '```python'."
)
LANG_ERROR = (
    "The language of your code block should be "
    "clearly indicated after the first ``` that "
    "begins the code block. The valid languages to "
    "choose from are" + list_of_options(
        VALID_LANGUAGES
    )
)
def _generate_id(size):
    """Return a pseudo-random id string of *size* letters/digits.

    Each digit appears twice in the sampling alphabet (letters once), so
    digits are weighted double — preserving the historical behavior.
    """
    alphabet = string.ascii_letters + ''.join(d * 2 for d in string.digits)
    return ''.join(random.choice(alphabet) for _ in range(size))
# Default css for each named paragraph style. The theme branches in
# _return_layout_specs mutate these entries (colors, fonts, sizes) at
# presentation-build time.
paragraph_styles = {
    'Body': {
        'color': '#3d3d3d',
        'fontFamily': 'Open Sans',
        'fontSize': 11,
        'fontStyle': 'normal',
        'fontWeight': 400,
        'lineHeight': 'normal',
        'minWidth': 20,
        'opacity': 1,
        'textAlign': 'center',
        'textDecoration': 'none',
        'wordBreak': 'break-word'
    },
    'Body Small': {
        'color': '#3d3d3d',
        'fontFamily': 'Open Sans',
        'fontSize': 10,
        'fontStyle': 'normal',
        'fontWeight': 400,
        'lineHeight': 'normal',
        'minWidth': 20,
        'opacity': 1,
        'textAlign': 'center',
        'textDecoration': 'none'
    },
    'Caption': {
        'color': '#3d3d3d',
        'fontFamily': 'Open Sans',
        'fontSize': 11,
        'fontStyle': 'italic',
        'fontWeight': 400,
        'lineHeight': 'normal',
        'minWidth': 20,
        'opacity': 1,
        'textAlign': 'center',
        'textDecoration': 'none'
    },
    'Heading 1': {
        'color': '#3d3d3d',
        'fontFamily': 'Open Sans',
        'fontSize': 26,
        'fontStyle': 'normal',
        'fontWeight': 400,
        'lineHeight': 'normal',
        'minWidth': 20,
        'opacity': 1,
        'textAlign': 'center',
        'textDecoration': 'none',
    },
    'Heading 2': {
        'color': '#3d3d3d',
        'fontFamily': 'Open Sans',
        'fontSize': 20,
        'fontStyle': 'normal',
        'fontWeight': 400,
        'lineHeight': 'normal',
        'minWidth': 20,
        'opacity': 1,
        'textAlign': 'center',
        'textDecoration': 'none'
    },
    'Heading 3': {
        'color': '#3d3d3d',
        'fontFamily': 'Open Sans',
        'fontSize': 11,
        'fontStyle': 'normal',
        'fontWeight': 700,
        'lineHeight': 'normal',
        'minWidth': 20,
        'opacity': 1,
        'textAlign': 'center',
        'textDecoration': 'none'
    }
}
def _empty_slide(transition, id):
    """Return the JSON skeleton of a slide with no children."""
    return {
        'children': [],
        'id': id,
        'props': {'style': {}, 'transition': transition},
    }
def _box(boxtype, text_or_url, left, top, height, width, id, props_attr,
         style_attr, paragraphStyle):
    """Build the JSON dict for a single slide element.

    :param boxtype: one of 'Text', 'Image', 'Plotly' or 'CodePane'.
    :param text_or_url: the text content (Text/CodePane) or source url
        (Image/Plotly).
    :param left: pixel x-offset of the box.
    :param top: pixel y-offset of the box.
    :param height: pixel height of the box.
    :param width: pixel width of the box.
    :param id: unique element id.
    :param props_attr: extra entries merged into the element's props.
    :param style_attr: extra entries merged into props['style'].
    :param paragraphStyle: named style from paragraph_styles (Text only).
    :return: the element dict, ready to append to a slide's children.
    """
    children_list = []
    fontFamily = "Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace"
    if boxtype == 'Text':
        # each line of text becomes its own child entry
        children_list = text_or_url.split('\n')
        props = {
            'isQuote': False,
            'listType': None,
            'paragraphStyle': paragraphStyle,
            'size': 4,
            'style': copy.deepcopy(paragraph_styles[paragraphStyle])
        }
        props['style'].update(
            {'height': height,
             'left': left,
             'top': top,
             'width': width,
             'position': 'absolute'}
        )
    elif boxtype == 'Image':
        # height, width are set to default 512
        # as set by the Presentation Editor
        props = {
            'height': 512,
            'imageName': None,
            'src': text_or_url,
            'style': {'height': height,
                      'left': left,
                      'opacity': 1,
                      'position': 'absolute',
                      'top': top,
                      'width': width},
            'width': 512
        }
    elif boxtype == 'Plotly':
        # urls that already carry a share_key are embedded as-is
        if '?share_key' in text_or_url:
            src = text_or_url
        else:
            src = text_or_url + '.embed?link=false'
        props = {
            'frameBorder': 0,
            'scrolling': 'no',
            'src': src,
            'style': {'height': height,
                      'left': left,
                      'position': 'absolute',
                      'top': top,
                      'width': width}
        }
    elif boxtype == 'CodePane':
        props = {
            'language': 'python',
            'source': text_or_url,
            'style': {'fontFamily': fontFamily,
                      'fontSize': 13,
                      'height': height,
                      'left': left,
                      'margin': 0,
                      'position': 'absolute',
                      'textAlign': 'left',
                      'top': top,
                      'width': width},
            'theme': 'tomorrowNight'
        }
    # update props and style attributes
    # NOTE(review): an unrecognized boxtype leaves `props` unbound here
    # (NameError); callers only ever pass the four types above.
    for item in props_attr.items():
        props[item[0]] = item[1]
    for item in style_attr.items():
        props['style'][item[0]] = item[1]
    child = {
        'children': children_list,
        'id': id,
        'props': props,
        'type': boxtype
    }
    if boxtype == 'Text':
        child['defaultHeight'] = 36
        child['defaultWidth'] = 52
        child['resizeVertical'] = False
    if boxtype == 'CodePane':
        child['defaultText'] = 'Code'
    return child
def _percentage_to_pixel(value, side):
    """Convert a percentage of the slide canvas to pixels.

    'left' and 'width' scale against WIDTH; 'top' and 'height' against
    HEIGHT. Any other side yields None.
    """
    scale = {'left': WIDTH, 'width': WIDTH, 'top': HEIGHT, 'height': HEIGHT}
    if side not in scale:
        return None
    return scale[side] * (0.01 * value)
def _return_box_position(left, top, height, width):
    """Resolve box geometry to pixel floats.

    Each argument is either a number (interpreted as a percentage of the
    slide canvas) or a string like '120px'.

    :return: ``(left, top, height, width)`` in pixels.
    """
    resolved = {}
    for side, raw in (('left', left), ('top', top),
                      ('height', height), ('width', width)):
        if isinstance(raw, str):
            # e.g. '120px' -> 120.0
            resolved[side] = float(raw[:-2])
        else:
            resolved[side] = _percentage_to_pixel(raw, side)
    return (resolved['left'], resolved['top'],
            resolved['height'], resolved['width'])
def _remove_extra_whitespace_from_line(line):
    """Strip leading and trailing whitespace from *line*."""
    return line.strip()
def _list_of_slides(markdown_string):
    """Split a presentation markdown string into one string per slide.

    Slides are delimited by lines of two or more dashes. Blocks made up
    solely of newlines, dashes and spaces are discarded. A single '-' on
    its own line does NOT start a new slide and only triggers a warning.
    """
    if not markdown_string.endswith('\n---\n'):
        markdown_string += '\n---\n'

    filler_chars = {'\n', '-', ' '}
    slides = [
        text
        for text in re.split('\n-{2,}\n', markdown_string)
        if any(char not in filler_chars for char in text)
    ]

    if '\n-\n' in markdown_string:
        msg = ("You have at least one '-' by itself on its own line in your "
               "markdown string. If you are trying to denote a new slide, "
               "make sure that the line has 3 '-'s like this: \n\n---\n\n"
               "A new slide will NOT be created here.")
        warnings.warn(msg)

    return slides
def _top_spec_for_text_at_bottom(text_block, width_per, per_from_bottom=0,
                                 min_top=30):
    """Compute the css 'top' (in %) for a text block anchored near the
    bottom of a slide, so a large block does not overflow the bottom edge.

    Estimates how many rendered lines *text_block* occupies given the
    block's width (``width_per`` percent of the slide), then returns a top
    percentage leaving ``per_from_bottom`` percent of space below, never
    going above ``min_top``.
    """
    # TODO: customize this function for different fonts/sizes
    max_lines = 37
    one_char_percent_width = 0.764
    chars_in_full_line = width_per / one_char_percent_width

    rendered_lines = 1  # the final (possibly empty) line always counts
    column = 0
    for char in text_block:
        if char == '\n':
            rendered_lines += 1
            column = 0
        elif column >= chars_in_full_line:
            # line overflows: wrap, dropping this char's width
            # (historical behavior preserved)
            rendered_lines += 1
            column = 0
        else:
            column += 1

    top = ((max_lines - rendered_lines) / float(max_lines)) * 100 - per_from_bottom
    # to be safe: never start higher up than min_top
    return max(top, min_top)
def _box_specs_gen(num_of_boxes, grouptype='leftgroup_v', width_range=50,
                   height_range=50, margin=2, betw_boxes=4, middle_center=50):
    """Generate css geometry (in %) for laying out ``num_of_boxes`` boxes.

    :param grouptype: one of VALID_GROUPTYPES.
    :param width_range: percent of the slide width the group spans.
    :param height_range: percent of the slide height ('middle' grouptype).
    :param margin: outer margin in px.
    :param betw_boxes: gap between boxes in px.
    :param middle_center: vertical center (%) for the 'middle' grouptype.
    :return: list of ``(left, top, height, width)`` tuples, one per box.
    """
    # each spec appended below is (left, top, height, width)
    specs_for_boxes = []
    if num_of_boxes == 1 and grouptype in ['leftgroup_v', 'rightgroup_v']:
        # single box filling the left (or right) vertical band
        if grouptype == 'rightgroup_v':
            left_shift = (100 - width_range)
        else:
            left_shift = 0
        box_spec = (
            left_shift + (margin / WIDTH) * 100,
            (margin / HEIGHT) * 100,
            100 - (2 * margin / HEIGHT * 100),
            width_range - (2 * margin / WIDTH) * 100
        )
        specs_for_boxes.append(box_spec)
    elif num_of_boxes > 1 and grouptype in ['leftgroup_v', 'rightgroup_v']:
        if grouptype == 'rightgroup_v':
            left_shift = (100 - width_range)
        else:
            left_shift = 0
        if num_of_boxes % 2 == 0:
            # even count: two columns of num_of_boxes/2 rows each
            box_width_px = 0.5 * (
                (float(width_range)/100) * WIDTH - 2 * margin - betw_boxes
            )
            box_width = (box_width_px / WIDTH) * 100
            height = (200.0 / (num_of_boxes * HEIGHT)) * (
                HEIGHT - (num_of_boxes / 2 - 1) * betw_boxes - 2 * margin
            )
            left1 = left_shift + (margin / WIDTH) * 100
            left2 = left_shift + (
                ((margin + betw_boxes) / WIDTH) * 100 + box_width
            )
            for left in [left1, left2]:
                for j in range(int(num_of_boxes / 2)):
                    top = (margin * 100 / HEIGHT) + j * (
                        height + (betw_boxes * 100 / HEIGHT)
                    )
                    specs = (
                        left,
                        top,
                        height,
                        box_width
                    )
                    specs_for_boxes.append(specs)
        if num_of_boxes % 2 == 1:
            # odd count: one full-width column stacked vertically
            width = width_range - (200 * margin) / WIDTH
            height = (100.0 / (num_of_boxes * HEIGHT)) * (
                HEIGHT - (num_of_boxes - 1) * betw_boxes - 2 * margin
            )
            left = left_shift + (margin / WIDTH) * 100
            for j in range(num_of_boxes):
                top = (margin / HEIGHT) * 100 + j * (
                    height + (betw_boxes / HEIGHT) * 100
                )
                specs = (
                    left,
                    top,
                    height,
                    width
                )
                specs_for_boxes.append(specs)
    elif grouptype == 'middle':
        # boxes side-by-side, vertically centered on middle_center
        top = float(middle_center - (height_range / 2))
        height = height_range
        width = (1 / float(num_of_boxes)) * (
            width_range - (num_of_boxes - 1) * (100*betw_boxes/WIDTH)
        )
        for j in range(num_of_boxes):
            left = ((100 - float(width_range)) / 2) + j * (
                width + (betw_boxes / WIDTH) * 100
            )
            specs = (left, top, height, width)
            specs_for_boxes.append(specs)
    elif 'checkerboard' in grouptype and num_of_boxes == 2:
        # two quarter-slide boxes on opposite diagonal corners
        if grouptype == 'checkerboard_topleft':
            for j in range(2):
                left = j * 50
                top = j * 50
                height = 50
                width = 50
                specs = (
                    left,
                    top,
                    height,
                    width
                )
                specs_for_boxes.append(specs)
        else:
            for j in range(2):
                left = 50 * (1 - j)
                top = j * 50
                height = 50
                width = 50
                specs = (
                    left,
                    top,
                    height,
                    width
                )
                specs_for_boxes.append(specs)
    return specs_for_boxes
def _return_layout_specs(num_of_boxes, url_lines, title_lines, text_block,
                         code_blocks, slide_num, style):
    """Compute the full layout for one slide under a given theme.

    Mutates the module-level ``paragraph_styles`` to match the theme, then
    picks geometry for the boxes, title and text based on how many boxes
    the slide has and what they contain (code / plotly url / image).

    :return: ``(specs_for_boxes, specs_for_title, specs_for_text,
        bkgd_color, title_style_attr, text_style_attr, code_theme)``.
        Geometry tuples are of the form (left, top, height, width).
    # NOTE(review): a ``style`` outside {'martik', 'moods'} leaves the
    # layout variables unbound (NameError); callers validate style first.
    """
    # returns specs of the form (left, top, height, width)
    code_theme = 'tomorrowNight'
    if style == 'martik':
        specs_for_boxes = []
        margin = 18  # in pxs
        # set Headings styles
        paragraph_styles['Heading 1'].update(
            {'color': '#0D0A1E',
             'fontFamily': 'Raleway',
             'fontSize': 55,
             'fontWeight': fontWeight_dict['Bold']['fontWeight']}
        )
        paragraph_styles['Heading 2'] = copy.deepcopy(
            paragraph_styles['Heading 1']
        )
        paragraph_styles['Heading 2'].update({'fontSize': 36})
        paragraph_styles['Heading 3'] = copy.deepcopy(
            paragraph_styles['Heading 1']
        )
        paragraph_styles['Heading 3'].update({'fontSize': 30})
        # set Body style
        paragraph_styles['Body'].update(
            {'color': '#96969C',
             'fontFamily': 'Roboto',
             'fontSize': 16,
             'fontWeight': fontWeight_dict['Regular']['fontWeight']}
        )
        bkgd_color = '#F4FAFB'
        title_font_color = '#0D0A1E'
        text_font_color = '#96969C'
        # only the very first, box-free slide centers its text
        if num_of_boxes == 0 and slide_num == 0:
            text_textAlign = 'center'
        else:
            text_textAlign = 'left'
        if num_of_boxes == 0:
            specs_for_title = (0, 50, 20, 100)
            specs_for_text = (15, 60, 50, 70)
            bkgd_color = '#0D0A1E'
            title_font_color = '#F4FAFB'
            text_font_color = '#F4FAFB'
        elif num_of_boxes == 1:
            if code_blocks != [] or (url_lines != [] and
                                     get_config()['plotly_domain'] in
                                     url_lines[0]):
                # single code pane or plotly chart, centered
                if code_blocks != []:
                    w_range = 40
                else:
                    w_range = 60
                text_top = _top_spec_for_text_at_bottom(
                    text_block, 80,
                    per_from_bottom=(margin / HEIGHT) * 100
                )
                specs_for_title = (0, 3, 20, 100)
                specs_for_text = (10, text_top, 30, 80)
                specs_for_boxes = _box_specs_gen(
                    num_of_boxes, grouptype='middle', width_range=w_range,
                    height_range=60, margin=margin, betw_boxes=4
                )
                bkgd_color = '#0D0A1E'
                title_font_color = '#F4FAFB'
                text_font_color = '#F4FAFB'
                code_theme = 'tomorrow'
            elif title_lines == [] and text_block == '':
                specs_for_title = (0, 50, 20, 100)
                specs_for_text = (15, 60, 50, 70)
                specs_for_boxes = _box_specs_gen(
                    num_of_boxes, grouptype='middle', width_range=50,
                    height_range=80, margin=0, betw_boxes=0
                )
            else:
                title_text_width = 40 - (margin / WIDTH) * 100
                text_top = _top_spec_for_text_at_bottom(
                    text_block, title_text_width,
                    per_from_bottom=(margin / HEIGHT) * 100
                )
                specs_for_title = (60, 3, 20, 40)
                specs_for_text = (60, text_top, 1, title_text_width)
                specs_for_boxes = _box_specs_gen(
                    num_of_boxes, grouptype='leftgroup_v', width_range=60,
                    margin=margin, betw_boxes=4
                )
                bkgd_color = '#0D0A1E'
                title_font_color = '#F4FAFB'
                text_font_color = '#F4FAFB'
        elif num_of_boxes == 2 and url_lines != []:
            text_top = _top_spec_for_text_at_bottom(
                text_block, 46, per_from_bottom=(margin / HEIGHT) * 100,
                min_top=50
            )
            specs_for_title = (0, 3, 20, 50)
            specs_for_text = (52, text_top, 40, 46)
            specs_for_boxes = _box_specs_gen(
                num_of_boxes, grouptype='checkerboard_topright'
            )
        elif num_of_boxes >= 2 and url_lines == []:
            text_top = _top_spec_for_text_at_bottom(
                text_block, 92, per_from_bottom=(margin / HEIGHT) * 100,
                min_top=15
            )
            if num_of_boxes == 2:
                betw_boxes = 90
            else:
                betw_boxes = 10
            specs_for_title = (0, 3, 20, 100)
            specs_for_text = (4, text_top, 1, 92)
            specs_for_boxes = _box_specs_gen(
                num_of_boxes, grouptype='middle', width_range=92,
                height_range=60, margin=margin, betw_boxes=betw_boxes
            )
            code_theme = 'tomorrow'
        else:
            text_top = _top_spec_for_text_at_bottom(
                text_block, 40 - (margin / WIDTH) * 100,
                per_from_bottom=(margin / HEIGHT) * 100
            )
            specs_for_title = (0, 3, 20, 40 - (margin / WIDTH) * 100)
            specs_for_text = (
                (margin / WIDTH) * 100, text_top, 50,
                40 - (margin / WIDTH) * 100
            )
            specs_for_boxes = _box_specs_gen(
                num_of_boxes, grouptype='rightgroup_v', width_range=60,
                margin=margin, betw_boxes=4
            )
    elif style == 'moods':
        specs_for_boxes = []
        margin = 18
        code_theme = 'tomorrowNight'
        # set Headings styles
        paragraph_styles['Heading 1'].update(
            {'color': '#000016',
             'fontFamily': 'Roboto',
             'fontSize': 55,
             'fontWeight': fontWeight_dict['Black']['fontWeight']}
        )
        paragraph_styles['Heading 2'] = copy.deepcopy(
            paragraph_styles['Heading 1']
        )
        paragraph_styles['Heading 2'].update({'fontSize': 36})
        paragraph_styles['Heading 3'] = copy.deepcopy(
            paragraph_styles['Heading 1']
        )
        paragraph_styles['Heading 3'].update({'fontSize': 30})
        # set Body style
        paragraph_styles['Body'].update(
            {'color': '#000016',
             'fontFamily': 'Roboto',
             'fontSize': 16,
             'fontWeight': fontWeight_dict['Thin']['fontWeight']}
        )
        bkgd_color = '#FFFFFF'
        title_font_color = None
        text_font_color = None
        if num_of_boxes == 0 and slide_num == 0:
            text_textAlign = 'center'
        else:
            text_textAlign = 'left'
        if num_of_boxes == 0:
            if slide_num == 0 or text_block == '':
                bkgd_color = '#F7F7F7'
                specs_for_title = (0, 50, 20, 100)
                specs_for_text = (15, 60, 50, 70)
            else:
                bkgd_color = '#F7F7F7'
                text_top = _top_spec_for_text_at_bottom(
                    text_block, width_per=90,
                    per_from_bottom=(margin / HEIGHT) * 100,
                    min_top=20
                )
                specs_for_title = (0, 2, 20, 100)
                specs_for_text = (5, text_top, 50, 90)
        elif num_of_boxes == 1:
            # layout alternates with slide parity to vary the deck visually
            if code_blocks != []:
                # code
                if text_block == '':
                    margin = 5
                    specs_for_title = (0, 3, 20, 100)
                    specs_for_text = (0, 0, 0, 0)
                    top = 12
                    specs_for_boxes = [
                        (margin, top, 100 - top - margin, 100 - 2 * margin)
                    ]
                elif slide_num % 2 == 0:
                    # middle center
                    width_per = 90
                    height_range = 60
                    text_top = _top_spec_for_text_at_bottom(
                        text_block, width_per=width_per,
                        per_from_bottom=(margin / HEIGHT) * 100,
                        min_top=100 - height_range / 2.
                    )
                    specs_for_boxes = _box_specs_gen(
                        num_of_boxes, grouptype='middle',
                        width_range=50, height_range=60, margin=margin,
                    )
                    specs_for_title = (0, 3, 20, 100)
                    specs_for_text = (
                        5, text_top, 2, width_per
                    )
                else:
                    # right
                    width_per = 50
                    text_top = _top_spec_for_text_at_bottom(
                        text_block, width_per=width_per,
                        per_from_bottom=(margin / HEIGHT) * 100,
                        min_top=30
                    )
                    specs_for_boxes = _box_specs_gen(
                        num_of_boxes, grouptype='rightgroup_v',
                        width_range=50, margin=40,
                    )
                    specs_for_title = (0, 3, 20, 50)
                    specs_for_text = (
                        2, text_top, 2, width_per - 2
                    )
            elif (url_lines != [] and
                  get_config()['plotly_domain'] in url_lines[0]):
                # url
                if slide_num % 2 == 0:
                    # top half
                    width_per = 95
                    text_top = _top_spec_for_text_at_bottom(
                        text_block, width_per=width_per,
                        per_from_bottom=(margin / HEIGHT) * 100,
                        min_top=60
                    )
                    specs_for_boxes = _box_specs_gen(
                        num_of_boxes, grouptype='middle',
                        width_range=100, height_range=60,
                        middle_center=30
                    )
                    specs_for_title = (0, 60, 20, 100)
                    specs_for_text = (
                        2.5, text_top, 2, width_per
                    )
                else:
                    # middle across
                    width_per = 95
                    text_top = _top_spec_for_text_at_bottom(
                        text_block, width_per=width_per,
                        per_from_bottom=(margin / HEIGHT) * 100,
                        min_top=60
                    )
                    specs_for_boxes = _box_specs_gen(
                        num_of_boxes, grouptype='middle',
                        width_range=100, height_range=60
                    )
                    specs_for_title = (0, 3, 20, 100)
                    specs_for_text = (
                        2.5, text_top, 2, width_per
                    )
            else:
                # image
                if slide_num % 2 == 0:
                    # right
                    width_per = 50
                    text_top = _top_spec_for_text_at_bottom(
                        text_block, width_per=width_per,
                        per_from_bottom=(margin / HEIGHT) * 100,
                        min_top=30
                    )
                    specs_for_boxes = _box_specs_gen(
                        num_of_boxes, grouptype='rightgroup_v',
                        width_range=50, margin=0,
                    )
                    specs_for_title = (0, 3, 20, 50)
                    specs_for_text = (
                        2, text_top, 2, width_per - 2
                    )
                else:
                    # left
                    width_per = 50
                    text_top = _top_spec_for_text_at_bottom(
                        text_block, width_per=width_per,
                        per_from_bottom=(margin / HEIGHT) * 100,
                        min_top=30
                    )
                    specs_for_boxes = _box_specs_gen(
                        num_of_boxes, grouptype='leftgroup_v',
                        width_range=50, margin=0,
                    )
                    specs_for_title = (50, 3, 20, 50)
                    specs_for_text = (
                        52, text_top, 2, width_per - 2
                    )
        elif num_of_boxes == 2:
            # right stack
            width_per = 50
            text_top = _top_spec_for_text_at_bottom(
                text_block, width_per=width_per,
                per_from_bottom=(margin / HEIGHT) * 100,
                min_top=30
            )
            specs_for_boxes = [(50, 0, 50, 50), (50, 50, 50, 50)]
            specs_for_title = (0, 3, 20, 50)
            specs_for_text = (
                2, text_top, 2, width_per - 2
            )
        elif num_of_boxes == 3:
            # middle top
            width_per = 95
            text_top = _top_spec_for_text_at_bottom(
                text_block, width_per=width_per,
                per_from_bottom=(margin / HEIGHT) * 100,
                min_top=40
            )
            specs_for_boxes = _box_specs_gen(
                num_of_boxes, grouptype='middle',
                width_range=100, height_range=40, middle_center=30
            )
            specs_for_title = (0, 0, 20, 100)
            specs_for_text = (
                2.5, text_top, 2, width_per
            )
        else:
            # right stack
            width_per = 40
            text_top = _top_spec_for_text_at_bottom(
                text_block, width_per=width_per,
                per_from_bottom=(margin / HEIGHT) * 100,
                min_top=30
            )
            specs_for_boxes = _box_specs_gen(
                num_of_boxes, grouptype='rightgroup_v',
                width_range=60, margin=0,
            )
            specs_for_title = (0, 3, 20, 40)
            specs_for_text = (
                2, text_top, 2, width_per - 2
            )
    # set text style attributes
    title_style_attr = {}
    text_style_attr = {'textAlign': text_textAlign}
    if text_font_color:
        text_style_attr['color'] = text_font_color
    if title_font_color:
        title_style_attr['color'] = title_font_color
    return (specs_for_boxes, specs_for_title, specs_for_text, bkgd_color,
            title_style_attr, text_style_attr, code_theme)
def _url_parens_contained(url_name, line):
    """Return True if *line* has the form ``url_name(...)``."""
    opens_correctly = line.startswith(url_name + '(')
    closes_correctly = line.endswith(')')
    return opens_correctly and closes_correctly
class Presentation(dict):
"""
The Presentation class for creating spectacle-presentations.
The Presentations API is a means for creating JSON blobs which are then
converted Spectacle Presentations. To use the API you only need to define
a block string and define your slides using markdown. Then you can upload
your presentation to the Plotly Server.
Rules for your presentation string:
- use '---' to denote a slide break.
- headers work as per usual, where if '#' is used before a line of text
then it is interpretted as a header. Only the first header in a slide is
displayed on the slide. There are only 3 heading sizes: #, ## and ###.
4 or more hashes will be interpretted as ###.
- you can set the type of slide transition you want by writing a line that
starts with 'transition: ' before your first header line in the slide,
and write the types of transition you want after. Your transition to
choose from are 'slide', 'zoom', 'fade' and 'spin'.
- to insert a Plotly chart into your slide, write a line that has the form
Plotly(url) with your url pointing to your chart. Note that it is
STRONGLY advised that your chart has fig['layout']['autosize'] = True.
- to insert an image from the web, write a line with the form Image(url)
- to insert a block of text, begin with a line that denotes the code
envoronment '```lang' where lang is a valid programming language. To find
the valid languages run:\n
'plotly.presentation_objs.presentation_objs.VALID_LANGUAGES'\n
To end the code block environment,
write a single '```' line. All Plotly(url) and Image(url) lines will NOT
be interpretted as a Plotly or Image url if they are in the code block.
:param (str) markdown_string: the block string that denotes the slides,
slide properties, and images to be placed in the presentation. If
'markdown_string' is set to 'None', the JSON for a presentation with
one empty slide will be created.
:param (str) style: the theme that the presentation will take on. The
themes that are available now are 'martik' and 'moods'.
Default = 'moods'.
:param (bool) imgStretch: if set to False, all images in the presentation
will not have heights and widths that will not exceed the parent
container they belong to. In other words, images will keep their
original aspect ratios.
Default = True.
For examples see the documentation:\n
https://plot.ly/python/presentations-api/
"""
def __init__(self, markdown_string=None, style='moods', imgStretch=True):
self['presentation'] = {
'slides': [],
'slidePreviews': [None for _ in range(496)],
'version': '0.1.3',
'paragraphStyles': paragraph_styles
}
if markdown_string:
if style not in PRES_THEMES:
raise _plotly_utils.exceptions.PlotlyError(
"Your presentation style must be {}".format(
list_of_options(PRES_THEMES, conj='or', period=True)
)
)
self._markdown_to_presentation(markdown_string, style, imgStretch)
else:
self._add_empty_slide()
def _markdown_to_presentation(self, markdown_string, style, imgStretch):
list_of_slides = _list_of_slides(markdown_string)
for slide_num, slide in enumerate(list_of_slides):
lines_in_slide = slide.split('\n')
title_lines = []
# validate blocks of code
if slide.count('```') % 2 != 0:
raise _plotly_utils.exceptions.PlotlyError(CODE_ENV_ERROR)
# find code blocks
code_indices = []
code_blocks = []
wdw_size = len('```')
for j in range(len(slide)):
if slide[j:j+wdw_size] == '```':
code_indices.append(j)
for k in range(int(len(code_indices) / 2)):
code_blocks.append(
slide[code_indices[2 * k]:code_indices[(2 * k) + 1]]
)
lang_and_code_tuples = []
for code_block in code_blocks:
# validate code blocks
code_by_lines = code_block.split('\n')
language = _remove_extra_whitespace_from_line(
code_by_lines[0][3:]
).lower()
if language == '' or language not in VALID_LANGUAGES:
raise _plotly_utils.exceptions.PlotlyError(
"The language of your code block should be "
"clearly indicated after the first ``` that "
"begins the code block. The valid languages to "
"choose from are" + list_of_options(
VALID_LANGUAGES
)
)
lang_and_code_tuples.append(
(language, '\n'.join(code_by_lines[1:]))
)
# collect text, code and urls
title_lines = []
url_lines = []
text_lines = []
inCode = False
for line in lines_in_slide:
# inCode handling
if line[:3] == '```' and len(line) > 3:
inCode = True
if line == '```':
inCode = False
if not inCode and line != '```':
if len(line) > 0 and line[0] == '#':
title_lines.append(line)
elif (_url_parens_contained('Plotly', line) or
_url_parens_contained('Image', line)):
if (line.startswith('Plotly(') and
get_config()['plotly_domain'] not in line):
raise _plotly_utils.exceptions.PlotlyError(
"You are attempting to insert a Plotly Chart "
"in your slide but your url does not have "
"your plotly domain '{}' in it.".format(
get_config()['plotly_domain']
)
)
url_lines.append(line)
else:
# find and set transition properties
trans = 'transition:'
if line.startswith(trans) and title_lines == []:
slide_trans = line[len(trans):]
slide_trans = _remove_extra_whitespace_from_line(
slide_trans
)
slide_transition_list = []
for key in VALID_TRANSITIONS:
if key in slide_trans:
slide_transition_list.append(key)
if slide_transition_list == []:
slide_transition_list.append('slide')
self._set_transition(
slide_transition_list, slide_num
)
else:
text_lines.append(line)
# make text block
for i in range(2):
try:
while text_lines[-i] == '':
text_lines.pop(-i)
except IndexError:
pass
text_block = '\n'.join(text_lines)
num_of_boxes = len(url_lines) + len(lang_and_code_tuples)
(specs_for_boxes, specs_for_title, specs_for_text, bkgd_color,
title_style_attr, text_style_attr,
code_theme) = _return_layout_specs(
num_of_boxes, url_lines, title_lines, text_block, code_blocks,
slide_num, style
)
# background color
self._color_background(bkgd_color, slide_num)
# insert title, text, code, and images
if len(title_lines) > 0:
# clean titles
title = title_lines[0]
num_hashes = 0
while title[0] == '#':
title = title[1:]
num_hashes += 1
title = _remove_extra_whitespace_from_line(title)
self._insert(
box='Text', text_or_url=title,
left=specs_for_title[0], top=specs_for_title[1],
height=specs_for_title[2], width=specs_for_title[3],
slide=slide_num, style_attr=title_style_attr,
paragraphStyle='Heading 1'.format(
min(num_hashes, 3)
)
)
# text
if len(text_lines) > 0:
self._insert(
box='Text', text_or_url=text_block,
left=specs_for_text[0], top=specs_for_text[1],
height=specs_for_text[2], width=specs_for_text[3],
slide=slide_num, style_attr=text_style_attr,
paragraphStyle='Body'
)
url_and_code_blocks = list(url_lines + lang_and_code_tuples)
for k, specs in enumerate(specs_for_boxes):
url_or_code = url_and_code_blocks[k]
if isinstance(url_or_code, tuple):
# code
language = url_or_code[0]
code = url_or_code[1]
box_name = 'CodePane'
# code style
props_attr = {}
props_attr['language'] = language
props_attr['theme'] = code_theme
self._insert(box=box_name, text_or_url=code,
left=specs[0], top=specs[1],
height=specs[2], width=specs[3],
slide=slide_num, props_attr=props_attr)
else:
# url
if get_config()['plotly_domain'] in url_or_code:
box_name = 'Plotly'
else:
box_name = 'Image'
url = url_or_code[len(box_name) + 1: -1]
self._insert(box=box_name, text_or_url=url,
left=specs[0], top=specs[1],
height=specs[2], width=specs[3],
slide=slide_num)
if not imgStretch:
for s, slide in enumerate(self['presentation']['slides']):
for c, child in enumerate(slide['children']):
if child['type'] in ['Image', 'Plotly']:
deep_child = child['props']['style']
width = deep_child['width']
height = deep_child['height']
if width >= height:
deep_child['max-width'] = deep_child.pop('width')
else:
deep_child['max-height'] = deep_child.pop('height')
def _add_empty_slide(self):
self['presentation']['slides'].append(
_empty_slide(['slide'], _generate_id(9))
)
def _add_missing_slides(self, slide):
# add slides if desired slide number isn't in the presentation
try:
self['presentation']['slides'][slide]['children']
except IndexError:
num_of_slides = len(self['presentation']['slides'])
for _ in range(slide - num_of_slides + 1):
self._add_empty_slide()
def _insert(self, box, text_or_url, left, top, height, width, slide=0,
props_attr={}, style_attr={}, paragraphStyle=None):
self._add_missing_slides(slide)
left, top, height, width = _return_box_position(left, top, height,
width)
new_id = _generate_id(9)
child = _box(box, text_or_url, left, top, height, width, new_id,
props_attr, style_attr, paragraphStyle)
self['presentation']['slides'][slide]['children'].append(child)
def _color_background(self, color, slide):
self._add_missing_slides(slide)
loc = self['presentation']['slides'][slide]
loc['props']['style']['backgroundColor'] = color
def _background_image(self, url, slide, bkrd_image_dict):
self._add_missing_slides(slide)
loc = self['presentation']['slides'][slide]['props']
# default settings
size = 'stretch'
repeat = 'no-repeat'
if 'background-size:' in bkrd_image_dict:
size = bkrd_image_dict['background-size:']
if 'background-repeat:' in bkrd_image_dict:
repeat = bkrd_image_dict['background-repeat:']
if size == 'stretch':
backgroundSize = '100% 100%'
elif size == 'original':
backgroundSize = 'auto'
elif size == 'contain':
backgroundSize = 'contain'
elif size == 'cover':
backgroundSize = 'cover'
style = {
'backgroundImage': 'url({})'.format(url),
'backgroundPosition': 'center center',
'backgroundRepeat': repeat,
'backgroundSize': backgroundSize
}
for item in style.items():
loc['style'].setdefault(item[0], item[1])
loc['backgroundImageSrc'] = url
loc['backgroundImageName'] = None
def _set_transition(self, transition, slide):
self._add_missing_slides(slide)
loc = self['presentation']['slides'][slide]['props']
loc['transition'] = transition
| 2.078125 | 2 |
AUTO-PLANNING/AutoTemplateTuning/projects/sequencing prediction/DataProcess.py | fishdda/RL-Application-in-TPS | 5 | 12766604 | <reponame>fishdda/RL-Application-in-TPS
class DATAPROCESS:
    '''
    Convert raw DICOM exports of VMAT plans (CT series, RTDOSE, RTPLAN)
    into numpy arrays saved under NEW_DATA_PATH.

    Currently, this class is only specific to VMAT plans.
    '''
    def __init__(self, RAW_DATA_PATH, NEW_DATA_PATH):
        # RAW_DATA_PATH: directory with one sub-directory of DICOM files per
        # patient. NEW_DATA_PATH: output path prefix for the .npy files.
        self.RAW_DATA_PATH = RAW_DATA_PATH
        self.NEW_DATA_PATH = NEW_DATA_PATH
        self.PLAN = {}

    def TRANSFER_RAW_DATA(self):
        """Read each patient folder and save CT, dose and plan data as .npy.

        Per patient directory this writes:
          <name>_CT.npy   -- stacked, HU-rescaled CT volume
          <name>_Dose.npy -- dose grid scaled by DoseGridScaling
          <name>_Plan.npy -- dict with 'MU', 'Gantry', 'JAW' and 'MLC'
        """
        import numpy as np
        import os
        import pydicom
        # deal with CT image data, dose data and MLC & MU intensity
        file_names = os.listdir(self.RAW_DATA_PATH)
        for patient_idx, file_nam in enumerate(file_names):
            CT_ = []
            print('This is {}th patient'.format(patient_idx))
            temp_files = os.listdir(os.path.join(self.RAW_DATA_PATH, file_nam))
            for dcm_nam in temp_files:
                dcm = pydicom.read_file(
                    os.path.join(self.RAW_DATA_PATH, file_nam, dcm_nam),
                    force=True)
                if dcm.Modality == 'CT':
                    print(dcm.Modality)
                    dcm.file_meta.TransferSyntaxUID = \
                        pydicom.uid.ImplicitVRLittleEndian
                    img = dcm.pixel_array
                    CT_.append(img)
                    intercept = dcm.RescaleIntercept
                    slope = dcm.RescaleSlope
                    # The intercept is usually -1024, so air is approximately 0
                elif dcm.Modality == 'RTDOSE' and dcm.DoseSummationType == 'PLAN':
                    dcm.file_meta.TransferSyntaxUID = \
                        pydicom.uid.ImplicitVRLittleEndian
                    DOSE = np.array(dcm.pixel_array * dcm.DoseGridScaling)
                    print('The DOSE array shape:{}'.format(DOSE.shape))
                    np.save(self.NEW_DATA_PATH + file_nam + '_Dose.npy', DOSE)
                elif dcm.Modality == 'RTPLAN':
                    Total_MU = float(dcm.FractionGroupSequence[0]
                                     .ReferencedBeamSequence[0].BeamMeterset)
                    control_points = dcm.BeamSequence[0].ControlPointSequence
                    MLC = np.zeros([160, len(control_points)])
                    JAW = np.zeros([2, len(control_points)])
                    MU_, Gantry = [], []
                    for cp_idx, cp in enumerate(control_points):
                        # extract JAW and MLC positions per control point
                        JAW[:, cp_idx] = np.array(
                            cp.BeamLimitingDevicePositionSequence[0]
                            .LeafJawPositions).T
                        MLC[:, cp_idx] = \
                            cp.BeamLimitingDevicePositionSequence[1].LeafJawPositions
                        Gantry.append(cp.GantryAngle)
                        MU_.append(float(cp.CumulativeMetersetWeight))
                    # Convert cumulative meterset weights to per-segment MU.
                    # BUG FIX: the original iterated over the (empty) MU list
                    # instead of MU_, so MU was always left empty.
                    MU = []
                    for k, ktem in enumerate(MU_):
                        if k > 1:
                            MU.append((MU_[k] - MU_[k - 1]) * Total_MU)
                        else:
                            # NOTE(review): the first *two* entries keep the
                            # raw cumulative weight -- confirm k > 1 vs k > 0.
                            MU.append(ktem)
                    self.PLAN['MU'] = MU
                    self.PLAN['Gantry'] = Gantry
                    self.PLAN['JAW'] = JAW
                    self.PLAN['MLC'] = MLC
                    # BUG FIX: the original saved the undefined name PLAN.
                    np.save(self.NEW_DATA_PATH + file_nam + '_Plan.npy',
                            self.PLAN)
            CT = np.array(CT_)
            CT[CT == -2000] = 0
            if slope != 1:
                CT = slope * CT.astype(np.float64)
            # BUG FIX: the original converted `img` (only the last slice)
            # instead of the stacked CT volume.
            CT = CT.astype(np.int16)
            CT1 = CT + np.int16(intercept)
            print('The CT array shape: {}'.format(CT1.shape))
            np.save(self.NEW_DATA_PATH + file_nam + '_CT.npy', CT1)

    def Fluence_Map(self):
        '''
        Merge all the segments into one fluence map (not yet implemented).
        '''
| 2.328125 | 2 |
demos/graphql/graph/chat/db_utils.py | hzlmn/aiohttp-demos | 649 | 12766605 | <gh_stars>100-1000
from aiopg.sa import SAConnection as SAConn
from aiopg.sa.result import RowProxy
from graph.types import RowsProxy
from graph.constants import OBJECT_NOT_FOUND_ERROR
from graph.chat.tables import (
rooms,
messages,
)
__all__ = [
'select_rooms',
'select_messages_by_room_id',
'select_room',
'create_message',
'delete_message',
]
# selects
async def select_rooms(conn: SAConn) -> RowsProxy:
    """Return all rooms, ordered by id."""
    query = rooms.select().order_by(rooms.c.id)
    cursor = await conn.execute(query)
    return await cursor.fetchall()
async def select_room(conn: SAConn, id: int) -> RowProxy:
    """Return the room with the given id; assert that it exists."""
    query = rooms.select().where(rooms.c.id == id)
    cursor = await conn.execute(query)
    room = await cursor.fetchone()
    assert room, OBJECT_NOT_FOUND_ERROR
    return room
async def select_messages_by_room_id(conn: SAConn, room_id: int) -> RowsProxy:
    """Return all messages of the room, ordered by id."""
    query = (
        messages.select()
        .where(messages.c.room_id == room_id)
        .order_by(messages.c.id)
    )
    cursor = await conn.execute(query)
    return await cursor.fetchall()
# create
async def create_message(
    conn: SAConn,
    room_id: int,
    owner_id: int,
    body: str,
) -> RowProxy:
    """Insert a message and return its (id, owner_id) row."""
    query = (
        messages.insert()
        .values(body=body, owner_id=owner_id, room_id=room_id)
        .returning(messages.c.id, messages.c.owner_id)
    )
    cursor = await conn.execute(query)
    return await cursor.fetchone()
# delete
async def delete_message(conn: SAConn, id: int) -> None:
    """Delete the message with the given id."""
    query = messages.delete().where(messages.c.id == id)
    await conn.execute(query)
| 2.359375 | 2 |
ggcg/gen/__init__.py | FreNeS1/ggcg | 1 | 12766606 | <filename>ggcg/gen/__init__.py
"""Generator package. Contains the logic to simplify, modify and regenerate new computational graphs based on existing
ones."""
| 1.257813 | 1 |
profiles/__init__.py | nivbend/memoir | 0 | 12766607 | from __future__ import unicode_literals
default_app_config = 'profiles.apps.ProfilesConfig'
| 1.164063 | 1 |
src/design.py | pbishnupuri/cse518-final-project | 0 | 12766608 | <filename>src/design.py<gh_stars>0
# Will probably split into multiple files and then have app.py put it all together
# QUESTION - Can we use ANNs??? Surely we should be able to right?
##### Graph
# Characteristics of the graph
# 1. Will be a complete graph
# 2. Nodes represent a person's political stance
# 3. Edges represent a news source
# 4. You can't go through the same edge twice
# Characteristics of edge:
# 1. Political stance [-1, 1] - we can use the media bias chart [-1, 1]
# 2. History of influence on persons? - like how has this media channel historically made left/right people feel
# (i.e CNN makes extreme right people go more right)
# Characteristics of the algorithm - two options
#######################################################
## Option 1 - Brute Force
# 1. Go through the graph, keeping a history of each movement
# 2. Loop through the history, find the one where the person's political stance is closest to 0
# Pro - Will find minimum
# Cons - Not scalable, takes up a lot of space, no way it's realistic for user to read all of those articles
## Option 2 - Greedy Approach
# 1. Start at a node, take the edge that gets your political stance closest to 0
# 2. Repeat until out of nodes to take
# Pros - Faster, more scalable?, lowers political polarization, more realistic (since we can show the person one article at a time)
# Cons - Doesn't find the minimum
## Option 3 - ANN?
# 1. Not really versed in ML, but my professor said it was a possibility. I'm not sure how we would do this.
##### Person Object
## Characteristics each person will start with:
# 1. Initial political stance [-1, 1]
# 2. Adversity to news (i.e. showing liberal news will make them more conservative and vise versa) [-1, 1]? -- probably an equation
# 3. Influence by news
## Characteristics of node (updated person, similar to initial person):
# 1. Updated political stance [-1, 1]
# 2. Updated adversity to news (i.e. showing liberal news will make them more conservative and vise versa) [-1, 1] -- probably an equation
# 3. Updated influence by news
##### Project Design for Milestone 3
# Do both!
# 1. We can start with a small number of news channels, say 5-10
# 2. Create the data
# 3. Create 2 model persons (keep it easy) -
# they will be relatively moderate, so they are susceptible to moderately leaning news (easy to mimic)
# 4. Run person using both brute force and greedy
# 5. Report data | 3.328125 | 3 |
hotsos/plugin_extensions/storage/bcache_summary.py | KellenRenshaw/hotsos | 6 | 12766609 | import re
from hotsos.core.host_helpers import CLIHelper
from hotsos.core.plugins.storage.bcache import BcacheChecksBase
class BcacheSummary(BcacheChecksBase):
    """Summary plugin reporting bcache cachesets and block devices."""

    def __summary_cachesets(self):
        # Only report the key when at least one cacheset exists.
        csets = self.get_sysfs_cachesets()
        if csets:
            return csets

    def __summary_devices(self):
        """Map bcache/nvme block devices to their disk/by-dname names."""
        devs = {}
        # PERF FIX: compile each pattern once instead of once per input
        # line / per device (the originals were recompiled inside the loops).
        dname_expr = re.compile(r'.+\s+disk/by-dname/(.+)')
        for dev_type in ['bcache', 'nvme']:
            dev_expr = re.compile(
                r".+[0-9:]+\s+({}[0-9a-z]+)\s+.+".format(dev_type))
            for line in CLIHelper().ls_lanR_sys_block():
                ret = dev_expr.match(line)
                if ret:
                    if dev_type not in devs:
                        devs[dev_type] = {}

                    devname = ret[1]
                    devs[dev_type][devname] = {}
                    for info_line in CLIHelper().udevadm_info_dev(
                            device=devname):
                        ret = dname_expr.match(info_line)
                        if ret:
                            devs[dev_type][devname]['dname'] = ret[1]
                        elif 'dname' not in devs[dev_type][devname]:
                            # Placeholder until/unless a dname line matches.
                            devs[dev_type][devname]['dname'] = \
                                '<notfound>'

        if devs:
            return devs
| 2.046875 | 2 |
crawler/qq/qq/utils.py | markhuyong/galaxy | 0 | 12766610 | # -*- coding: utf-8 -*-
import random
import urllib
from scrapy.http.headers import Headers
from crawler.misc.spider import CommonSpider
from crawler.misc import agents
class BaseHelper(object):
    """URL templates and builder helpers for QZone / QQ Graph API requests.

    Placeholders such as USER_QQ_NUMBER, GTK, CODE, ACCESS_TOKEN, OPENID,
    ALBUMID, PS, PN and PAGECOUNT in the templates below are substituted
    by the get_* class methods.
    """
    PROFILE_URL = "https://mobile.qzone.qq.com/profile?hostuin=USER_QQ_NUMBER"
    SHUOSHU_URL = "https://mobile.qzone.qq.com/list?g_tk=GTK&format=json&list_type=shuoshuo&action=0&res_uin=USER_QQ_NUMBER&count=PAGECOUNT"
    CODE_URL = "https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=101347930&client_secret=68270da4c08fddb26486283c1fab1b0a&code=CODE&redirect_uri=http%3a%2f%2f29060abb.nat123.net%2fPBMSWEBOOK%2fqqlogin&state=203"
    OPENID_URL = "https://graph.qq.com/oauth2.0/me?access_token=ACCESS_TOKEN"
    ALBUMLIST_URL = "https://graph.qq.com/photo/list_album?access_token=ACCESS_TOKEN&oauth_consumer_key=101347930&openid=OPENID&format=json"
    NICKNAME_URL = "https://graph.qq.com/user/get_user_info?access_token=ACCESS_TOKEN&oauth_consumer_key=101347930&openid=OPENID"
    POTOLIST_URL = "https://graph.qq.com/photo/list_photo?access_token=ACCESS_TOKEN&oauth_consumer_key=101347930&openid=OPENID&format=json&albumid=ALBUMID"
    ALBUM_URL = "https://mobile.qzone.qq.com/list?g_tk=GTK&format=json&list_type=album&action=0&res_uin=USER_QQ_NUMBER"
    PHOTO_URL = "http://h5.qzone.qq.com/webapp/json/mqzone_photo/getPhotoList2?g_tk=GTK&uin=USER_QQ_NUMBER&albumid=ALBUMID&ps=PS&pn=PN"
    # Page size used for paginated shuoshuo requests.
    PAGE_COUNT = '40'

    @classmethod
    def get_headers(cls):
        # Minimal request headers; the UA/host lines were left disabled
        # by the author.
        return Headers({
            # 'User-Agent': self._get_user_agent(),
            # 'Content-Type': 'application/json',
            # "Connection": "keep-alive",
            'Accept': 'application/json',
            # 'Host': cls.BASE_URL,
        })

    @classmethod
    def get_profile_url(cls, uid):
        # Profile page of the given QQ number.
        return cls.PROFILE_URL.replace("USER_QQ_NUMBER", uid)

    @classmethod
    def get_shuoshuo_url(cls, uid, last_attach=None):
        # Shuoshuo (status feed) list; last_attach continues pagination.
        url = cls.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid) \
            .replace("PAGECOUNT", cls.PAGE_COUNT)
        return url if last_attach is None \
            else url + "&res_attach=" + cls._quote_url(last_attach)

    def get_code_url(self, uid):
        # NOTE(review): returns the shuoshuo URL -- looks like a copy-paste
        # stub; presumably should build from CODE_URL (replace "CODE").
        # Confirm which parameter is expected before fixing.
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)

    def get_openid_url(self, uid):
        # NOTE(review): copy-paste stub -- presumably should build from
        # OPENID_URL (replace "ACCESS_TOKEN"). Confirm expected parameter.
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)

    def get_album_list_url(self, uid):
        # NOTE(review): copy-paste stub -- ALBUMLIST_URL needs both
        # ACCESS_TOKEN and OPENID, which a single uid cannot supply.
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)

    def get_photo_list_url(self, uid):
        # NOTE(review): copy-paste stub -- presumably should build from
        # POTOLIST_URL (ACCESS_TOKEN/OPENID/ALBUMID placeholders).
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)

    @classmethod
    def get_album_url(cls, uid, last_attach=None):
        # Album list; last_attach continues pagination.
        url = cls.ALBUM_URL.replace("USER_QQ_NUMBER", uid)
        return url if last_attach is None \
            else url + "&res_attach=" + cls._quote_url(last_attach)

    @classmethod
    def get_photo_url(cls, uid, album_id, ps, pn, last_attach=None):
        # Photo list of one album; ps/pn are page size and page number.
        url = cls.PHOTO_URL.replace("USER_QQ_NUMBER", uid) \
            .replace("ALBUMID", album_id) \
            .replace("PS", ps) \
            .replace("PN", pn)
        return url if last_attach is None \
            else url + "&res_attach=" + cls._quote_url(last_attach)

    @staticmethod
    def get_cookie_key_prefix(spider):
        # e.g. spider name "qq_xxx" -> redis key prefix "qq:Cookies".
        sep = "_"
        assert spider.name.index(sep) > 0
        return "{}:Cookies".format(spider.name.split(sep)[0])

    @staticmethod
    def _quote_url(url):
        # NOTE(review): Python 2 only (urllib.quote / unicode); would need
        # urllib.parse.quote under Python 3.
        return urllib.quote(unicode(str(url), "UTF-8"))
| 2.1875 | 2 |
order/admin.py | YatharthVats/Dishes-API | 0 | 12766611 | from django.contrib import admin
from .models import Dish
# Register your models here.
admin.site.register(Dish) | 1.328125 | 1 |
discordware/_vendors/__init__.py | znqi/discordware | 0 | 12766612 | import hype
__all__ = ["hype"]
| 1.054688 | 1 |
data/generate-agrawal-3.py | ingako/lifelong-ml | 2 | 12766613 | <gh_stars>1-10
#!/usr/bin/env python3
import sys
path = r'../'
if path not in sys.path:
sys.path.append(path)
from src.stream_generator import RecurrentDriftStream
max_samples = 400001  # number of samples written per generated stream
generator = 'agrawal'

for seed in range(0, 10):
    # One recurrent concept-drift stream per seed, written to its own CSV.
    stream = RecurrentDriftStream(generator=generator,
                                  has_noise=False,
                                  random_state=seed)
    stream.prepare_for_use()
    print(stream.get_data_info())

    output_filename = f'agrawal-3/agrawal-3-{seed}.csv'
    with open(output_filename, 'w') as out:
        for _ in range(max_samples):
            # next_sample() presumably yields batches of size 1 (hence the
            # [0] indexing) -- features then label per CSV row.
            X, y = stream.next_sample()
            out.write(','.join(str(v) for v in X[0]))
            out.write(f',{y[0]}')
            out.write('\n')
| 2.5 | 2 |
{{cookiecutter.out_dir}}/src/{{cookiecutter.django_project_name}}/settings/test.py | SebCorbin/cookiecutter-django | 1 | 12766614 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import tempfile
from .base import * # noqa
for a in INSTALLED_APPS:
    if 'rest_framework' in a:
        # Only configure DRF test defaults when DRF is actually installed.
        REST_FRAMEWORK['TEST_REQUEST_DEFAULT_FORMAT'] = 'json'
SECRET_KEY = 'spam-spam-spam-spam'  # throwaway key for tests only
# Isolate uploads in per-run temp dirs so tests never touch real media.
MEDIA_ROOT = tempfile.mkdtemp()
FILE_UPLOAD_TEMP_DIR = tempfile.mkdtemp()
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Boost perf a little: MD5 is fast and good enough for test fixtures.
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.MD5PasswordHasher',
)
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
# Route every logger to the console handler only. Note that using 'root'
# logger is not enough if children don't propagate.
for logger in six.itervalues(LOGGING['loggers']): # noqa
    logger['handlers'] = ['console']
locs_, globs_, env = post_process_settings(locals())
globals().update(globs_)
try:
    from .local import * # noqa
except ImportError:
    pass
| 1.742188 | 2 |
networks.py | Blupblupblup/Deep-MSVDD-PyTorch | 0 | 12766615 | <gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
architectures from:
- https://github.com/lukasruff/Deep-SAD-PyTorch/blob/master/src/networks/mnist_LeNet.py
- https://github.com/lukasruff/Deep-SAD-PyTorch/blob/master/src/networks/fmnist_LeNet.py
- https://github.com/lukasruff/Deep-SAD-PyTorch/blob/master/src/networks/cifar10_LeNet.py
one should note that F.leaky_relu() uses leakiness alpha = 0.01 and not 0.1 as indicated in the paper http://proceedings.mlr.press/v80/ruff18a/ruff18a.pdf
"""
#############
### MNIST ###
#############
class MNIST_LeNet(nn.Module):
    """LeNet-style encoder for 28x28 MNIST images.

    Maps a batch of images to ``rep_dim``-dimensional representations.
    """

    def __init__(self, rep_dim=32):
        super().__init__()

        self.rep_dim = rep_dim
        self.pool = nn.MaxPool2d(2, 2)

        self.conv1 = nn.Conv2d(1, 8, 5, bias=False, padding=2)
        self.bn1 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(8, 4, 5, bias=False, padding=2)
        self.bn2 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(4 * 7 * 7, self.rep_dim, bias=False)

    def forward(self, x):
        out = x.view(-1, 1, 28, 28)
        out = self.pool(F.leaky_relu(self.bn1(self.conv1(out))))
        out = self.pool(F.leaky_relu(self.bn2(self.conv2(out))))
        out = out.view(int(out.size(0)), -1)
        return self.fc1(out)
class MNIST_LeNet_Decoder(nn.Module):
    """Decoder mirroring MNIST_LeNet: rep_dim code -> 28x28 image."""

    def __init__(self, rep_dim=32):
        super().__init__()

        self.rep_dim = rep_dim

        # Decoder network
        self.deconv1 = nn.ConvTranspose2d(2, 4, 5, bias=False, padding=2)
        self.bn3 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(4, 8, 5, bias=False, padding=3)
        self.bn4 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(8, 1, 5, bias=False, padding=2)

    def forward(self, x):
        # Reshape the code to a (rep_dim/16)-channel 4x4 feature map, then
        # upsample x2 before each transposed convolution.
        out = x.view(int(x.size(0)), int(self.rep_dim / 16), 4, 4)
        out = self.deconv1(F.interpolate(F.leaky_relu(out), scale_factor=2))
        out = self.deconv2(
            F.interpolate(F.leaky_relu(self.bn3(out)), scale_factor=2))
        out = self.deconv3(
            F.interpolate(F.leaky_relu(self.bn4(out)), scale_factor=2))
        return torch.sigmoid(out).squeeze()
class MNIST_LeNet_Autoencoder(nn.Module):
    """Encoder/decoder pair for MNIST built from the LeNet modules above."""

    def __init__(self, rep_dim=32):
        super().__init__()

        self.rep_dim = rep_dim
        self.encoder = MNIST_LeNet(rep_dim=rep_dim)
        self.decoder = MNIST_LeNet_Decoder(rep_dim=rep_dim)

    def forward(self, x):
        return self.decoder(self.encoder(x))
####################
### FashionMNIST ###
####################
class FashionMNIST_LeNet(nn.Module):
    """LeNet-style encoder for 28x28 Fashion-MNIST images."""

    def __init__(self, rep_dim=64):
        super().__init__()

        self.rep_dim = rep_dim
        self.pool = nn.MaxPool2d(2, 2)

        self.conv1 = nn.Conv2d(1, 16, 5, bias=False, padding=2)
        self.bn2d1 = nn.BatchNorm2d(16, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(16, 32, 5, bias=False, padding=2)
        self.bn2d2 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(32 * 7 * 7, 128, bias=False)
        self.bn1d1 = nn.BatchNorm1d(128, eps=1e-04, affine=False)
        self.fc2 = nn.Linear(128, self.rep_dim, bias=False)

    def forward(self, x):
        out = x.view(-1, 1, 28, 28)
        out = self.pool(F.leaky_relu(self.bn2d1(self.conv1(out))))
        out = self.pool(F.leaky_relu(self.bn2d2(self.conv2(out))))
        out = out.view(int(out.size(0)), -1)
        out = F.leaky_relu(self.bn1d1(self.fc1(out)))
        return self.fc2(out)
class FashionMNIST_LeNet_Decoder(nn.Module):
    """Decoder mirroring FashionMNIST_LeNet: rep_dim code -> 28x28 image."""

    def __init__(self, rep_dim=64):
        super().__init__()

        self.rep_dim = rep_dim

        self.fc3 = nn.Linear(self.rep_dim, 128, bias=False)
        self.bn1d2 = nn.BatchNorm1d(128, eps=1e-04, affine=False)
        self.deconv1 = nn.ConvTranspose2d(8, 32, 5, bias=False, padding=2)
        self.bn2d3 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(32, 16, 5, bias=False, padding=3)
        self.bn2d4 = nn.BatchNorm2d(16, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(16, 1, 5, bias=False, padding=2)

    def forward(self, x):
        # Project the code to 128 units, reshape to an 8-channel 4x4 map,
        # then upsample x2 before each transposed convolution.
        out = self.bn1d2(self.fc3(x))
        out = out.view(int(out.size(0)), int(128 / 16), 4, 4)
        out = self.deconv1(F.interpolate(F.leaky_relu(out), scale_factor=2))
        out = self.deconv2(
            F.interpolate(F.leaky_relu(self.bn2d3(out)), scale_factor=2))
        out = self.deconv3(
            F.interpolate(F.leaky_relu(self.bn2d4(out)), scale_factor=2))
        return torch.sigmoid(out).squeeze()
class FashionMNIST_LeNet_Autoencoder(nn.Module):
    """Encoder/decoder pair for Fashion-MNIST."""

    def __init__(self, rep_dim=64):
        super().__init__()

        self.rep_dim = rep_dim
        self.encoder = FashionMNIST_LeNet(rep_dim=rep_dim)
        self.decoder = FashionMNIST_LeNet_Decoder(rep_dim=rep_dim)

    def forward(self, x):
        return self.decoder(self.encoder(x))
###############
### CIFAR10 ###
###############
class CIFAR10_LeNet(nn.Module):
    """LeNet-style encoder for 32x32 CIFAR-10 images given in NHWC layout."""

    def __init__(self, rep_dim=128):
        super().__init__()

        self.rep_dim = rep_dim
        self.pool = nn.MaxPool2d(2, 2)

        self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
        self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
        self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
        self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)

    def forward(self, x):
        # Swap axes 1 and 3 to go from NHWC to NCHW (this also exchanges
        # H and W, which is harmless for square inputs).
        out = torch.transpose(x, 1, 3)
        out = self.pool(F.leaky_relu(self.bn2d1(self.conv1(out))))
        out = self.pool(F.leaky_relu(self.bn2d2(self.conv2(out))))
        out = self.pool(F.leaky_relu(self.bn2d3(self.conv3(out))))
        out = out.view(int(out.size(0)), -1)
        return self.fc1(out)
class CIFAR10_LeNet_Decoder(nn.Module):
    """Decoder mirroring CIFAR10_LeNet: rep_dim code -> 32x32 NHWC image."""

    def __init__(self, rep_dim=128):
        super().__init__()

        self.rep_dim = rep_dim

        self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv4.weight, gain=nn.init.calculate_gain('leaky_relu'))

    def forward(self, x):
        # Reshape the code to a (rep_dim/16)-channel 4x4 map, then upsample
        # x2 between the remaining transposed convolutions.
        out = x.view(int(x.size(0)), int(self.rep_dim / (4 * 4)), 4, 4)
        out = self.deconv1(F.leaky_relu(out))
        out = self.deconv2(
            F.interpolate(F.leaky_relu(self.bn2d4(out)), scale_factor=2))
        out = self.deconv3(
            F.interpolate(F.leaky_relu(self.bn2d5(out)), scale_factor=2))
        out = self.deconv4(
            F.interpolate(F.leaky_relu(self.bn2d6(out)), scale_factor=2))
        # Return NHWC layout, matching the encoder's expected input.
        return torch.transpose(torch.sigmoid(out), 1, 3)
class CIFAR10_LeNet_Autoencoder(nn.Module):
    """Encoder/decoder pair for CIFAR-10 (NHWC in, NHWC out)."""

    def __init__(self, rep_dim=128):
        super().__init__()

        self.rep_dim = rep_dim
        self.encoder = CIFAR10_LeNet(rep_dim=rep_dim)
        self.decoder = CIFAR10_LeNet_Decoder(rep_dim=rep_dim)

    def forward(self, x):
        return self.decoder(self.encoder(x))
mocasin/mapper/test/test_gd.py | tud-ccc/mocasin | 1 | 12766616 | <reponame>tud-ccc/mocasin
# Copyright (C) 2020 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: <NAME>, <NAME>
from mocasin.mapper.test.mock_cache import MockMappingCache
from mocasin.mapper.gradient_descent import GradientDescentMapper
import pytest
import numpy as np
from itertools import product
@pytest.fixture
def evaluation_function_gradient():
return lambda m: [
np.sin(1 - 2 * m[1]) * np.sin(m[0] - m[1]),
1 / 2 * (3 * np.cos(1 + m[0] - 3 * m[1]) + np.cos(1 - m[0] - m[1])),
]
@pytest.fixture
def mapper(
graph,
platform,
trace,
representation_pbc,
simres_evaluation_function,
mocker,
):
m = GradientDescentMapper(
graph,
platform,
trace,
representation_pbc,
100,
2,
42,
False,
False,
10,
False,
True,
4,
)
m.simulation_manager = MockMappingCache(simres_evaluation_function, mocker)
return m
def test_gd(mapper, evaluation_function):
result_mapper = mapper.generate_mapping()
results = [
(evaluation_function([x, y]), x, y)
for x, y in product(range(7), range(7))
]
expected = set([(x, y) for (_, x, y) in sorted(results)[:3]])
# result is top 3 best
assert tuple(result_mapper.to_list()) in expected
def test_gradient(mapper, evaluation_function, evaluation_function_gradient):
mapper.dim = 2
good = 0
bad = 0
for (x, y) in product(range(1, 6), range(1, 6)):
# > max(evaluation_function)
mapper.best_exec_time = 3
mapper.best_mapping = np.zeros(mapper.dim)
actual_grad = np.array(evaluation_function_gradient([x, y]))
calculated_grad = mapper.calculate_gradient(
[x, y], evaluation_function([x, y])
)
if not np.allclose(actual_grad, np.zeros(mapper.dim)):
actual_grad_normed = actual_grad * 1 / np.linalg.norm(actual_grad)
else:
actual_grad_normed = actual_grad
if not np.allclose(calculated_grad, np.zeros(mapper.dim)):
calculated_grad_normed = (
calculated_grad * 1 / np.linalg.norm(calculated_grad)
)
else:
calculated_grad_normed = calculated_grad
if np.allclose(actual_grad_normed, calculated_grad_normed, atol=0.4):
good += 1
else:
bad += 1
assert good > bad
| 2.0625 | 2 |
algs/TRKNN/test.py | mbs8/ENaN | 1 | 12766617 | <filename>algs/TRKNN/test.py<gh_stars>1-10
import trknn
from sklearn import datasets
# Pick the dataset to reduce; the commented lines are alternative
# sklearn datasets used during experimentation.
dataset = datasets.load_iris()
# dataset = datasets.load_breast_cancer()
# dataset = datasets.load_boston()
# dataset = datasets.load_diabetes()
# dataset = datasets.load_digits()

n = len(dataset.data)

# NOTE(review): trknn.run presumably returns the reduced subset as a tuple
# whose first element is the sample array; 1.2 looks like the algorithm's
# threshold/alpha parameter -- confirm against the trknn module.
subset = trknn.run(dataset.data, dataset.target, 1.2)

# Compare original size with the reduced subset size.
print(n)
print(len(subset[0]))
p1_basic/day32_35thread/day35/09_基于yield实现协程.py | dong-pro/fullStackPython | 1 | 12766618 | def f1():
print(11)
yield
print(22)
yield
print(33)
def f2():
    """Generator printing 55, 66, 77, pausing after the first two prints."""
    for value in (55, 66):
        print(value)
        yield
    print(77)
# Interleave the two generators by hand: each next() resumes one function
# until its following yield, demonstrating coroutine-style switching.
v1 = f1()
v2 = f2()
next(v1)  # v1.send(None)
next(v2)  # v2.send(None)
next(v1)  # v1.send(None)
next(v2)  # v2.send(None)
# BUG FIX: each generator only yields twice, so a bare third next() raised
# an uncaught StopIteration after the final print (33), crashing the demo
# before f2 could print 77. Drive the last step of each defensively.
for gen in (v1, v2):
    try:
        next(gen)  # prints 33 / 77, then the generator finishes
    except StopIteration:
        pass
| 2.8125 | 3 |
deploy/boto/lib/con.py | mmihira/rocket_wasm | 1 | 12766619 | <reponame>mmihira/rocket_wasm
import boto
import os
import json
import ConfigParser
import time
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import *
class Con:
    """Open an authenticated boto EC2 connection to ap-southeast-2."""

    def __init__(self):
        # PROJECT_PREFIX names the project and selects the env var that
        # holds the path to the AWS credential file below.
        self.proj_prefix = os.environ['PROJECT_PREFIX']
        # NOTE(review): assumes the key file contains JSON with
        # "access_key" and "secret_key" fields -- confirm with deploy docs.
        f = open(os.path.expanduser(os.environ["{}_AWS_KEY_FILE".format(self.proj_prefix)]), 'r')
        credentials = json.loads(f.read())
        f.close()
        region = RegionInfo(name="ap-southeast-2", endpoint="ec2.ap-southeast-2.amazonaws.com")
        # Secure (HTTPS) EC2 connection, kept on self for callers to use.
        self.conn = boto.connect_ec2(
            aws_access_key_id=credentials["access_key"],
            aws_secret_access_key=credentials["secret_key"],
            is_secure=True,
            region=region)
| 2.453125 | 2 |
tareas/1/MontielJavier-RodriguezCarolina/profe_alumnos.py | ArchieMedes/sistop-2021-1 | 2 | 12766620 | #!/usr/bin/python3
'''
############################################################################
->Autores:
-<NAME>
-<NAME>
->Fecha de creación: 01/11/2020
->Descripción: Análisis y resolución del problema 'Los alumnos y el asesor'
############################################################################
'''
import threading
import random
import time
def alumnos(id):
#Indica el número de preguntas que hará el alumno, puede ser de 1 a 10 preguntas
num_preguntas = random.randint(1, 10)
while num_preguntas > 0:
#El alumno intenta conseguir una silla
sillas.acquire()
print('---->El alumno %d consiguió silla' %id)
#El arreglo se usa para saber que alumnos ocupan sillas
alumnos_en_silla.append(id)
mutex_primer_alumno.acquire()
#En caso de ser el primer alumno en conseguir silla, despierta al profe
if len(alumnos_en_silla) == 1:
profe_dormido.release()
mutex_primer_alumno.release()
#Reduce el número de preguntas restantes del alumno
num_preguntas= num_preguntas - 1
print('-------->El alumno %d ya NO tiene dudas, ya se va' %id)
def profe():
    """Thread body for the professor: answer seated students or sleep.

    Loops forever: pops one student from ``alumnos_en_silla`` and frees that
    student's chair, or blocks on ``profe_dormido`` when nobody is seated.
    """
    while True:
        # If any student occupies a chair, answer one question.
        if len(alumnos_en_silla) > 0:
            print('------>Resolviendo duda...')
            time.sleep(random.random())
            alumno_id = alumnos_en_silla.pop()
            print('->Duda resuelta del alumno %d' %alumno_id)
            # Free the chair so another student can sit down.
            sillas.release()
            print('El alumno %d dejo la silla' %alumno_id)
        # Otherwise sleep until the first seated student wakes us up.
        else:
            print('->Profesor descansando')
            profe_dormido.acquire()
            print('->Profesor despierto')
# Maximum number of students.
num_alumno = 10
# Maximum number of chairs in the office.
num_sillas = 2
# Shared list of currently seated students.
alumnos_en_silla = []
# Semaphores: available chairs, sleeping professor, first-student mutex.
sillas = threading.Semaphore(num_sillas)
profe_dormido = threading.Semaphore(0)
mutex_primer_alumno = threading.Semaphore(1)
# Start the professor thread and one thread per student.
threading.Thread(target=profe).start()
for alumno_id in range(num_alumno):
    threading.Thread(target=alumnos,args=[alumno_id]).start()
| 3.46875 | 3 |
mtgcompiler/frontend/AST/statements.py | rmmilewi/mtgcompiler | 4 | 12766621 | import mtgcompiler.AST.core as core
from enum import Enum
class MgAbstractStatement(core.MgNode):
    """An ability is made of one or more statements, organized into an instruction sequence.
    A statement encapsulates a subtree of expressions, usually terminated by a period."""
    def __init__(self):
        # NOTE(review): super().__init__() is not called here — confirm
        # whether core.MgNode has initialization subclasses rely on.
        self._traversable = True
#Target creature can’t be blocked this turn except by artifact creatures and/or red creatures.
#You may have Gigantoplasm enter the battlefield as a copy of any creature on the battlefield, except it has [...]
#You may have Quicksilver Gargantuan enter the battlefield as a copy of any creature on the battlefield, except it’s 7/7.
#'except' should probably be an expression as it's a statement that alters the effect of something else.
class MgStatementBlock(MgAbstractStatement):
    """Represents a sequence of statements that make up a single ability."""

    def __init__(self, *args):
        """Accepts the statements of this block, in any order."""
        super().__init__()
        self._ilist = args
        for stmt in self._ilist:
            stmt.setParent(self)

    def getNumberOfStatements(self):
        """Return how many statements this block holds."""
        return len(self._ilist)

    def getStatementAtIndex(self, index):
        """Return the statement stored at ``index``.

        Raises an error when the index falls out of bounds."""
        return self._ilist[index]

    def isChild(self, child):
        """Check whether ``child`` is one of this block's statements."""
        return child in self._ilist

    def getTraversalSuccessors(self):
        """Return only the traversable statements of this block."""
        return [stmt for stmt in self._ilist if stmt.isTraversable()]

    def unparseToString(self):
        """Join the unparsed statements with '. '."""
        parts = [stmt.unparseToString() for stmt in self._ilist]
        return '. '.join(parts)
class MgCompoundStatement(MgAbstractStatement):
    """A compound statement is a series of comma-separated clauses, each of which is a statement.
    Compound statements can either be terminated by a 'then' (most common) or an 'and'.
    Examples:
    'You may search your library for an Equipment card, reveal it, put it into your hand, then shuffle your library.'
    'Choose a card name, then reveal a card at random from your hand.'
    'If cards with five or more different converted mana costs are exiled with Azor’s Gateway, you gain 5 life, untap Azor’s Gateway, and transform it.'
    """
    class CompoundTerminator(Enum):
        """The keyword that introduces the final clause."""
        Then = "then"
        And = "and"
    def __init__(self,terminator=None,*statements):
        """
        terminator: a CompoundTerminator; defaults to Then when None.
        statements: the clauses of the compound statement, in order.
        """
        super().__init__()
        self._statements = statements
        if terminator == None:
            self._terminator = MgCompoundStatement.CompoundTerminator.Then
        else:
            self._terminator = terminator
        for statement in self._statements:
            statement.setParent(self)
    def isThenTerminated(self):
        """Checks whether the compound statement is then-terminated."""
        return self._terminator == MgCompoundStatement.CompoundTerminator.Then
    def setThenTerminated(self):
        """Sets the compound statement to be then-terminated."""
        self._terminator = MgCompoundStatement.CompoundTerminator.Then
    def isAndTerminated(self):
        """Checks whether the compound statement is and-terminated."""
        return self._terminator == MgCompoundStatement.CompoundTerminator.And
    def setAndTerminated(self):
        """Sets the compound statement to be and-terminated."""
        self._terminator = MgCompoundStatement.CompoundTerminator.And
    def getStatements(self):
        """Get the list of statements in this compound expression."""
        return self._statements
    def setStatements(self,*statements):
        """Set the list of statements in this compound expression."""
        self._statements = statements
        for statement in self._statements:
            statement.setParent(self)
    #TODO: Add methods to insert/remove individual statements
    def isChild(self,child):
        return child in self._statements
    def getTraversalSuccessors(self):
        return [statement for statement in self._statements if statement.isTraversable()]
    def unparseToString(self):
        """Unparse as 'clause, clause, <terminator> last-clause'.

        Fixed: the old implementation referenced a non-existent attribute
        (self._tlist, a NameError at runtime) and formatted the last
        statement node itself instead of its unparsed text."""
        head = ', '.join(statement.unparseToString() for statement in self._statements[:-1])
        return head + ", {0} {1}".format(
            self._terminator.value, self._statements[-1].unparseToString())
class MgMayStatement(MgAbstractStatement):
    """A may-statement wraps around another statement, indicating that the statement underneath is optional."""
    def __init__(self,player,statement):
        """
        player: An expression describing the player who may take the action.
        statement: The statement that may be carried out.
        """
        # Fixed: the parent constructor was never called, so _traversable
        # was left unset on instances of this class.
        super().__init__()
        self._player = player
        self._player.setParent(self)
        self._statement = statement
        self._statement.setParent(self)
    def getPlayer(self):
        """Get the player who may carry out the statement."""
        return self._player
    def setPlayer(self,player):
        """Set the player who may carry out the statement."""
        self._player = player
        self._player.setParent(self)
    def getStatement(self):
        """Get the statement that may be carried out."""
        return self._statement
    def setStatement(self,statement):
        """Set the statement that may be carried out.

        Fixed: this setter previously took no argument and referenced an
        undefined name, raising NameError whenever it was called."""
        self._statement = statement
        self._statement.setParent(self)
    def isChild(self,child):
        return child is not None and child in {self._player,self._statement}
    def getTraversalSuccessors(self):
        return [node for node in {self._player,self._statement} if node.isTraversable()]
    def unparseToString(self):
        return "{0} may {1}".format(self._player.unparseToString(),self._statement.unparseToString())
class MgBeingStatement(MgAbstractStatement):
    """The abstract parent class for all statements of being/status, such as:
    "~ is an Elf in addition to its other types"
    "~ has first strike and haste"
    "~ can't block"

    A being statement consists of a left-hand side and a right-hand side. The left-hand side
    may be optional (e.g. in a compound statement such as "target creature gets +1/+1 and <implied> has first strike
    until end of turn.").
    """
    def __init__(self,lhs,rhs):
        """
        lhs: (Optional) The thing that has something.
        rhs: The thing that is had.
        impliedLhs: Indicates whether the lhs is implied. This flag is set
        automatically by the constructor (and kept up to date by setLhs).
        It is checked during binding.
        """
        super().__init__()
        self._lhs = lhs
        if self._lhs is not None:
            self._lhs.setParent(self)
            self._impliedLhs = False
        else:
            self._impliedLhs = True
        self._rhs = rhs
        self._rhs.setParent(self)
    def isImpliedLhs(self):
        """Checks whether the left-hand side of this statement is implied (i.e. not an actual child of the node)."""
        return self._impliedLhs
    def getLhs(self):
        """Get the left-hand side of this statement."""
        return self._lhs
    def setLhs(self,lhs):
        """Set the left-hand side of this statement.

        Fixed: the old implementation only re-parented the new lhs when the
        previous lhs was implied, and never updated _impliedLhs, so an
        explicit lhs replacing an explicit lhs was left without a parent and
        a None lhs did not mark the statement as implied."""
        self._lhs = lhs
        if self._lhs is not None:
            self._lhs.setParent(self)
            self._impliedLhs = False
        else:
            self._impliedLhs = True
    def getRhs(self):
        """Get the right-hand side of this statement."""
        return self._rhs
    def setRhs(self,rhs):
        """Set the right-hand side of this statement."""
        self._rhs = rhs
        self._rhs.setParent(self)
    def isChild(self,child):
        if not self.isImpliedLhs():
            return child is not None and child in {self._lhs,self._rhs}
        else:
            return child is not None and child in {self._rhs}
    def getTraversalSuccessors(self):
        if not self.isImpliedLhs():
            return [node for node in {self._lhs,self._rhs} if node.isTraversable()]
        else:
            return [node for node in {self._rhs} if node.isTraversable()]
class MgIsStatment(MgAbstractStatement):
    # Placeholder for 'is' statements of being (e.g. "~ is an Elf ...").
    # NOTE(review): the class name misspells "Statement"; kept as-is because
    # external code may already reference MgIsStatment.
    pass
class MgThenStatement(MgAbstractStatement):
    """Then [expression or statement].

    Examples:
        'Then shuffle your library.'
        'Then if you control an untapped land, destroy all enchantments you control.'
    """

    def __init__(self, body):
        """body: the statement underneath the 'then' statement."""
        super().__init__()
        self._body = body
        self._body.setParent(self)

    def getBody(self):
        """Gets the body of this Then statement."""
        return self._body

    def setBody(self, body):
        """Sets the body of this Then statement."""
        self._body = body
        body.setParent(self)

    def isChild(self, child):
        return child is not None and child in {self._body}

    def getTraversalSuccessors(self):
        return [node for node in {self._body}
                if node is not None and node.isTraversable()]

    def unparseToString(self):
        return "Then {0}".format(self._body.unparseToString())
class MgKeywordAbilityListStatement(MgAbstractStatement):
    """Represents a comma-separated sequence of keyword abilities, like 'flying, haste, first strike'."""

    def __init__(self, *kwabilities):
        """Accepts the keyword abilities of this list."""
        super().__init__()
        self._abilitylist = kwabilities
        for kwability in self._abilitylist:
            kwability.setParent(self)

    def getAbilityAtIndex(self, index):
        """Return the keyword ability stored at ``index``."""
        return self._abilitylist[index]

    def isChild(self, child):
        return child is not None and child in self._abilitylist

    def getTraversalSuccessors(self):
        return [kwability for kwability in self._abilitylist
                if kwability.isTraversable()]

    def unparseToString(self):
        """Join abilities with ', '; a two-ability list whose second entry
        has reminder text is joined with '; ' instead."""
        pair_with_reminder = (len(self._abilitylist) == 2
                              and self._abilitylist[-1].hasReminderText())
        separator = '; ' if pair_with_reminder else ', '
        return separator.join(
            kwability.unparseToString() for kwability in self._abilitylist)
class MgConditionalStatement(MgAbstractStatement):
    """This is an abstract class for conditional statements."""
    def __init__(self,conditional,consequence,inverted=False):
        """
        conditional: The statement which evaluates to true or false.
        consequence: What happens if the condition is true.
        inverted: When True, subclasses unparse as '[consequence] <kw> [conditional]'.
        """
        super().__init__()
        self._conditional = conditional
        self._consequence = consequence
        self._inverted = inverted
        if self._conditional is not None:
            self._conditional.setParent(self)
        if self._consequence is not None:
            self._consequence.setParent(self)
    def getConditional(self):
        """Get the conditional for this conditional statement."""
        return self._conditional
    def setConditional(self,conditional):
        """Set the conditional for this conditional statement.

        Fixed: the new conditional was previously never stored; the setter
        only re-parented the *old* conditional."""
        self._conditional = conditional
        if self._conditional is not None:
            self._conditional.setParent(self)
    def getConsequence(self):
        """Get the consequence for this conditional statement."""
        return self._consequence
    def setConsequence(self,consequence):
        """Set the consequence for this conditional statement.

        Fixed: this setter previously accepted no argument and so could not
        actually change the consequence."""
        self._consequence = consequence
        if self._consequence is not None:
            self._consequence.setParent(self)
    def isInverted(self):
        """Checks whether the conditional statement is inverted."""
        return self._inverted
    def setInverted(self,inverted):
        """Sets the inverted flag for this conditional statement."""
        self._inverted = inverted
    def isChild(self,child):
        return child is not None and child in {self._conditional,self._consequence}
    def getTraversalSuccessors(self):
        return [node for node in {self._conditional,self._consequence} if node is not None and node.isTraversable()]
class MgIfStatement(MgConditionalStatement):
    """'if [conditional], [consequence]', or '[consequence] if [conditional]'
    when inverted."""
    def __init__(self,conditional,consequence,inverted=False):
        # The parent constructor stores _inverted; the redundant second
        # assignment that used to follow this call has been removed.
        super().__init__(conditional,consequence,inverted)
    def unparseToString(self):
        if self._inverted is False:
            return "if {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{1} if {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgWheneverStatement(MgConditionalStatement):
    """'whenever [conditional], [consequence]', or the reverse when inverted."""
    def __init__(self,conditional,consequence,inverted=False):
        super().__init__(conditional,consequence,inverted)
    def unparseToString(self):
        if self._inverted is False:
            return "whenever {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{1} whenever {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgWhenStatement(MgConditionalStatement):
    """'when [conditional], [consequence]', or the reverse when inverted."""
    def __init__(self,conditional,consequence,inverted=False):
        super().__init__(conditional,consequence,inverted)
    def unparseToString(self):
        if self._inverted is False:
            return "when {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{1} when {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgAtStatement(MgConditionalStatement):
    """'at [conditional], [consequence]', or the reverse when inverted."""
    def __init__(self,conditional,consequence,inverted=False):
        super().__init__(conditional,consequence,inverted)
    def unparseToString(self):
        if self._inverted is False:
            return "at {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{1} at {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgAsLongAsStatement(MgConditionalStatement):
    """'as long as [conditional], [consequence]', or the reverse when inverted."""
    def __init__(self,conditional,consequence,inverted=False):
        super().__init__(conditional,consequence,inverted)
    def unparseToString(self):
        if self._inverted is False:
            return "as long as {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{1} as long as {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgUntilStatement(MgConditionalStatement):
    """'until [conditional], [consequence]', or the reverse when inverted."""
    def __init__(self,conditional,consequence,inverted=False):
        super().__init__(conditional,consequence,inverted)
    def unparseToString(self):
        if self._inverted is False:
            return "until {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{1} until {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgOtherwiseStatement(MgConditionalStatement):
    """'otherwise [conditional], [consequence]' — never inverted."""
    def __init__(self,conditional,consequence):
        super().__init__(conditional,consequence,inverted=False)
    def unparseToString(self):
        return "otherwise {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgDuringStatement(MgConditionalStatement):
    """'[conditional] during [consequence]', optionally exclusive ('only during')."""
    def __init__(self,conditional,consequence,exclusive=False):
        """
        exclusive: Specifies that the conditional is exclusive ('only during')."""
        super().__init__(conditional,consequence,inverted=False)
        self._exclusive = exclusive
    def isExclusive(self):
        """Checks whether this during statement is exclusive ('only during')."""
        return self._exclusive == True
    def setExclusive(self,exclusive):
        """Sets whether this during statement is exclusive ('only during')."""
        self._exclusive = exclusive
    def unparseToString(self):
        if self._exclusive == True:
            return "{0} only during {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{0} during {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgUnlessStatement(MgConditionalStatement):
    """'[consequence] unless [conditional]' — always unparsed in that order."""
    def __init__(self,conditional,consequence):
        super().__init__(conditional,consequence,inverted=False)
    def unparseToString(self):
        return "{1} unless {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgForStatement(MgConditionalStatement):
    """'For each ...' statements. Not yet implemented beyond the base class.

    Examples:
    'For each creature target player controls, create a token that’s a copy of that creature.'
    'When you cast this spell, copy it for each time you’ve cast your commander from the command zone this game.'
    'For each land target player controls in excess of the number you control, choose a land that player controls,
    then the chosen permanents phase out.'
    """
    pass
class MgWhileStatement(MgConditionalStatement):
    """
    Typical construction: While [state], [consequence]
    But also situations like: If [if-condition] [while condition], [if-consequence]
    """
    def __init__(self,conditional,consequence,inverted=False):
        # Fixed: the inverted argument was previously discarded (the parent
        # constructor was always called with inverted=False), so inverted
        # while-statements unparsed in the wrong order.
        super().__init__(conditional,consequence,inverted)
    def unparseToString(self):
        if self._inverted is False:
            return "while {0}, {1}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
        else:
            return "{1} while {0}".format(self._conditional.unparseToString(),self._consequence.unparseToString())
class MgActivationStatement(MgAbstractStatement):
    """An activated-ability statement of the form '[cost]: [instructions]'."""

    def __init__(self, cost, instructions):
        """
        cost: expression describing the activation cost.
        instructions: statement(s) carried out when the ability is activated.
        """
        super().__init__()
        self._cost = cost
        self._cost.setParent(self)
        self._instructions = instructions
        self._instructions.setParent(self)

    def getCost(self):
        """Get the cost associated with this statement."""
        return self._cost

    def setCost(self, cost):
        """Set the cost associated with this statement."""
        self._cost = cost
        cost.setParent(self)

    def getInstructions(self):
        """Get the instructions associated with this statement."""
        return self._instructions

    def setInstructions(self, instructions):
        """Set the instructions associated with this statement."""
        self._instructions = instructions
        instructions.setParent(self)

    def isChild(self, child):
        return child is not None and child in {self._cost, self._instructions}

    def getTraversalSuccessors(self):
        return [node for node in {self._cost, self._instructions}
                if node.isTraversable()]

    def unparseToString(self):
        return "{0}: {1}".format(
            self._cost.unparseToString(), self._instructions.unparseToString())
class MgExpressionStatement(MgAbstractStatement):
    """A statement wrapping a single expression/term subtree."""

    def __init__(self, root):
        """root: a single expression/term underneath the statement."""
        super().__init__()
        self._root = root
        self._root.setParent(self)

    def getRoot(self):
        """Get the root expression/term of the statement."""
        return self._root

    def setRoot(self, root):
        """Set the root expression/term of the statement."""
        self._root = root
        root.setParent(self)

    def isChild(self, child):
        return child is self._root

    def getTraversalSuccessors(self):
        return [node for node in {self._root} if node.isTraversable()]

    def unparseToString(self):
        return "{0}".format(self._root.unparseToString())
class MgAbilitySequenceStatement(MgAbstractStatement):
    """An ability sequence statement is a series of one or more keyword abilities and quoted abilities. If there is more
    than one element in the sequence, it is terminated with an 'and'. If there are more than two elements, the sequence is
    comma-delimited. Often used in descriptions of tokens. Examples (in asterisks) include:
    * A 2/2 white knight creature token *with vigilance*.
    * A colorless Treasure artifact token with *"{T}, Sacrifice this artifact: Add one mana of any color."*.
    * A 1/1 colorless Insect artifact creature token with *flying and haste* named Hornet.
    """
    def __init__(self,*abilities):
        """
        abilities: The abilities contained in the sequence statement.
        """
        super().__init__()
        self._abilitylist = abilities
        for ability in self._abilitylist:
            ability.setParent(self)
    def getAbilities(self):
        """Get the abilities in this ability sequence statement."""
        return self._abilitylist
    def setAbilities(self,*abilities):
        """Replaces the current list of abilities with a new list of abilities."""
        self._abilitylist = abilities
        # Re-parent the new abilities. (This loop was previously stranded as
        # unreachable code after the return statement in getAbilities, so
        # setAbilities left the new abilities without a parent.)
        for ability in self._abilitylist:
            ability.setParent(self)
    def isChild(self,child):
        return child in self._abilitylist
    def getTraversalSuccessors(self):
        return [ability for ability in self._abilitylist if ability.isTraversable()]
    def unparseToString(self):
        """Unparse as 'a' / 'a and b' / 'a,b and c' depending on length."""
        output = ""
        if len(self._abilitylist) > 1:
            output = ','.join([ability.unparseToString() for ability in self._abilitylist[0:len(self._abilitylist)-1]])
            output = "{0} and {1}".format(output,self._abilitylist[-1].unparseToString())
        elif len(self._abilitylist) == 1:
            output = "{0}".format(self._abilitylist[0].unparseToString())
        else:
            output = "empty-ability-sequence-statement"
        return output
class MgQuotedAbilityStatement(MgAbstractStatement):
    """A statement block encased in quotes describing a non-keyword ability
    granted by some other ability, as in:
        * Enchanted land has "{T}: This land deals 1 damage to any target."
        * You get an emblem with "Your opponents can't untap more than two
          permanents during their untap steps."
    """

    def __init__(self, stmtblock):
        """stmtblock: the statement block inside the quotes."""
        super().__init__()
        self._stmtblock = stmtblock
        self._stmtblock.setParent(self)

    def isChild(self, child):
        return child is self._stmtblock

    def getTraversalSuccessors(self):
        return [node for node in {self._stmtblock} if node.isTraversable()]

    def unparseToString(self):
        inner = self._stmtblock.unparseToString()
        return "\\\"{0}\\\"".format(inner)
dltools/hooks.py | geodekid/frnn | 299 | 12766622 | """Defines hooks that can run during training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lasagne
import numpy as np
from sklearn import metrics
class LoggingHook(object):
    """This hook writes information to a log file."""

    def __init__(self, logger):
        """Initializes a new instance of the LoggingHook class.

        Args:
            logger: A logger instance.
        """
        self._logger = logger

    def update(self, **kwargs):
        """Executes the hook.

        Args:
            **kwargs: Optimizer state dictionary.
        """
        counter = kwargs["update_counter"]
        # One (key, message) entry per logged quantity, emitted in order.
        entries = (
            ("status", "Log at iteration %d" % counter),
            ("update_counter", counter),
            ("update_runtime", kwargs["runtime"]),
            ("losses", np.asarray(kwargs["losses"])),
        )
        for key, message in entries:
            self._logger.log(key=key, message=message)
class SnapshotHook(object):
    """Hook for storing snapshots of the network's weights."""

    def __init__(self, filename, network, interval):
        """Initializes a new instance of the SnapshotHook class.

        Args:
            filename: The base filename of the model.
            network: The network instance to store.
            interval: The snapshot interval.
        """
        self._filename = filename
        self._network = network
        self._interval = interval

    def update(self, **kwargs):
        """Executes the hook.

        Args:
            **kwargs: The optimizer dictionary.
        """
        # Only snapshot every `interval` updates; otherwise do nothing.
        if kwargs["update_counter"] % self._interval != 0:
            return
        snapshot_path = "%s_snapshot_%d.npz" % (
            self._filename, kwargs["update_counter"])
        np.savez(snapshot_path,
                 *lasagne.layers.get_all_param_values(self._network))
class SegmentationValidationHook(object):
    """Performs a validation run for semantic segmentation."""

    def __init__(self, val_fn, data_provider, logger, interval=300,
                 num_classes=19):
        """Initializes a new instance of the SegmentationValidationHook class.

        Args:
            val_fn: A function that returns the predictions for each image and
            a list of losses.
            data_provider: A chianti data provider.
            logger: A logger instance.
            interval: The validation interval.
            num_classes: Number of segmentation classes (confusion-matrix size).
        """
        self._val_fn = val_fn
        self._data_provider = data_provider
        self._logger = logger
        self._interval = interval
        self._num_classes = num_classes

    def update(self, **kwargs):
        """Runs the validation hook."""
        # Validate every `interval` updates, but not at update 0.
        update_now = kwargs["update_counter"] % self._interval == 0
        if update_now and kwargs["update_counter"] > 0:
            self._logger.log(
                key="validation_checkpoint",
                message=kwargs["update_counter"]
            )
            self._logger.log(
                key="status",
                message="-> Start validation run"
            )

            # Initialize the confusion matrix
            conf_matrix = np.zeros(
                (self._num_classes, self._num_classes)).astype('int64')

            accumulated_loss = 0
            self._data_provider.reset()
            for batch_counter in range(self._data_provider.get_num_batches()):
                self._logger.log(
                    key="status",
                    message="--> Validate batch %d/%d" % (
                        batch_counter + 1,
                        self._data_provider.get_num_batches()))

                batch = self._data_provider.next()
                images = batch[0]
                targets = batch[1]
                predictions, loss = self._val_fn(images, targets)
                accumulated_loss += loss

                # Mark the don't care predictions
                # Flatten the predictions and targets
                flat_predictions = predictions.flatten()
                # A pixel is "void" (don't-care) when its one-hot target row
                # is all zeros; only non-void pixels enter the confusion matrix.
                non_void_pixels = (np.max(targets, axis=1) != 0.0).flatten()
                flat_targets = np.argmax(targets, axis=1).flatten()

                # Select the non-don't cares
                flat_targets = flat_targets[non_void_pixels]
                flat_predictions = flat_predictions[non_void_pixels]

                # Accumulate per-batch confusion counts over all classes.
                conf_matrix += metrics.confusion_matrix(
                    flat_targets,
                    flat_predictions,
                    labels=np.arange(self._num_classes, dtype='int64'))

            # Average the loss over all validation batches before logging.
            accumulated_loss /= self._data_provider.get_num_batches()

            self._logger.log(
                key="conf_matrix",
                message=conf_matrix
            )
            self._logger.log(
                key="validation_loss",
                message=accumulated_loss
            )
| 3.015625 | 3 |
updateDDBBservers.py | miguelFLG13/updateDDBBservers | 0 | 12766623 | <filename>updateDDBBservers.py<gh_stars>0
#! /usr/bin/python
"""
Script to upgrade ddbb in mysql servers
17/08/2015 by <NAME>
"""
import os
import sys
import subprocess

# Deployment targets: the i-th entries of these four lists together describe
# one MySQL server (host, user, password, database name).
ddbb_servers = [] #Complete with MySQL servers
ddbb_users = [] #Complete with MySQL users
ddbb_passwords = [] #Complete with MySQL passwords
ddbb_names = [] #Complete with MySQL DDBB names

# Validate the command line before reading sys.argv[1] (the original code
# raised IndexError when run with no arguments).
if len(sys.argv) < 2 or sys.argv[1] == "help":
    print("Usage:\n\tupdateNicaDDBB.py mysql_file.sql")
    sys.exit(1)

if not os.path.exists(sys.argv[1]):
    print("Cannot open " + sys.argv[1])
    sys.exit(1)

# Apply the SQL file to every configured server.  subprocess.run with an
# argument list (shell=False) avoids shell injection through the configured
# values; the SQL file is fed via stdin instead of shell redirection.
for server, user, password, name in zip(ddbb_servers, ddbb_users,
                                        ddbb_passwords, ddbb_names):
    with open(sys.argv[1], "rb") as sql_file:
        subprocess.run(
            ["mysql", "-h" + server, "-u" + user, "-p" + password, name],
            stdin=sql_file)
sahara-10.0.0/sahara/plugins/edp.py | scottwedge/OpenStack-Stein | 161 | 12766624 | <gh_stars>100-1000
# Copyright (c) 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp import hdfs_helper
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.edp.spark import engine as spark_engine
from sahara.service.edp.storm import engine as storm_engine
from sahara.utils import edp
# Re-exported EDP job-type and status constants, so plugin code can depend on
# sahara.plugins.edp instead of importing sahara.utils.edp directly.
JOB_TYPE_HIVE = edp.JOB_TYPE_HIVE
JOB_TYPE_SPARK = edp.JOB_TYPE_SPARK
JOB_TYPE_JAVA = edp.JOB_TYPE_JAVA
JOB_TYPE_SHELL = edp.JOB_TYPE_SHELL
JOB_TYPE_PIG = edp.JOB_TYPE_PIG
JOB_TYPE_STORM = edp.JOB_TYPE_STORM
JOB_TYPE_PYLEUS = edp.JOB_TYPE_PYLEUS
JOB_TYPE_MAPREDUCE = edp.JOB_TYPE_MAPREDUCE
JOB_TYPE_MAPREDUCE_STREAMING = edp.JOB_TYPE_MAPREDUCE_STREAMING
JOB_TYPES_ALL = edp.JOB_TYPES_ALL
JOB_STATUS_SUCCEEDED = edp.JOB_STATUS_SUCCEEDED
class PluginsStormJobEngine(storm_engine.StormJobEngine):
    """Plugin-facing wrapper around the Storm EDP job engine."""
    def __init__(self, cluster, **kwargs):
        super(PluginsStormJobEngine, self).__init__(cluster)
class PluginsStormPyleusJobEngine(storm_engine.StormPyleusJobEngine):
    """Plugin-facing wrapper around the Storm Pyleus EDP job engine."""
    def __init__(self, cluster, **kwargs):
        super(PluginsStormPyleusJobEngine, self).__init__(cluster)
class PluginsSparkJobEngine(spark_engine.SparkJobEngine):
    """Plugin-facing wrapper around the Spark EDP job engine."""
    def __init__(self, cluster, **kwargs):
        super(PluginsSparkJobEngine, self).__init__(cluster)
class PluginsSparkShellJobEngine(spark_engine.SparkShellJobEngine):
    """Plugin-facing wrapper around the Spark shell EDP job engine."""
    def __init__(self, cluster, **kwargs):
        super(PluginsSparkShellJobEngine, self).__init__(cluster)
class PluginsOozieJobEngine(oozie_engine.OozieJobEngine):
    """Plugin-facing wrapper around the Oozie EDP job engine."""
    def __init__(self, cluster, **kwargs):
        super(PluginsOozieJobEngine, self).__init__(cluster)
def get_hive_shared_conf_path(hdfs_user, **kwargs):
    """Return the HDFS path of the shared Hive config for ``hdfs_user``."""
    return edp.get_hive_shared_conf_path(hdfs_user)
def compare_job_type(job_type, *args, **kwargs):
    """Delegate job-type comparison to sahara.utils.edp."""
    return edp.compare_job_type(job_type, *args, **kwargs)
def get_builtin_binaries(job, configs, **kwargs):
    """Return the builtin binaries for ``job`` under ``configs``."""
    return edp.get_builtin_binaries(job, configs)
def create_dir_hadoop2(r, dir_name, hdfs_user, **kwargs):
    """Create ``dir_name`` in HDFS (Hadoop 2) as ``hdfs_user`` via remote ``r``."""
    hdfs_helper.create_dir_hadoop2(r, dir_name, hdfs_user)
def create_hbase_common_lib(r, **kwargs):
    """Create the HBase common lib in HDFS via remote ``r``."""
    hdfs_helper.create_hbase_common_lib(r)
def get_plugin(cluster, **kwargs):
    """Return the plugin instance associated with ``cluster``."""
    return job_utils.get_plugin(cluster)
def get_possible_job_config(job_type, **kwargs):
    """Return the possible workflow config for ``job_type``."""
    return workflow_factory.get_possible_job_config(job_type)
def get_possible_mapreduce_configs(**kwargs):
    """Return the possible MapReduce workflow configs."""
    return workflow_factory.get_possible_mapreduce_configs()
| 1.5625 | 2 |
html_email/__init__.py | classroomtechtools/moodle_daily_notices | 0 | 12766625 | <filename>html_email/__init__.py
from html_email.Email import Email, read_in_templates
__all__ = [Email, read_in_templates]
| 1.46875 | 1 |
artistapp/artist/migrations/0009_remove_content_slug.py | fallprojects/ArtistApp | 0 | 12766626 | <gh_stars>0
# Generated by Django 3.1.3 on 2020-12-08 12:41
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: drops the ``slug`` field from the
    ``content`` model of the ``artist`` app."""

    dependencies = [
        ('artist', '0008_auto_20201208_1136'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='content',
            name='slug',
        ),
    ]
| 1.3125 | 1 |
Advanced/Generators/main.py | Alperencode/Python | 1 | 12766627 | <filename>Advanced/Generators/main.py
## Gerenators
def mygenerator(x):
for i in range(x):
yield i
values = mygenerator(100)
for i in values:
print(i)
print(next(mygenerator)) | 3.515625 | 4 |
flask-celery-example/app/celery/tasks.py | hk-badbuta/buta-exercise-celery | 0 | 12766628 | <reponame>hk-badbuta/buta-exercise-celery
from celery import task
@task(bind=True)
def hello(self):
    """Celery task returning the configured TITLE, or a fallback greeting.

    How to call me, from the project folder:
        $ celery -A app.run_celery.celery call app.celery.tasks.hello
    app.run_celery.celery is the celery (actually FlaskCelery) instance
    (from app.extensions.celery) but with configuration set.
    With bind=True, ``self`` is the task instance.
    """
    # self.app is the bound Celery/FlaskCelery application, when one is set.
    if self.app is not None:
        return self.app.conf.TITLE
    else:
        return 'No App found, Hello'
| 2.125 | 2 |
medium/1081-smallest-subsequence-of-distinct-characters.py | wanglongjiang/leetcode | 2 | 12766629 | <reponame>wanglongjiang/leetcode<gh_stars>1-10
'''
不同字符的最小子序列
返回 s 字典序最小的子序列,该子序列包含 s 的所有不同字符,且只包含一次。
注意:该题与 316 https://leetcode.com/problems/remove-duplicate-letters/ 相同
示例 1:
输入:s = "bcabc"
输出:"abc"
示例 2:
输入:s = "cbacdcbc"
输出:"acdb"
提示:
1 <= s.length <= 1000
s 由小写英文字母组成
'''
from collections import Counter
'''
思路:单调栈 哈希
目的:尽量让前面的字符按照升序出现。
首先需要遍历依次字符串,对所有字符进行计数
然后再次遍历字符串,对于每个字符,
如果没有出现在栈内,且与栈顶相比是升序,入栈
如果没有出现在栈内,且与栈顶元素相比是降序,需要尝试将栈顶元素出栈(出栈的条件是该元素在后面还有)
与下面的题相同:
- 316.[去除重复字母](medium/316-remove-duplicate-letters.py)
时间复杂度:O(n)
空间复杂度:O(n)
'''
class Solution:
    def smallestSubsequence(self, s: str) -> str:
        """Return the lexicographically smallest subsequence of ``s`` that
        contains every distinct character exactly once (monotonic stack)."""
        remaining = Counter(s)   # occurrences of each char not yet consumed
        stack = []               # result under construction
        on_stack = set()         # membership mirror of `stack`
        for ch in s:
            if ch in on_stack:
                # Duplicate of a kept character: just consume one occurrence.
                remaining[ch] -= 1
                continue
            # Pop kept characters that are larger than ch *and* still occur
            # later in the string — they can be re-added in a better spot.
            while stack and stack[-1] > ch and remaining[stack[-1]] > 1:
                remaining[stack[-1]] -= 1
                on_stack.discard(stack.pop())
            stack.append(ch)
            on_stack.add(ch)
        return ''.join(stack)
# Ad-hoc smoke tests: the first two print True/False comparisons against the
# expected answers; the last two just print the result for visual inspection.
s = Solution()
print(s.smallestSubsequence("bcbcbcababa") == "bca")
print(s.smallestSubsequence("ecbacba") == "eacb")
print(s.smallestSubsequence("cbacdcbc"))
print(s.smallestSubsequence("bcabc"))
| 3.15625 | 3 |
utils/measure_inference_time.py | VakhrameevaLiza/pytorch_segmentation_framework | 0 | 12766630 | <filename>utils/measure_inference_time.py
import torch
import time
import numpy as np
from models.Unet import Unet
def measure_inference_time(model, input_size):
    """Print the mean GPU inference time (ms) of ``model`` on a random input.

    model: a torch module; switched to eval mode and moved to CUDA.
    input_size: tuple passed to torch.randn, e.g. (1, 3, 1024, 2048).
    Requires a CUDA device; timing uses CUDA events so the measured interval
    is the on-device kernel time, not host-side launch time.
    """
    # benchmark mode lets cudnn pick the fastest kernels for a fixed shape
    torch.backends.cudnn.benchmark = True
    model.eval()
    model = model.cuda()
    input = torch.randn(*input_size).cuda()

    # warming
    for _ in range(10):
        out = model(input)

    num_iters = 10
    ts = []
    torch.cuda.synchronize()
    for _ in range(num_iters):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        out = model(input)
        end.record()
        # wait for the recorded events to complete before reading the timer
        torch.cuda.synchronize()
        t = start.elapsed_time(end)
        ts.append(t)
    print('{:.1f} ms'.format(np.mean(ts)))
if __name__ == '__main__':
    net = Unet(65)
    input_size = (1, 3, 1024, 2048)
    # Fixed NameError: the function defined above is measure_inference_time,
    # but this call site referred to an undefined `measure_inference`.
    measure_inference_time(net, input_size)
cruw/mapping/ops.py | kanishkaisreal/cruw-devkit | 0 | 12766631 | <reponame>kanishkaisreal/cruw-devkit<filename>cruw/mapping/ops.py
import numpy as np
from cruw.mapping.coor_transform import pol2cart_ramap, cart2pol_ramap
def find_nearest(array, value):
    """Return (index, element) of the entry in `array` closest to `value`."""
    arr = np.asarray(array)
    idx = np.abs(arr - value).argmin()
    return idx, arr[idx]
def idx2ra(rng_id, agl_id, range_grid, angle_grid):
    """Look up absolute range (m) and azimuth (rad) for the given RA indices."""
    return range_grid[rng_id], angle_grid[agl_id]
def idx2ra_interpolate(rng_id, agl_id, range_grid, angle_grid):
    """Convert (possibly fractional) ra indices to range (m) and azimuth (rad)
    by linear interpolation over the grid values."""
    rng = np.interp(rng_id, np.arange(range_grid.shape[0]), range_grid)
    agl = np.interp(agl_id, np.arange(angle_grid.shape[0]), angle_grid)
    return rng, agl
def ra2idx(rng, agl, range_grid, angle_grid):
    """Map absolute range (m) and azimuth (rad) to the nearest grid indices."""
    # nearest-neighbour lookup along each grid independently
    rng_id = np.abs(np.asarray(range_grid) - rng).argmin()
    agl_id = np.abs(np.asarray(angle_grid) - agl).argmin()
    return rng_id, agl_id
def ra2idx_interpolate(rng, agl, range_grid, angle_grid):
    """Get interpolated (float) RA indices for the given range and azimuth.

    The azimuth axis is interpolated in sine space, matching the grid layout.
    """
    rng_id = np.interp(rng, range_grid, np.arange(range_grid.shape[0]))
    agl_id = np.interp(np.sin(agl), np.sin(angle_grid), np.arange(angle_grid.shape[0]))
    return rng_id, agl_id
def xz2idx_interpolate(x, z, x_grid, z_grid):
    """Get interpolated (float) XZ grid indices for Cartesian coordinates."""
    x_id = np.interp(x, x_grid, np.arange(x_grid.shape[0]))
    z_id = np.interp(z, z_grid, np.arange(z_grid.shape[0]))
    return x_id, z_id
def ra2xzidx(rng, agl, xz_grid):
    """Map absolute range (m) and azimuth (rad) to interpolated XZ indices.

    :param xz_grid: pair (x_grid, z_grid) of coordinate arrays
    """
    # convert polar radar coordinates to Cartesian, then interpolate indices
    x, z = pol2cart_ramap(rng, agl)
    x_id, z_id = xz2idx_interpolate(x, z, *xz_grid)
    return x_id, z_id
def bilinear_interpolate(im, x, y):
x = np.asarray(x)
y = np.asarray(y)
x0 = np.floor(x).astype(int)
x1 = x0 + 1
y0 = np.floor(y).astype(int)
y1 = y0 + 1
x0 = np.clip(x0, 0, im.shape[1] - 1)
x1 = np.clip(x1, 0, im.shape[1] - 1)
y0 = np.clip(y0, 0, im.shape[0] - 1)
y1 = np.clip(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
return wa * Ia + wb * Ib + wc * Ic + wd * Id
| 2.34375 | 2 |
eval/utils/conf_file_utils.py | mbatchkarov/dc_evaluation | 0 | 12766632 | <filename>eval/utils/conf_file_utils.py
from os import path as path
import os
from pprint import pprint
import sys
from configobj import ConfigObj, flatten_errors
import validate
__author__ = 'mmb28'
def set_nested(dic, key_list, value):
    """Set a value at a nested key path, creating intermediate dicts as needed.

    Bug fix: the doctest previously called a nonexistent ``nested_set``.

    >>> d = {}
    >>> set_nested(d, ['person', 'address', 'city'], 'New York')
    >>> d
    {'person': {'address': {'city': 'New York'}}}
    """
    for key in key_list[:-1]:
        # descend, creating an empty dict for any missing intermediate key
        dic = dic.setdefault(key, {})
    dic[key_list[-1]] = value
def set_in_conf_file(conf_file, keys, new_value):
    """Set a (possibly nested) key in *conf_file* to *new_value* and save it.

    :param conf_file: path to an existing ConfigObj-style config file
    :param keys: a single key (str) or a list of keys describing a nested path
    :param new_value: the value to store at that path
    """
    if type(keys) is str:
        # handle the case when there is a single key
        keys = [keys]
    config_obj, configspec_file = parse_config_file(conf_file)
    set_nested(config_obj, keys, new_value)
    config_obj.write()
def parse_config_file(conf_file, quit_on_error=True, confrc=None):
    """Parse and validate *conf_file* against a ``confrc`` configspec.

    :param conf_file: path to the config file; must exist
    :param quit_on_error: when True, exit the process on validation errors
    :param confrc: explicit configspec path; when None it is located via
        get_confrc()
    :return: tuple (validated ConfigObj, configspec path used)
    :raises ValueError: if *conf_file* does not exist
    """
    if not os.path.exists(conf_file):
        raise ValueError('Conf file %s does not exits!' % conf_file)
    configspec_file = confrc if confrc else get_confrc(conf_file)
    config = ConfigObj(conf_file, configspec=configspec_file)
    validator = validate.Validator()
    result = config.validate(validator, preserve_errors=True)
    # validate() with preserve_errors=True returns True on success, otherwise
    # a dict describing the failures -- hence the two-part check below
    if result != True and len(result) > 0:
        print('Invalid configuration in', conf_file)
        pprint(flatten_errors(config, result))
        if quit_on_error:
            sys.exit(1)
    return config, configspec_file
def get_confrc(conf_file):
    """Locate a ``confrc`` configspec file for *conf_file*.

    Starting from *conf_file*'s directory, walks up to six levels of the
    file hierarchy; at each level checks both the directory itself and its
    ``conf`` subdirectory.  Returns the first existing candidate path, or
    ``None`` when no ``confrc`` is found.
    """
    base = path.dirname(conf_file)
    for subdir in ('.', 'conf'):
        for depth in range(7):
            parts = [base] + ['..'] * depth + [subdir, 'confrc']
            candidate = path.join(*parts)
            if path.exists(candidate):
                return candidate
fib.py | DylanYoung/ComparingFibonacciAlgorithms | 0 | 12766633 | # Implementations done in Python since it natively supports unbounded integer
# arithmetic and fractional math
#Compiled with Python 2.7.3 (default, Apr 10 2012, 23:31:26) [MSC v.1500 32 bit (Intel)] on win32
#in IDLE version 2.7.3
#For profiling (written in C to avoid overhead)
import cProfile
# to deal with fractions
from fractions import Fraction
# NOTE(review): 'global' at module scope is a no-op; the assignments below
# are what actually create the module-level instrumentation counters.
global called
global countAdd
global countSub
global countMult
called = 0
countAdd = 0
countSub = 0
countMult = 0
#Recursive implementation based on text
def fibonacci(n, count = False):
    """Naive doubly-recursive Fibonacci; F(1) = F(2) = 1.

    When *count* is True, the module-level counters (called, countAdd,
    countSub) are incremented as a side effect for cost analysis.
    """
    global called, countAdd, countSub, countMult
    if count == True:
        called += 1
    # base case: the first two Fibonacci numbers are both 1
    if n <= 2:
        return 1
    if count == True:
        countAdd += 1
        countSub += 2
    return fibonacci(n - 1, count) + fibonacci(n - 2, count)
'''
For n = 10:
Counts 54 additions vs 52 predicted (not sure why?)
Counts 108 Subtractions vs 104 predicted (not sure why?)
109 Calls implying 55 calls of cost zero
Implementation is Robust (no possibility of rounding error)
& unbounded integer arithmetic in python
Implementation is very slow for n > 60 (because of the
many duplicated recursive calls, as shown in the trie
from Q4); with count = True it is slow for n > 40)
It will also fail completely because of the limit
on recursion (of course, any implementation will
eventually unless storage can grow faster than required
memory)
'''
#Loop implementation (differs slightly from design to be more storage efficient)
def fibLoop(n, count = False):
    """Iterative Fibonacci keeping only the last two values; F(1) = F(2) = 1.

    When *count* is True, each addition increments the module-level
    countAdd counter.
    """
    global called, countAdd, countSub, countMult
    prev, cur = 1, 1
    for _ in range(2, n):
        if count == True:
            countAdd += 1
        prev, cur = cur, prev + cur
    return cur
'''
For n = 10:
Counts 8 additions, as predicted
Counts 0 Subtractions vs 16 predicted, since we didn't use
a list, but it performs additional 16 assignments instead
Implementation is Robust (no possibility of rounding error)
& unbounded integer arithmetic in python
n = 100,000 --> 0.166 seconds
n = 1,000,000 --> 14.028 seconds
'''
#Direct implementation subject to rounding errors for n > 71
def fibDirect(n, count = False):
    """Closed-form (Binet) Fibonacci via floating point.

    Subject to rounding error for large n (per the module notes, n > 71).
    When *count* is True, operation counters are incremented using a naive
    model of exponentiation cost.
    """
    global called, countAdd, countSub, countMult
    root_five = (5**0.5)
    if count == True:
        countMult += 2*(n-1) + 2  # uses Naive assumption for how
        countSub += 2             # power function works
        countAdd += 1
    scale = 1/root_five
    phi_n = ((1+root_five)/2)**n
    psi_n = ((1-root_five)/2)**n
    return int(scale*(phi_n-psi_n))
'''
For n = 10:
Counts 1 addition, as predicted
Counts 2 Subtractions as predicted
Counts 20 Multiplications as predicted (but this is based on a
Naive assumption of how exponentiation is implemented
Implementation is NOT Robust; we start getting rounding error
at n = 72
Since it is not robust, and the results we are interested in are
exact integers, it isn't really relevent when it fails (i.e
it is unreliable for n > 71)
Maybe if we were interested in ratios, we could still use it
(but why bother since there are better methods?)
Fails for n > 1475 --> Overflow
Python only supports unbounded *integer* arithmetic :)
'''
# Robust Direct implementation (uses helper functions below)
def betterFibDirect(n, count = False):
    """Exact closed-form Fibonacci using Fraction-based arithmetic.

    Numbers of the form a + b*sqrt(5) are represented as (a, b) pairs of
    Fractions, so no floating-point rounding can occur.  Superseded by
    muchBetterFibDirect, which halves the work.
    """
    global called
    global countAdd
    global countSub
    global countMult
    if count == True:
        countSub += 1
    a = rFivePow((Fraction(1,2),Fraction(1,2)),n, count)
    b = rFivePow((Fraction(1,2),Fraction(-1,2)),n, count)
    # the Fibonacci number is the difference of the surd coefficients
    return int(a[1]-b[1])
'''
For n = 10:
Counts 36 additions (no prediction)
Counts 1 Subtraction (no prediction)
Counts 90 Multiplications (no prediction) - based on Naive expmentiation
but these are fractional multiplications
Implementation is Robust! Since there no possibility of rounding error
because we are dealing with only integers (and fractions)
n = 100,000 --> 69.411 seconds
n = 1,000,000 --> > 20 minutes
This is as far as I'm willing to go with this one, but I don't think
it will actually fail until we run out of memory, but it does get slower
and slower because of the multiplication of large numbers
Also: see NOTE after the next function
'''
# Robust Direct implementation (uses helper functions below)
# cuts our multiplications and additions in two
# I forgot to do this in the first implementation: D'Oh!
def muchBetterFibDirect(n, count = False):
    """Exact closed-form Fibonacci via Fraction arithmetic.

    Like betterFibDirect, but derives the second power as the conjugate of
    the first (negated surd part) instead of a second rFivePow call,
    halving the multiplications and additions.
    """
    global called
    global countAdd
    global countSub
    global countMult
    if count == True:
        countSub += 1
    a = rFivePow((Fraction(1,2),Fraction(1,2)),n, count)
    b = a[0],-a[1] ## Altered Line
    return int(a[1]-b[1])
'''
For n = 10:
Counts 18 additions as predicted
Counts 1 Subtraction as before
Counts 45 Multiplications as predicted
but these are fractional multiplications
Implementation is Robust! Since there is no possibility of rounding
error because we are dealing with only integers (and fractions)
n = 100,000 --> 37.039 seconds
NOTE: We should be able to make this implementation much better
by implementing exponentiation in a smarter way, but it will
probably only compete with fibLoop if we implement a table
to look up our exponents, in which case we might as well store
the fibonacci numbers themselves... unless we want to compute
other fibonacci-like sequences easily...
'''
# A better recursive implementation that avoids duplication of labour
def betterFibonacci(n, count = False):
    """Linear-recursion Fibonacci returning the pair (F(n-1), F(n)).

    Threads the last two values through a single recursive chain, avoiding
    the duplicated calls of the naive version.  When *count* is True, the
    module-level counters are incremented.
    """
    global called, countAdd, countSub, countMult
    if count == True:
        called += 1
    if n <= 2:
        return (1, 1)
    if count == True:
        countAdd += 1
        countSub += 1
    prev, cur = betterFibonacci(n - 1, count)
    return (cur, prev + cur)
'''
For n = 10:
Counts 8 additions, as expected (see the graph in Q4)
Counts 8 Subtractions, as expected
9 calls as expected (vs 109 for the Naive implementation)
Implementation is Robust, since there no possibility of rounding error
Fails for n > 991 --> Maximum Recursion depth exceeded
This is a python consideration to protect from catasrophic
failure (likely our Naive implementation would also fail here
if not a little earlier, but I don't have time to wait for that)
'''
###########################################################
# HELPER FUNCTIONS #
###########################################################
# Exact multiplication for numbers of the form x+y*sqrt(5)
def rFiveMult(n, m, count = False):
    """Exact product of two numbers of the form a + b*sqrt(5).

    Both operands are (a, b) pairs; the result uses sqrt(5)**2 == 5 to stay
    in the same representation.  When *count* is True the multiplication
    and addition counters are incremented.
    """
    global called, countAdd, countSub, countMult
    if count == True:
        countMult += 5
        countAdd += 2
    rational = m[0] * n[0] + m[1] * n[1] * 5
    surd = m[0] * n[1] + m[1] * n[0]
    return (rational, surd)
# Naive power function for numbers of the form x+y*sqrt(5)
def rFivePow(x,n, count = False):
    """Raise x (an (a, b) pair meaning a + b*sqrt(5)) to the n-th power.

    Uses naive repeated multiplication: n-1 calls to rFiveMult.
    """
    global called
    global countAdd
    global countSub
    global countMult
    result = x
    for i in range(1,n):
        result = rFiveMult(x,result, count)
    return result
# Reset the global counters
def resetCount():
    """Reset all module-level instrumentation counters to zero."""
    global called, countAdd, countSub, countMult
    called = countAdd = countSub = countMult = 0
def printResults(n):
    """Print a computed value plus the current operation counters, then reset.

    :param n: the (already computed) result to display
    """
    global called
    global countAdd
    global countSub
    global countMult
    print(n)
    print ("\n")
    print("Additions: " + str(countAdd))
    print("Subtractions: " + str(countSub))
    print ("Multiplications: " + str(countMult))
    print ("Calls: " + str(called))
    # zero the counters so the next measurement starts fresh
    resetCount()
if __name__ == "__main__":
    # Interactive driver (Python 2: input() evaluates the typed expression,
    # raw_input() reads text).  The #'''/''' pairs are a crude toggle for
    # enabling/disabling each measurement block.
    done = False
    while not done:
        resetCount()
        n = input("Enter a non-negative integer: ")
        print ("\n")
        #''' Remove # before and after blocks to comment out
        print("fibonacci("+str(n) + ") = ")
        printResults(fibonacci(n, True))
        cProfile.run('fibonacci(n)')
        #'''
        #'''
        print("fibLoop("+str(n) + ") = ")
        printResults(fibLoop(n, True))
        cProfile.run('fibLoop(n)')
        #'''
        #'''
        print("fibDirect("+str(n) + ") = ")
        printResults(fibDirect(n, True))
        cProfile.run('fibDirect(n)')
        #'''
        ''' # replaced with muchBetterFibDirect
        print("betterFibDirect("+str(n) + ") = ")
        printResults(betterFibDirect(n, True))
        cProfile.run('betterFibDirect(n)')
        '''
        #'''
        print("muchBetterFibDirect("+str(n) + ") = ")
        printResults(muchBetterFibDirect(n, True))
        cProfile.run('muchBetterFibDirect(n)')
        #'''
        #'''
        print("betterFibonacci("+str(n) + ") = ")
        printResults(betterFibonacci(n, True)[1])
        cProfile.run('betterFibonacci(n)')
        #'''
        print ("\n")
        s = raw_input("Exit? Y/N ").lower()
        if s == 'y':
            done = True
| 3.640625 | 4 |
script/chinatimes.py | radjan/newspac | 2 | 12766634 | <reponame>radjan/newspac
from bs4 import BeautifulSoup
import urllib2
import re
import rss_base
class Handler(rss_base.RssBaseHandler):
    """RSS article handler for China Times pages (Python 2 code)."""

    def fetch_text(self, url):
        """Fetch *url* and return (resolved_url, article_text).

        Follows redirects, prefers the page's canonical URL when present,
        and extracts the text of the element whose id matches
        'ctkeywordcontent'.  Any failure yields (url, None) as a deliberate
        best-effort fallback.
        """
        try:
            u = urllib2.urlopen(url)
            url = u.geturl()
            bs = BeautifulSoup(u)
            canonical_url = self.get_canonical_url(bs)
            url = canonical_url if canonical_url else url
            return url, bs.find(attrs={'id': re.compile('ctkeywordcontent')}).get_text()
        except Exception, e:
            #raise
            return url, None
| 2.546875 | 3 |
flask_app.py | molliewhite/mission_to_mars | 0 | 12766635 | #Get dependencies
from flask import Flask, render_template, redirect
import pymongo
import mission_to_mars
import jinja2
from jinja2 import TemplateNotFound
#Create Flask App
app = Flask(__name__)
#Connect to MongoDB on the local default port; use database "mars_DB"
conn = "mongodb://localhost:27017"
client = pymongo.MongoClient(conn)
db = client.mars_DB
@app.route("/")
def index():
    """Render the home page with the single scraped Mars document (or None)."""
    mars = db.mars_data.find_one()
    return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
    """Scrape fresh Mars data and upsert it as the single mars_data document.

    Redirects back to the index page when done.
    """
    mars_data = mission_to_mars.scrape()
    # replace_one is the PyMongo 3+ replacement for the deprecated
    # Collection.update() with a full replacement document
    db.mars_data.replace_one(
        {},
        mars_data,
        upsert=True
    )
    # relative redirect works regardless of the host/port serving the app
    # (the old hardcoded "http://localhost:5000/" broke any other deployment)
    return redirect("/", code=302)
if __name__ == "__main__":
    # run the Flask development server (not for production use)
    app.run(debug=True)
| 2.5625 | 3 |
billing/urls.py | dbsiavichay/faclab | 0 | 12766636 | <gh_stars>0
from django.conf.urls import url
from django.urls import path
from .views import (
CustomerListView, CustomerCreateView, CustomerUpdateView,
TaxListView, TaxCreateView, TaxUpdateView, TaxDetailView,
InvoiceListView, InvoiceCreateView, InvoiceUpdateView, InvoiceInvoicedView, InvoicePaidView
)
# URL routes for the billing app, grouped by model: customers, taxes, invoices.
urlpatterns = [
    # customer CRUD
    path('customer/', CustomerListView.as_view(), name='customers'),
    path('customer/add/', CustomerCreateView.as_view(), name='add_customer'),
    path('customer/<int:pk>/update/', CustomerUpdateView.as_view(), name='update_customer'),
    # tax CRUD + detail
    path('tax/', TaxListView.as_view(), name='taxes'),
    path('tax/add/', TaxCreateView.as_view(), name='add_tax'),
    path('tax/<int:pk>/update/', TaxUpdateView.as_view(), name='update_tax'),
    path('tax/<int:pk>/', TaxDetailView.as_view(), name='detail_tax'),
    # invoice CRUD + state transitions (invoiced / paid)
    path('invoice/', InvoiceListView.as_view(), name='invoices'),
    path('invoice/add/', InvoiceCreateView.as_view(), name='add_invoice'),
    path('invoice/<int:pk>/update/', InvoiceUpdateView.as_view(), name='update_invoice'),
    path('invoice/<int:pk>/invoiced/', InvoiceInvoicedView.as_view(), name='invoiced_invoice'),
    path('invoice/<int:pk>/paid/', InvoicePaidView.as_view(), name='paid_invoice'),
]
xobox/cli/logger.py | gitter-badger/xobox | 0 | 12766637 | # -*- coding: utf-8 -*-
"""
xobox.cli.logger
~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by the Stormrose Project team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import sys
from datetime import datetime
from ..conf import get_conf
from ..utils.singleton import Singleton
from ..utils import termcolor
from ..utils.timer import counter
# Maps a level name to (numeric rank, 5-char display tag, syslog-style name).
# A message is emitted when its rank is <= the configured level's rank.
Levels = {
    'mute': (0, '', ''),
    'error': (1, 'ERROR', 'LOG_ERR'),
    'warning': (2, 'WARN ', 'LOG_WARNING'),
    'notice': (3, 'NOTE ', 'LOG_NOTICE'),
    'info': (4, 'INFO ', 'LOG_INFO'),
    'debug': (5, 'DEBUG', 'LOG_DEBUG')
}
@Singleton
class Logger(object):
    """
    Class providing the output interface for various output channels. Currently,
    supported output channels are

    * the terminal (stdout and stderr)
    * files accessible through the local file system

    .. note::

       Please note that the :py:class:`~xobox.cli.logger.Logger` class is a
       decorated singleton, which means instances cannot be retrieved directly,
       but must be obtained using the :py:meth:`~xobox.cli.logger.Logger.get_instance`
       method.

    The following key word arguments are understood:

    :param str type: Type of this logger. Must be one of `file` or `term`.
    :param str level: Minimum level for logging. Must be one of `mute`, `error`, `warning`, `info` or `debug`.
    :param str file: Path of the log file to log into if type is set to `file`.
    :param bool color: Allow colored logging (only effective if type is set to `term`)
    """

    def __init__(self, *args, **kwargs):
        # defaults come from the package configuration
        self._levels = Levels
        self._level = get_conf('DEFAULT_LOG_LEVEL')
        self._type = get_conf('DEFAULT_LOG_TYPE')
        self._logfile = get_conf('DEFAULT_LOG_FILE')
        self._queue = None
        self._init_queue()
        self._fp_std = None
        self._fp_err = None
        color_candidate = get_conf('DEFAULT_LOG_COLOR')
        # Check for positional arguments (order: type, level, file, color;
        # each value is validated before it overrides the default)
        if len(args) > 0 and args[0] in ('file', 'term'):
            self._type = args[0]
        if len(args) > 1 and args[1] in self._levels:
            self._level = args[1]
        if len(args) > 2 and type(args[2]) == str:
            self._logfile = args[2]
        if len(args) > 3 and type(args[3]) == bool:
            color_candidate = args[3]
        # Check for keyword arguments (these take precedence over positionals)
        if 'type' in kwargs and kwargs['type'] in ('file', 'term'):
            self._type = kwargs['type']
        if 'level' in kwargs and kwargs['level'] in self._levels:
            self._level = kwargs['level']
        if 'file' in kwargs:
            self._logfile = kwargs['file']
        if 'color' in kwargs and type(kwargs['color']) == bool:
            color_candidate = kwargs['color']
        # finally decide about color support and open the log channel
        if termcolor.supports_color() and self._type == 'term':
            self._color = color_candidate
        else:
            self._color = False
        self._open()

    def _open(self):
        """Open the log channel"""
        if self._type == 'term':
            self._fp_std = sys.stdout
            self._fp_err = sys.stderr
        elif self._type == 'file':
            # enforce lazy behaviour
            self._fp_std = None
            self._fp_err = None

    def _close(self):
        """Close the current log channel"""
        if self._type == 'term':
            self._fp_std = None
            self._fp_err = None
        elif self._type == 'file':
            # NOTE(review): if the file was never lazily opened, _fp_std is
            # still None and this close() raises AttributeError -- confirm
            self._fp_std.close()
            self._fp_err = None

    def _init_queue(self):
        """Initialise message queue"""
        # messages are keyed by a monotonically increasing counter() value,
        # so sorting the keys restores emission order
        self._queue = {
            'std': {},
            'err': {}
        }

    def _flush(self):
        """Flush log queue to the log channel"""
        if self._type == 'term':
            self._flush_term()
        elif self._type == 'file':
            self._flush_file()
        self._init_queue()

    def _flush_term(self):
        """Flush implementation for terminal logging"""
        # stderr messages are emitted before stdout messages
        self._flush_term_err(self._queue['err'])
        self._flush_term_std(self._queue['std'])

    def _flush_term_std(self, queue):
        """Flushes the stdout message queue on a terminal"""
        for msg in sorted(queue.keys()):
            print(queue[msg][1], file=self._fp_std)

    def _flush_term_err(self, queue):
        """Flushes the stderr message queue on a terminal"""
        for msg in sorted(queue.keys()):
            print(queue[msg][1], file=self._fp_err)

    def _flush_file(self):
        """Flush implementation for file logging (lazy)"""
        # lazily open the log file on first flush
        if not self._fp_std:
            self._fp_std = open(self._logfile, encoding=get_conf('DEFAULT_CHARSET'), mode='a+')
        # padding aligns continuation lines under the timestamp/level prefix
        padding = len("[{}] [ ]".format(datetime.now().strftime(get_conf('DEFAULT_LOG_TIMESTAMP')))) * " "
        # merge both queues; 'std' entries overwrite clashing 'err' keys
        queue = dict(self._queue['err'])
        queue.update(self._queue['std'])
        for msg in sorted(queue.keys()):
            # noinspection PyTypeChecker
            msg_lines = queue[msg][1].splitlines()
            # noinspection PyTypeChecker
            print(
                "[{date}] [{level}] {message}".format(
                    date=datetime.now().strftime(get_conf('DEFAULT_LOG_TIMESTAMP')),
                    level=self._levels[queue[msg][0]][1],
                    message=msg_lines[0]
                ),
                file=self._fp_std
            )
            for msg_line in msg_lines[1:]:
                # noinspection PyTypeChecker
                print("{padding} {message}".format(padding=padding, message=msg_line), file=self._fp_std)

    def log(self, level, message):
        """Register a log message within the logging queue

        :param str level: one of the Levels keys, or the special 'usage'
        :param str message: text to emit (may span multiple lines)
        """
        # log usage messages only on a terminal
        if level == 'usage':
            if self._type == 'term':
                self._queue['err'][counter()] = (level, message)
        # otherwise, log messages if their level is appropriate
        if level in self._levels and self._levels[self._level][0] >= self._levels[level][0]:
            if level == 'error':
                self._queue['err'][counter()] = (level, message)
            else:
                self._queue['std'][counter()] = (level, message)
        self._flush()

    def log_error(self, message):
        """Convenience shortcut for registering messages with log level `error`"""
        self.log('error', message)

    def log_warning(self, message):
        """Convenience shortcut for registering messages with log level `warning`"""
        self.log('warning', message)

    def log_notice(self, message):
        """Convenience shortcut for registering messages with log level `notice`"""
        self.log('notice', message)

    def log_info(self, message):
        """Convenience shortcut for registering messages with log level `info`"""
        self.log('info', message)

    def log_debug(self, message):
        """Convenience shortcut for registering messages with log level `debug`"""
        self.log('debug', message)

    def log_usage(self, message):
        """Convenience shortcut for registering messages with log level `usage`"""
        self.log('usage', message)

    @property
    def color(self):
        """
        Boolean switch indicating whether this logger allows colored output.
        """
        return self._color

    @color.setter
    def color(self, value):
        # only honoured on a color-capable terminal and for bool values
        if termcolor.supports_color() and self._type == 'term' and type(value) == bool:
            self._color = value

    @property
    def file(self):
        """
        The log file used when run as file logger.
        """
        return self._logfile

    @file.setter
    def file(self, value):
        self._logfile = value
        # re-open the channel so subsequent flushes hit the new file
        if self._type == 'file':
            self._close()
            self._open()

    @property
    def level(self):
        """
        The log level. Expected to be one of `mute`, `error`, `warning`, `info` or `debug`.
        """
        return self._level

    @level.setter
    def level(self, value):
        if value in self._levels:
            self._level = value

    @property
    def type(self):
        """
        The logger type. Expected to be one of `term` or `file`.
        """
        return self._type

    @type.setter
    def type(self, value):
        if value in ('file', 'term'):
            self._close()
            self._type = value
            self._open()
            # colored output is never used for file logging
            if value == 'file':
                self._color = False
| 2.1875 | 2 |
paperpy/interfaces.py | paperpy/paperpy | 0 | 12766638 | <gh_stars>0
"""
This module contains all public interfaces that you can use to create plugins.
"""
import abc
class Action(abc.ABC):
    """
    An action registers a subcommand in the paperpy command.

    To register an action in your plugin add a ``paperpy.action`` entry point in the
    setup::

        setup(
            # ...
            entry_points={
                "paperpy.action": ["my_subcommand = module.my_action"]
            }
        )

    If ``module.my_action`` is a class that implements the Action interface then a
    parser for the ``paperpy my_subcommand`` CLI command is registered.
    """

    @abc.abstractmethod
    def get_parser_args(self):
        """
        This method should return a dictionary of keyword arguments that are sent to the
        :class:`argparse.ArgumentParser` constructor.
        """
        pass

    @abc.abstractmethod
    def fill_parser(self, parser):
        """
        This method receives an argparse subparser for your command. Here you can add
        all your arguments and so on.
        """
        pass

    @abc.abstractmethod
    def handle_command(self, args):
        """
        This method receives the parsed arguments and should execute the command.
        """
        pass
| 3.171875 | 3 |
blogApp/blog/views.py | yuniithings/yuniithings.com | 0 | 12766639 | <gh_stars>0
from blog.viewpackages.main import main_view
def index(request):
    """Site root: delegate rendering to the main blog view."""
    return main_view(request)
| 1.09375 | 1 |
redditclone/urls.py | jessejkwk/yup | 3 | 12766640 | <gh_stars>1-10
from django.conf.urls import url, include
from django.contrib import admin
from posts import views
# Top-level URL routes: admin, accounts, posts, and the home page.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^posts/', include('posts.urls')),
    url(r'^$', views.HomeView.as_view(), name="home")
]
| 1.6875 | 2 |
interface_stability/scripts/phase_stability.py | adelaiden/interface_stability | 10 | 12766641 | <filename>interface_stability/scripts/phase_stability.py
import argparse
from pymatgen import Composition
from interface_stability.singlephase import VirtualEntry
def get_phase_equilibria_from_composition(args):
    """
    Provides the phase equilibria of a phase with given composition

    :param args: parsed CLI namespace; uses args.composition (formula string)
    :return: 0 (CLI success code)
    """
    comp = Composition(args.composition)
    entry = VirtualEntry.from_composition(comp)
    print(entry.get_printable_PE_data_in_pd())
    return 0
def get_phase_equilibria_and_decomposition_energy_under_mu_from_composition(args):
    """
    Provide the phase equilibria and decomposition energy when open to one element with given miu
    Chemical potential is referenced to pure phase of open element.

    :param args: parsed CLI namespace; uses args.composition,
        args.open_element and args.chemical_potential
    :return: 0 (CLI success code)
    """
    comp = Composition(args.composition)
    chempot = {args.open_element: args.chemical_potential}
    entry = VirtualEntry.from_composition(comp)
    # stabilize() adjusts the virtual entry so the analysis is performed
    # for a (hypothetically) stable phase
    entry.stabilize()
    print(entry.get_printable_PE_and_decomposition_in_gppd(chempot, entries=None))
    return 0
def get_phase_evolution_profile(args):
    """
    Provides the phase equilibria and decomposition energy evolution process of a phase when open to a specific element
    Chemical potential is referenced to pure phase of open element.

    :param args: parsed CLI namespace; uses args.composition,
        args.open_element and args.posmu (allow positive mu range)
    :return: 0 (CLI success code)
    """
    comp = Composition(args.composition)
    entry = VirtualEntry.from_composition(comp)
    oe = args.open_element
    entry.stabilize()
    print(entry.get_printable_evolution_profile(oe, allowpmu=args.posmu))
    return 0
def plot_vc(args):
    """
    Get the plot data of voltage profile and display the plot.

    Uses args.composition, args.open_element, optional args.valence and
    args.posmu.  When no valence is given, it is looked up for the common
    working ions (Li/Na/K/Mg/Ca/Al).
    """
    comp = Composition(args.composition)
    entry = VirtualEntry.from_composition(comp)
    oe = args.open_element
    entry.stabilize()
    common_working_ions = dict(Li=1, Na=1, K=1, Mg=2, Ca=2, Al=3)
    # NOTE(review): this raises KeyError when -v is omitted and the open
    # element is not one of the common working ions above -- confirm intended
    valence = args.valence if args.valence else common_working_ions[oe]
    oe_list, v_list = entry.get_vc_plot_data(oe, valence=valence, allowpmu=args.posmu)
    print(entry.get_printable_vc_plot_data(oe, oe_list, v_list))
    entry.get_voltage_profile_plot(oe, oe_list, v_list, valence).show()
def main():
    """Build the CLI parser and dispatch to the selected sub-command."""
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="""
    --BRIEF INTRO--
    This script will analyze the stability of a phase with any given input composition
    Either in a closed system (phase diagram) or in a system with an open element(grand potential phase diagram)
    This script works based on several sub-commands with their own options.
    To see the options for the sub-commands, use "python phase_stability.py sub-command -h".
    """, epilog="""
    --REMINDER--
    To use this script, you need to set following variable in ~/.pmgrc.yaml:
    PMG_MAPI_KEY :[Mandatory] the API key for MP to fetch data from MP website.
    PMG_PD_PRELOAD_PATH : [Optional] the local directory for saved cached data.
    """)
    # shared argument groups reused across the sub-commands below
    parent_comp_mp = argparse.ArgumentParser(add_help=False)
    parent_comp_mp.add_argument("composition", type=str, help="The composition for analysis")
    parent_oe = argparse.ArgumentParser(add_help=False)
    parent_oe.add_argument("open_element", type=str, help="The open element")
    parent_mu = argparse.ArgumentParser(add_help=False)
    parent_mu.add_argument("chemical_potential", type=float, help="The chemical potential of open element."
                                                                  "Referenced to pure phase")
    parent_posmu = argparse.ArgumentParser(add_help=False)
    parent_posmu.add_argument("-posmu", action='store_true', default=False,
                              help="Allow mu range to go beyond 0 and become positive")
    subparsers = parser.add_subparsers()
    parser_stability = subparsers.add_parser("stability", parents=[parent_comp_mp],
                                             help="Obtain the phase equilibria of a phase with given composition")
    parser_stability.set_defaults(func=get_phase_equilibria_from_composition)
    parser_evolution = subparsers.add_parser("evolution", parents=[parent_comp_mp, parent_oe, parent_posmu],
                                             help="Obtain the evolution profile at a given composition when open to an element")
    parser_evolution.set_defaults(func=get_phase_evolution_profile)
    parser_mu = subparsers.add_parser("mu", parents=[parent_comp_mp, parent_oe, parent_mu],
                                      help="Obtain the phase equilibria & decomposition energy of a phase with given composition when open to an element")
    parser_mu.set_defaults(func=get_phase_equilibria_and_decomposition_energy_under_mu_from_composition)
    # parser_plot_gppd = subparsers.add_parser("plotgppd", parents=[parent_comp_mp, parent_oe, parent_miu],
    #                                          help="Obtain the grand potential phase diagram of a given material system under certain chemical potential")
    # parser_plot_gppd.set_defaults(func=plot_gppd)
    parser_plot_vc = subparsers.add_parser("plotvc", parents=[parent_comp_mp, parent_oe, parent_posmu],
                                           help="Plot the voltage profile of at a given composition")
    parser_plot_vc.add_argument('-v', '--valence', type=int, default=None, help='Valence of Working ion')
    parser_plot_vc.set_defaults(func=plot_vc)
    args = parser.parse_args()
    # dispatch to the chosen sub-command; show help when none was given
    if hasattr(args, "func"):
        args.func(args)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
test_densecrf.py | lynetcha/densecrf-python | 13 | 12766642 | <reponame>lynetcha/densecrf-python
import cv2
import random
import numpy as np
from Pydensecrf import *
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian
def show_img(img):
    """Display *img* in an OpenCV window and block until a key is pressed."""
    cv2.imshow('img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def test_img_fw():
    """Forward-inference demo: build a DenseCRF from a partial annotation
    and show the MAP labeling of the example image (Python 2 code)."""
    img = cv2.imread('examples/im1.ppm')
    anno_rgb = cv2.imread('examples/anno1.ppm').astype(np.uint32)
    # pack the RGB annotation into a single 32-bit integer per pixel
    anno_lbl = anno_rgb[:, :, 0] + \
        (anno_rgb[:, :, 1] << 8) + (anno_rgb[:, :, 2] << 16)
    # Convert the 32bit integer color to 1, 2, ... labeling.
    # Note that all-black, i.e. the value 0 for background will stay 0.
    colors, labels = np.unique(anno_lbl, return_inverse=True)
    # And create a mapping back from the labels to 32bit integer colors.
    # But remove the all-0 black, that won't exist in the MAP!
    colors = colors[1:]
    colorize = np.empty((len(colors), 3), np.uint8)
    colorize[:, 0] = (colors & 0x0000FF)
    colorize[:, 1] = (colors & 0x00FF00) >> 8
    colorize[:, 2] = (colors & 0xFF0000) >> 16
    # Compute the number of classes in the label image.
    # We subtract one because the number shouldn't include the value 0 which stands
    # for "unknown" or "unsure".
    n_labels = len(set(labels.flat)) - 1
    print(n_labels, " labels and \"unknown\" 0: ", set(labels.flat))
    # Example using the DenseCRF class and the util functions
    d = DenseCRF(img.shape[1] * img.shape[0], n_labels)
    # get unary potentials (neg log probability)
    U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=True)
    d.setUnaryEnergy(U)
    # This creates the color-independent features and then add them to the CRF
    feats = create_pairwise_gaussian(
        sdims=(3, 3), shape=img.shape[:2]).astype(np.float32)
    d.addPairwiseEnergy(feats, PottsCompatibility(3),
                        KernelType.DIAG_KERNEL,
                        NormalizationType.NORMALIZE_SYMMETRIC)
    # This creates the color-dependent features and then add them to the CRF
    feats = create_pairwise_bilateral(sdims=(80, 80), schan=(13, 13, 13),
                                      img=img, chdim=2).astype(np.float32)
    d.addPairwiseEnergy(feats, PottsCompatibility(10),
                        KernelType.DIAG_KERNEL,
                        NormalizationType.NORMALIZE_SYMMETRIC)
    ####################################
    ### Do inference and compute MAP ###
    ####################################
    # Run five inference steps.
    Q = d.inference(5)
    # Find out the most probable class for each pixel.
    MAP = np.argmax(Q, axis=0)
    # Convert the MAP (labels) back to the corresponding colors and save the
    # image.
    MAP = colorize[MAP, :]
    print MAP.shape
    show_img(MAP.reshape(img.shape))
def test_img_bw():
NIT_ = 5
img = cv2.imread('examples/im1.ppm')
anno_rgb = cv2.imread('examples/anno1.ppm').astype(np.uint32)
anno_lbl = anno_rgb[:, :, 0] + \
(anno_rgb[:, :, 1] << 8) + (anno_rgb[:, :, 2] << 16)
# Convert the 32bit integer color to 1, 2, ... labels.
# Note that all-black, i.e. the value 0 for background will stay 0.
colors, labeling = np.unique(anno_lbl, return_inverse=True)
# And create a mapping back from the labels to 32bit integer colors.
# But remove the all-0 black, that won't exist in the MAP!
colorize = np.empty((len(colors), 3), np.uint8)
colorize[:, 0] = (colors & 0x0000FF)
colorize[:, 1] = (colors & 0x00FF00) >> 8
colorize[:, 2] = (colors & 0xFF0000) >> 16
# Compute the number of classes in the label image.
# We subtract one because the number shouldn't include the value 0 which stands
# for "unknown" or "unsure".
# n_labels = len(set(labeling.flat)) - 1
# print(n_labels, " labels and \"unknown\" 0: ", set(labeling.flat))
# Example using the DenseCRF class and the util functions
N = img.shape[0] * img.shape[1]
M = 4
d = DenseCRF(img.shape[1] * img.shape[0], M)
logistic_feature = np.ones((4, N), dtype=np.float32)
logistic_transform = np.zeros((M, 4), dtype=np.float32)
img_reshape = img.reshape(-1, 3)
logistic_feature[:3, :] = img_reshape.T / 255.
for j in xrange(logistic_transform.shape[1]):
for i in xrange(logistic_transform.shape[0]):
logistic_transform[i, j] = 0.01 * (1 - 2 * random.random())
# U = unary_from_labels(labeling, n_labels, gt_prob=0.7, zero_unsure=True)
d.setUnaryEnergy(logistic_transform, logistic_feature)
# This creates the color-independent features and then add them to the CRF
feats = create_pairwise_gaussian(
sdims=(3, 3), shape=img.shape[:2]).astype(np.float32)
d.addPairwiseEnergy(feats, PottsCompatibility(1),
KernelType.DIAG_KERNEL,
NormalizationType.NORMALIZE_SYMMETRIC)
# This creates the color-dependent features and then add them to the CRF
feats = create_pairwise_bilateral(sdims=(80, 80), schan=(13, 13, 13),
img=img, chdim=2).astype(np.float32)
d.addPairwiseEnergy(feats, PottsCompatibility(1),
KernelType.DIAG_KERNEL,
NormalizationType.NORMALIZE_SYMMETRIC)
objective = IntersectionOverUnion(labeling.astype(np.int16))
#objective = Hamming(labeling.astype(np.int32),np.ones(n_label).astype(np.float32))
# Optimize the CRF in 3 phases:
# * First unary only
# * Unary and pairwise
# * Full CRF
learning_params = np.array([[True, False, False],
[True, True, False],
[True, True, True]])
for i in range(learning_params.shape[0]):
# Setup the energy
energy = CRFEnergy(d, objective, NIT_, learning_params[
i, 0], learning_params[i, 1], learning_params[i, 2])
energy.setL2Norm(1e-3)
# Minimize the energy
p = minimizeLBFGS(energy, 2, True)
# Save the values
idx = 0
if learning_params[i, 0]:
print "HERE"
print d.unaryParameters().shape
d.setUnaryParameters(p[idx:idx + d.unaryParameters().shape[0]])
idx += d.unaryParameters().shape[0]
if learning_params[i, 1]:
d.setLabelCompatibilityParameters(
p[idx:idx + d.labelCompatibilityParameters().shape[0]])
idx += d.labelCompatibilityParameters().shape[0]
if learning_params[i, 2]:
d.setKernelParameters(p[idx:idx + d.kernelParameters().shape[0]])
idx += d.kernelParameters().shape[0]
print "Pairwise Parameters: ", d.labelCompatibilityParameters()
print "Kernel Parameters: ", d.kernelParameters()
# Run five inference steps.
Q = d.inference(NIT_)
# Find out the most probable class for each pixel.
MAP = np.argmax(Q, axis=0)
# Convert the MAP (labeling) back to the corresponding colors and save the
# image.
MAP = colorize[MAP, :]
from ipdb import set_trace; set_trace()
show_img(MAP.reshape(img.shape))
def test_img_bw_2D():
NIT_ = 5
img = cv2.imread('examples/im1.ppm')
anno_rgb = cv2.imread('examples/anno1.ppm').astype(np.uint32)
anno_lbl = anno_rgb[:, :, 0] + \
(anno_rgb[:, :, 1] << 8) + (anno_rgb[:, :, 2] << 16)
# Convert the 32bit integer color to 1, 2, ... labels.
# Note that all-black, i.e. the value 0 for background will stay 0.
colors, labels = np.unique(anno_lbl, return_inverse=True)
# And create a mapping back from the labels to 32bit integer colors.
# But remove the all-0 black, that won't exist in the MAP!
colors = colors[0:]
colorize = np.empty((len(colors), 3), np.uint8)
colorize[:, 0] = (colors & 0x0000FF)
colorize[:, 1] = (colors & 0x00FF00) >> 8
colorize[:, 2] = (colors & 0xFF0000) >> 16
# Compute the number of classes in the label image.
# We subtract one because the number shouldn't include the value 0 which stands
# for "unknown" or "unsure".
# Example using the DenseCRF class and the util functions
d = DenseCRF2D(img.shape[1], img.shape[0], 4) # n_labels)
N = img.shape[0] * img.shape[1]
M = 4 # n_labels
logistic_feature = np.ones((4, N), dtype=np.float32)
logistic_transform = np.zeros((M, 4), dtype=np.float32)
img_reshape = img.reshape(-1, 3)
logistic_feature[:3, :] = img_reshape.T / 255.
for j in xrange(logistic_transform.shape[1]):
for i in xrange(logistic_transform.shape[0]):
logistic_transform[i, j] = 0.01 * (1 - 2 * random.random())
# U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=True)
d.setUnaryEnergy(logistic_transform, logistic_feature)
# This creates the color-independent features and then add them to the CRF
d.addPairwiseGaussian(3, 3, PottsCompatibility(1), KernelType.DIAG_KERNEL,
NormalizationType.NORMALIZE_SYMMETRIC)
# This creates the color-dependent features and then add them to the CRF
matrix = np.ones((M,M)).astype(np.float32)
d.addPairwiseBilateral(80., 80., 13., 13., 13., img,
MatrixCompatibility(matrix),
KernelType.DIAG_KERNEL,
NormalizationType.NORMALIZE_SYMMETRIC)
objective = IntersectionOverUnion(labeling.astype(np.int16))
# Optimize the CRF in 3 phases:
# * First unary only
# * Unary and pairwise
# * Full CRF
learning_params = np.array([[True, False, False],
[True, True, False],
[True, True, True]])
for i in range(learning_params.shape[0]):
# Setup the energy
energy = CRFEnergy(d, objective, NIT_, learning_params[i, 0],
learning_params[i, 1], learning_params[i, 2])
energy.setL2Norm(1e-3)
# Minimize the energy
p = minimizeLBFGS(energy, 2, True)
# Save the values
idx = 0
if learning_params[i, 0]:
d.setUnaryParameters(p[idx:idx + d.unaryParameters().shape[0]])
idx += d.unaryParameters().shape[0]
if learning_params[i, 1]:
d.setLabelCompatibilityParameters(
p[idx:idx + d.labelCompatibilityParameters().shape[0]])
idx += d.labelCompatibilityParameters().shape[0]
if learning_params[i, 2]:
d.setKernelParameters(p[idx:idx + d.kernelParameters().shape[0]])
idx += d.kernelParameters().shape[0]
# Return parameters
print "Unary Parameters: ", d.unaryParameters()
print "Pairwise Parameters: ", d.labelCompatibilityParameters()
print "Kernel Parameters: ", d.kernelParameters()
# Run five inference steps.
Q = d.inference(NIT_)
# Find out the most probable class for each pixel.
MAP = np.argmax(Q, axis=0)
# Convert the MAP (labeling) back to the corresponding colors and save the
# image.
MAP = colorize[MAP, :]
show_img(MAP.reshape(img.shape))
# Script entry point: run the forward and backward learning examples
# (the 2D variant is left disabled).
if __name__ == '__main__':
    test_img_fw()
    test_img_bw()
    # test_img_bw_2D()
| 3 | 3 |
python/atmosEdit.py | vasaantk/bin | 0 | 12766643 | <reponame>vasaantk/bin<filename>python/atmosEdit.py
#! /usr/bin/env python
# <NAME>
import re
import sys
import random
userFiles = sys.argv[1:]
inpCount = len(userFiles)
userInp = 0
SPACES = " "
GAUSS_SIGMA = 5
printOrig = False
setUserGauss = False
setRandDZen = False
enterLoop = True
# With no command-line arguments, print the usage banner and disable the
# main processing loop further below.
if userFiles == []:
    enterLoop = False
    print("")
    print("# atmosEdit.py reads in ATMOS.FITS from either fits_geoblocks.f or")
    print("# from AIPS task and produces another ATMOS.FITS format output. This")
    print("# output consists of random values for: zenith, clock, dzenith and")
    print("# dclock delays. The purpose of this was to test the effect of random")
    print("# zenith delays on QSO position determination in the v255 experiments.")
    print("#")
    print("# Command-line options are")
    print("# --> orig Prints out the original ATMOS.FITS values")
    print("# --> gauss= Sets GAUSS_SIGMA to user-defined integer value (default 5)")
    print("# --> dzen Includes random dZenith contribution. Otherwise, dzenith")
    print("# is assumed to be 0.0")
    print("")
# Scan the arguments for the recognized option flags. userInp counts the
# positional index so that the gauss= argument can be re-parsed later
# (gaussPos remembers where it was found).
for i in userFiles:
    if re.match('orig',i):
        printOrig = True
    if re.match('gauss',i):
        setUserGauss = True
        gaussPos = userInp
    if re.match('dzen',i):
        setRandDZen = True
    userInp += 1
# This function allows the script to compute how many spaces need to be
# alloted to ensure equally spaced columns of: zenith, clock, dzenith and
# dclock delays.
def space_compare(charOfInterest):
    """Return the padding string that follows a formatted delay value.

    The value is rendered with "%.3f"; the padding is sized so that the
    rendered value plus its padding always spans 2*len(SPACES) characters,
    keeping the output columns aligned.

    The original implementation split this into three branches (value
    shorter than, longer than, or equal in length to SPACES); all three
    reduce algebraically to the single expression used here, so the
    returned string is identical in every case.
    """
    formatted_len = len("%.3f" % charOfInterest)
    pad = 2 * len(SPACES) - formatted_len
    # A non-positive pad multiplies out to the empty string, matching the
    # original behavior for very wide values.
    return pad * " "
# Grab the user-defined Gaussian sigma level as defined by the user
if setUserGauss:
    reqGaussInfo = re.search('gauss=(\d+)',userFiles[gaussPos])
    if reqGaussInfo:
        GAUSS_SIGMA = int(reqGaussInfo.group(1))
# Values from original ATMOS.FITS file harvested here
if enterLoop:
    # This is just a no. at the very first line of ATMOS.FITS output from fit_geoblocks.f which tells AIPS how many lines in the ATMOS.FITS file.
    print(" 15")
    for line in open(userFiles[0],'r'):
        # Groups: UT stamp, zenith, clock, dzenith and dclock delay columns.
        requiredInformation = re.search("(\s+\d\s+\d\s+\d+\s+\d+\s+\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+?)\s+(-?\d+.\d+)\s+(-?\d+.\d+)",line)
        if requiredInformation:
            universalTime = requiredInformation.group(1)
            zenithDelay = requiredInformation.group(2)
            clockDelay = requiredInformation.group(3)
            dZenithDelay = requiredInformation.group(4)
            dClockDelay = requiredInformation.group(5)
            # A random delay is determined for the respective parameter based on a Gaussian and sigma
            randomZenith = random.gauss(float(zenithDelay), GAUSS_SIGMA)
            randomClock = random.gauss(float(clockDelay), GAUSS_SIGMA)
            randomDZenith = random.gauss(float(dZenithDelay), GAUSS_SIGMA)
            randomDClock = random.gauss(float(dClockDelay), GAUSS_SIGMA)
            # Computation of number of spacings required for neatly aligned columns
            zenithSpaceDiff = space_compare(randomZenith)
            clockSpaceDiff = space_compare(randomClock)
            dZenithSpaceDiff = space_compare(randomDZenith)
            # Three output modes: unmodified values, randomized values with a
            # random dzenith, or randomized values with the original dzenith.
            if printOrig:
                print(str(universalTime) + SPACES + str(zenithDelay) + space_compare(float(zenithDelay)) + str(clockDelay) + space_compare(float(clockDelay)) + str(dZenithDelay) + space_compare(float(dZenithDelay)) + str(dClockDelay))
            elif setRandDZen:
                print(str(universalTime) + SPACES + str("%.3f"%randomZenith) + zenithSpaceDiff + str("%.3f"%randomClock) + clockSpaceDiff + str("%.3f"%randomDZenith) + dZenithSpaceDiff + str("%.3f"%randomDClock))
            else:
                print(str(universalTime) + SPACES + str("%.3f"%randomZenith) + zenithSpaceDiff + str("%.3f"%randomClock) + clockSpaceDiff + str(dZenithDelay) + SPACES + str("%.3f"%randomDClock))
| 2.84375 | 3 |
masci_tools/tools/greensfunction.py | soumyajyotih/masci-tools | 0 | 12766644 | # -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the Masci-tools package. #
# (Material science tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/masci-tools. #
# For further information on the license, see the LICENSE.txt file. #
# For further information please visit http://judft.de/. #
# #
###############################################################################
"""
This module contains utility and functions to work with Green's functions calculated
and written to ``greensf.hdf`` files by fleur
"""
from collections import namedtuple
from itertools import groupby
import numpy as np
import h5py
from masci_tools.io.parsers.hdf5 import HDF5Reader
from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation
from masci_tools.util.constants import HTR_TO_EV
# Descriptor of one Green's function element in a greensf.hdf file: orbital
# quantum numbers (l, lp), the two atom types, whether it is spherically
# averaged and/or onsite, the energy contour index, the number of local
# orbitals and the inter-atom distance vector.
GreensfElement = namedtuple('GreensfElement',
                            ['l', 'lp', 'atomType', 'atomTypep', 'sphavg', 'onsite', 'contour', 'nLO', 'atomDiff'])
def _get_sphavg_recipe(group_name, index, contour):
    """
    Get the HDF5Reader recipe for reading in a spherically averaged Green's function element

    :param group_name: str of the group containing the Green's function elements
    :param index: integer index of the element to read in (indexing starts at 1)
    :param contour: integer index of the energy contour to read in (indexing starts at 1)

    :returns: dict with the recipe reading all the necessary information from the ``greensf.hdf`` file
    """
    return {
        'datasets': {
            # Spherically averaged Green's function data, converted from
            # htr^-1 to eV^-1
            'sphavg': {
                'h5path':
                f'/{group_name}/element-{index}/sphavg',
                'transforms': [
                    Transformation(name='convert_to_complex_array', args=(), kwargs={}),
                    Transformation(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})
                ]
            },
            # Energy contour points, shifted so that the fermi energy sits at
            # zero and converted to eV
            'energy_points': {
                'h5path':
                f'/EnergyContours/contour-{contour}/ContourPoints',
                'transforms': [
                    Transformation(name='convert_to_complex_array', args=(), kwargs={}),
                    AttribTransformation(name='shift_by_attribute',
                                         attrib_name='fermi_energy',
                                         args=(),
                                         kwargs={
                                             'negative': True,
                                         }),
                    Transformation(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})
                ]
            },
            # Integration weights of the contour points, converted to eV
            'energy_weights': {
                'h5path':
                f'/EnergyContours/contour-{contour}/IntegrationWeights',
                'transforms': [
                    Transformation(name='convert_to_complex_array', args=(), kwargs={}),
                    Transformation(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})
                ]
            }
        },
        'attributes': {
            'fermi_energy': {
                'h5path':
                '/general',
                'description':
                'fermi_energy of the system',
                'transforms': [
                    Transformation(name='get_attribute', args=('FermiEnergy',), kwargs={}),
                    Transformation(name='get_first_element', args=(), kwargs={})
                ]
            },
            'spins': {
                'h5path':
                '/general',
                'description':
                'number of spins',
                'transforms': [
                    Transformation(name='get_attribute', args=('spins',), kwargs={}),
                    Transformation(name='get_first_element', args=(), kwargs={})
                ]
            },
            'mperp': {
                'h5path':
                '/general',
                'description':
                'Switch whether spin offdiagonal elements are included',
                'transforms': [
                    Transformation(name='get_attribute', args=('mperp',), kwargs={}),
                    Transformation(name='get_first_element', args=(), kwargs={}),
                    Transformation(name='apply_lambda', args=(lambda x: x == 1,), kwargs={})
                ]
            },
            'lmax': {
                'h5path':
                f'/{group_name}',
                'description':
                'Maximum l considered (Determines size of the matrix)',
                'transforms': [
                    Transformation(name='get_attribute', args=('maxl',), kwargs={}),
                    Transformation(name='get_first_element', args=(), kwargs={})
                ]
            },
        }
    }
def _get_radial_recipe(group_name, index, contour, nlo=0):
    """
    Get the HDF5Reader recipe for reading in a radial Green's function element

    :param group_name: str of the group containing the Green's function elements
    :param index: integer index of the element to read in (indexing starts at 1)
    :param contour: integer index of the energy contour to read in (indexing starts at 1)
    :param nlo: integer number of local orbitals on the element. Accepted for
                compatibility with callers passing ``nlo=`` (``_read_gf_element``
                does); the local-orbital datasets are currently always excluded
                from the read, so the value is not yet used.

    :returns: dict with the recipe reading all the necessary information from the ``greensf.hdf`` file
    """
    # Start from the spherically averaged recipe and swap the sphavg dataset
    # for the radial expansion coefficients
    recipe = _get_sphavg_recipe(group_name, index, contour)

    recipe['datasets'].pop('sphavg')
    recipe['datasets']['coefficients'] = {
        'h5path':
        f'/{group_name}/element-{index}',
        'transforms': [
            Transformation(name='get_all_child_datasets',
                           args=(),
                           kwargs={'ignore': ['scalarProducts', 'LOContribution']}),
            Transformation(name='convert_to_complex_array', args=(), kwargs={}),
            Transformation(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})
        ],
        'unpack_dict':
        True
    }

    # Additional metadata needed to combine the radial coefficients
    recipe['attributes']['scalarProducts'] = {
        'h5path': f'/{group_name}/element-{index}/scalarProducts',
        'transforms': [Transformation(name='get_all_child_datasets', args=(), kwargs={})]
    }

    recipe['attributes']['radialFunctions'] = {
        'h5path': '/RadialFunctions',
        'transforms': [Transformation(name='get_all_child_datasets', args=(), kwargs={})]
    }

    return recipe
def _get_greensf_group_name(hdffile):
"""
Return the name of the group containing the Green's function elements
:param hdffile: h5py.File of the greensf.hdf file
:returns: str of the group name containing the Green's Function elements
"""
if '/GreensFunctionElements' in hdffile:
return 'GreensFunctionElements'
elif '/Hubbard1Elements' in hdffile:
return 'Hubbard1Elements'
def _read_element_header(hdffile, index):
    """
    Read the attributes of the given green's function elements

    :param hdffile: h5py.File of the greensf.hdf file
    :param index: integer index of the element to read in (indexing starts at 1)

    :returns: :py:class:`GreensfElement` corresponding to the read in attributes
    """
    group_name = _get_greensf_group_name(hdffile)

    element = hdffile.get(f'/{group_name}/element-{index}')

    # The scalar attributes are stored as length-1 arrays in the hdf file,
    # hence the [0] indexing
    l = element.attrs['l'][0]
    lp = element.attrs['lp'][0]
    atomType = element.attrs['atomType'][0]
    atomTypep = element.attrs['atomTypep'][0]
    sphavg = element.attrs['l_sphavg'][0] == 1
    onsite = element.attrs['l_onsite'][0] == 1
    contour = element.attrs['iContour'][0]
    atomDiff = np.array(element.attrs['atomDiff'])
    # Clamp numerical noise in the distance vector to exactly zero
    atomDiff[abs(atomDiff) < 1e-12] = 0.0
    nLO = element.attrs['numLOs'][0]

    return GreensfElement(l, lp, atomType, atomTypep, sphavg, onsite, contour, nLO, atomDiff)
def _read_gf_element(file, index):
    """
    Read the information needed for a given Green's function element form a ``greensf.hdf``
    file

    :param file: filepath or handle to be read
    :param index: integer index of the element to read in (indexing starts at 1)

    :returns: tuple of the information containing the :py:class:`GreensfElement` for the element
              and the datasets and attributes dict produced by the corresponding
              :py:class:`~masci_tools.io.parsers.hdf5.HDF5Reader`
    """
    with HDF5Reader(file) as h5reader:
        gf_element = _read_element_header(h5reader._h5_file, index)
        group_name = _get_greensf_group_name(h5reader._h5_file)

        # Choose the recipe matching the storage format of the element;
        # radial elements additionally need the number of local orbitals
        if gf_element.sphavg:
            recipe = _get_sphavg_recipe(group_name, index, gf_element.contour)
        else:
            recipe = _get_radial_recipe(group_name, index, gf_element.contour, nlo=gf_element.nLO)

        data, attributes = h5reader.read(recipe=recipe)

    return gf_element, data, attributes
class GreensFunction:
    """
    Class for working with Green's functions calculated by the fleur code

    :param element: :py:class:`GreensfElement` namedtuple containing the information about the element
    :param data: datasets dict produced by one of the hdf recipes for reading Green's functions
    :param attributes: attributes dict produced by one of the hdf recipes for reading Green's functions
    """

    def __init__(self, element, data, attributes):
        self.element = element

        # Energy contour points and integration weights (the reading recipe
        # converts them to eV and shifts them relative to the fermi energy)
        self.points = data.pop('energy_points')
        self.weights = data.pop('energy_weights')

        self.data = data

        if not self.sphavg:
            self.scalar_products = attributes['scalarProducts']
            self.radial_functions = attributes['radialFunctions']
            # Attributes above are stored for future use, but radial elements
            # cannot be processed yet
            raise NotImplementedError("Radial Green's functions not yet implemented")

        self.spins = attributes['spins']
        self.mperp = attributes['mperp']
        self.lmax = attributes['lmax']

    @classmethod
    def fromFile(cls, file, index):
        """
        Classmethod for creating a :py:class:`GreensFunction` instance directly from a hdf file

        :param file: path or opened file handle to a greensf.hdf file
        :param index: int index of the element to read in
        """
        element, data, attributes = _read_gf_element(file, index)
        return cls(element, data, attributes)

    def __getattr__(self, attr):
        """
        This __getattr__ method redirects lookups of field names of the stored :py:class:`GreensfElement`
        to return the value from the namedtuple

        :param attr: attribute to look up

        :returns: value of the attribute if it is a field name of :py:class:`GreensfElement`
        """
        if attr in GreensfElement._fields:
            return self.element._asdict()[attr]
        raise AttributeError(f'{self.__class__.__name__!r} object has no attribute {attr!r}')

    @staticmethod
    def to_m_index(m):
        """
        Convert between magnetic quantum numbers between -l and l
        to 0 and 2l+1 for easier indexing

        :param m: int magnetic quantum number to convert

        :returns: converted magnetic quantum number
        """
        # NOTE(review): hardcodes lmax=3 (f orbitals) for the conversion;
        # assumes the stored matrices are always dimensioned for lmax=3 --
        # confirm against the lmax attribute read from the file
        if abs(m) > 3:
            raise ValueError('Invalid magnetic quantum number (>3)')
        return m + 3

    @staticmethod
    def to_spin_indices(spin):
        """
        Convert between spin index (0 to 3) to the corresponding
        two spin indices (0 or 1)

        :param spin: int spin index to convert

        :returns: tuple of spin indices
        """
        if spin < 0 or spin > 3:
            raise ValueError('Invalid spin index')
        # 0 -> (0,0), 1 -> (1,1): diagonal components
        # 2 -> (1,0), 3 -> (0,1): offdiagonal components
        if spin < 2:
            spin1 = spin
            spin2 = spin
        elif spin == 2:
            spin1 = 1
            spin2 = 0
        else:
            spin1 = 0
            spin2 = 1
        return spin1, spin2

    @property
    def nspins(self):
        """
        Return the number of spins of the current element.
        If mperp is True for the element it is 4 otherwise it
        is determined by the spins attribute
        """
        if self.mperp:
            return 4
        else:
            return self.spins

    def get_scalar_product_by_key(self, key, spin):
        """
        Return the scalar product ``<key>n`` for the two spin indices
        corresponding to the given combined spin index.

        :param key: str name of the scalar product (without the trailing 'n')
        :param spin: int combined spin index (0 to 3)
        """
        spin1, spin2 = self.to_spin_indices(spin)
        return self.scalar_products[f'{key}n'][spin1, spin2]

    def __str__(self):
        """
        String representation of the :py:class:`GreensFunction`. Chosen to be the
        str representation of the stored :py:class:`GreensfElement` instance.
        """
        return str(self.element)

    def energy_dependence(self, *, m=None, mp=None, spin, imag=True, both_contours=False):
        """
        Select data with energy dependence

        :param m: optional integer magnetic quantum number between -l and l
        :param mp: optional integer magnetic quantum number between -lp and lp
        :param spin: integer spin between 1 and nspins (keyword-only; passing
                     None selects all spin components)
        :param both_contours: bool if True the data for both energy contours is
                              returned separately instead of being combined
        :param imag: bool if True and both_contours is False the imaginary part 1/2i(G(z)-G(z^*)) is returned
                     otherwise the real part 1/2(G(z)+G(z^*))

        :returns: numpy array with the selected data
        """
        if spin is not None:
            # Convert from 1-based to 0-based spin indexing and cap at the
            # highest stored component
            spin -= 1
            spin_index = min(spin, 2 if self.mperp else self.nspins - 1)
        else:
            spin_index = slice(0, min(3, self.nspins))

        if m is not None:
            m_index = self.to_m_index(m)
        else:
            m_index = slice(self.lmax - self.l, self.lmax + self.l + 1, 1)

        if mp is not None:
            mp_index = self.to_m_index(mp)
        else:
            # NOTE(review): the lower bound uses self.l while the upper bound
            # uses self.lp; for l != lp elements this looks like it should be
            # self.lmax - self.lp -- confirm against the intended matrix layout
            mp_index = slice(self.lmax - self.l, self.lmax + self.lp + 1, 1)

        gf = self.data['sphavg'][:, spin_index, mp_index, m_index, :].T

        if both_contours:
            return gf
        else:
            # Combine the two contours G(z) and G(z^*) into the imaginary or
            # real part (the remaining imaginary component is discarded)
            if imag:
                data = -1 / (2 * np.pi * 1j) * (gf[..., 0] - gf[..., 1])
            else:
                data = -1 / (2 * np.pi) * (gf[..., 0] + gf[..., 1])

            return data.real

    def trace_energy_dependence(self, spin, imag=True):
        """
        Select trace of data with energy dependence

        :param spin: integer spin between 1 and nspins
        :param imag: bool if True the imaginary part 1/2i(G(z)-G(z^*)) is returned
                     otherwise the real part 1/2(G(z)+G(z^*))

        :returns: numpy array with the selected and traced over data
        """
        if self.l != self.lp:
            raise ValueError('Trace only supported for l==lp')

        # Sum the diagonal m == mp components
        data = np.zeros(self.points.shape)
        for m in range(-self.l, self.l + 1):
            data += self.energy_dependence(m=m, mp=m, spin=spin, imag=imag)

        return data
class colors:
    """
    Color strings for coloring terminal output

    You may need to change color settings in iPython
    """
    # ANSI escape sequences; ``endc`` resets the terminal to the default style
    red = '\033[31m'
    endc = '\033[m'
    green = '\033[32m'
def printElements(elements, index=None, mark=None):
    """
    Print the given list of :py:class:`GreensfElement` in a nice table

    :param elements: list of :py:class:`GreensfElement` to be printed
    :param index: optional list of indices to show instead of the default index in the list
    :param mark: optional list of int with elements to emphasize with an arrow and color
    """
    print('Index | l | lp | atom | atomp | sphavg | onsite | iContour | atomDiff |')
    print('-----------------------------------------------------------------------------------------')

    # Either pair each element with its position in the list or with the
    # explicitly supplied index
    table_rows = enumerate(elements) if index is None else zip(index, elements)

    for elem_index, element in table_rows:
        # Marked rows get an arrow and are printed in green
        is_marked = mark is not None and elem_index + 1 in mark
        markStr = '<---' if is_marked else ''
        color = colors.green if is_marked else ''

        atomdiff_str = np.array2string(element.atomDiff,
                                       precision=2,
                                       separator=',',
                                       suppress_small=True,
                                       sign=' ',
                                       floatmode='fixed')
        print(
            color +
            f'{elem_index+1:<7d}|{element.l:7d}|{element.lp:7d}|{element.atomType:7d}|{element.atomTypep:7d}|{str(element.sphavg):>8s}|{str(element.onsite):>8s}|{element.contour:10d}|{atomdiff_str}|{markStr}'
            + colors.endc)
def listElements(hdffile, show=False):
    """
    Find the green's function elements contained in the given ``greens.hdf`` file

    :param hdffile: filepath or file handle to a greensf.hdf file
    :param show: bool if True the found elements are printed in a table

    :returns: list of :py:class:`GreensfElement`
    """
    with h5py.File(hdffile, 'r') as h5_file:

        group_name = _get_greensf_group_name(h5_file)

        # The group attribute stores how many element-<i> subgroups exist
        num_elements = h5_file.get(group_name).attrs['NumElements'][0]

        elements = []
        # element indexing in the hdf file starts at 1
        for index in range(1, num_elements + 1):
            elements.append(_read_element_header(h5_file, index))

    if show:
        print(f'These Elements are found in {hdffile}:')
        printElements(elements)

    return elements
def selectOnsite(hdffile, l, atomType, lp=None, show=True):
    """
    Find the specified onsite element in the ``greensf.hdf`` file

    :param hdffile: filepath or file handle to a greensf.hdf file
    :param l: integer of the orbital quantum number
    :param atomType: integer of the atom type
    :param lp: optional integer of the second orbital quantum number (default equal to l)
    :param show: bool if True the found elements are printed in a table and the selected ones are marked

    :returns: list of indexes in the ``greensf.hdf`` file corresponding to the selected criteria
    """
    if lp is None:
        lp = l

    elements = listElements(hdffile)

    # An onsite element has matching quantum numbers, the requested atom type
    # on both sides and a (numerically) vanishing distance vector.
    # Plus 1 because indexing in the hdf file starts at 1.
    foundIndices = [
        index + 1
        for index, elem in enumerate(elements)
        if elem.l == l and elem.lp == lp and elem.atomType == atomType and
        elem.atomTypep == atomType and np.linalg.norm(elem.atomDiff) <= 1e-12
    ]

    if show:
        printElements(elements, mark=foundIndices)
    return foundIndices
def intersite_shells(hdffile, refAtom, return_greensf=True, show=False):
    """
    Construct the green's function pairs to calculate the Jij exchange constants
    for a given reference atom from a given ``greensf.hdf`` file

    :param hdffile: filepath or file handle to a greensf.hdf file
    :param refAtom: integer of the atom to calculate the Jij's for (correspinds to the i)
    :param return_greensf: bool, if True instead of the indices aiterator yielding the
                           green's functions directly for calculations
    :param show: if True the elements belonging to a shell are printed in a shell

    :returns: either list of tuples with distance and all indices of pairs in the shell
              or flat iterator with distance and the two corresponding :py:class:`GreensFunction`
              instances
    """
    elements = listElements(hdffile)

    # Rounded distances define the shells; elements at the same (rounded)
    # distance from the reference atom belong to the same shell
    distances = [round(np.linalg.norm(elem.atomDiff), 12) for elem in elements]

    #sort the elements according to shells
    index_sorted = sorted(range(len(elements)), key=lambda k: distances[k])
    elements_sorted = [elements[index] for index in index_sorted]

    jijPairs = []
    for dist, shell in groupby(zip(index_sorted, elements_sorted), key=lambda k: distances[k[0]]):
        # dist == 0 corresponds to onsite elements, which are not Jij pairs
        if dist > 1e-12:
            if show:
                print(f'\nFound shell at distance: {dist}')
                print('The following elements are present:')
            shell_list = list(shell)
            jijPairsShell = []
            #Try to find gij gji pairs for Jij calculations
            # NOTE(review): only elemij.atomType is compared against refAtom;
            # the reverse pairing is enforced via the elemji checks -- confirm
            # this covers all intended pairs for mixed atom types
            for indexij, elemij in shell_list:
                for indexji, elemji in shell_list:
                    # A valid gij/gji pair lives on the same contour, connects
                    # refAtom with the same partner atom in both directions,
                    # has matching quantum numbers and opposite distance vectors
                    if elemij.contour != elemji.contour:
                        continue
                    if elemij.atomType != refAtom:
                        continue
                    if elemij.atomType != elemji.atomTypep:
                        continue
                    if elemij.atomTypep != elemji.atomType:
                        continue
                    if elemij.l != elemji.l:
                        continue
                    if elemij.lp != elemji.lp:
                        continue
                    if np.linalg.norm(elemij.atomDiff + elemji.atomDiff) > 1e-12:
                        continue
                    #here we have found a pair

                    #Plus 1 because the indexing starts at 1 in the hdf file
                    # Avoid adding the mirrored duplicate, except for pairs
                    # within the same atom type where both orders are needed
                    if (indexji + 1, indexij + 1) not in jijPairsShell or \
                       elemij.atomType == elemij.atomTypep:
                        jijPairsShell.append((indexij + 1, indexji + 1))
            if len(jijPairsShell) > 0:
                jijPairs.append((dist, jijPairsShell))

            if show:
                #print the elements in the shell
                elem = [x[1] for x in shell_list]
                index = [x[0] for x in shell_list]
                printElements(elem, index=index)

    def shell_iterator(shells):
        # Lazily read the Green's functions only when the pair is consumed
        for distance, pairs in shells:
            for g1, g2 in pairs:
                yield (distance,
                       GreensFunction.fromFile(hdffile, g1),\
                       GreensFunction.fromFile(hdffile, g2))

    if return_greensf:
        return shell_iterator(jijPairs)
    else:
        return jijPairs
| 2.109375 | 2 |
Logger.py | blake-edwards/Facial-Recognition | 0 | 12766645 | # logging times and names to a file for event parsing
import os
import time
from pathlib import Path
import csv
# Path of the tab-separated event log, created in the current working directory
logPath = os.path.join(os.getcwd(), "face-log.txt")
# Expected number of tab-separated fields per log row ("Event Time", "Identity")
FIELD_AMT = 2
class Logger(object):
    """Tab-separated event log of (timestamp, identity) rows stored at ``logPath``.

    The first line of the file is always the header ``Event Time<TAB>Identity``.
    Construction creates the file if it does not exist, or sanity-checks the
    header of an existing one.

    Fixed relative to the original: all file handles are now managed with
    ``with`` blocks (the original opened several handles it never closed),
    and ``checkLastSeen`` no longer raises IndexError on blank/short rows.
    """

    def __init__(self):
        """Create the log file if missing, otherwise validate its header."""
        if Path(logPath).is_file():
            print("Found previous log file")
            self._check_header()
        else:
            print("No previous log file detected!")
            print("Creating new log file")
            self._write_header()

    @staticmethod
    def _write_header():
        # (Re)create the file containing only the header row
        with open(logPath, "w") as log_file:
            log_file.write("Event Time\tIdentity\n")

    def _check_header(self):
        # Validate the header row of an existing file. An empty file gets a
        # fresh header; a malformed header is reported but not repaired
        # (matching the original behavior).
        with open(logPath, "r") as log_file:
            first_line = log_file.readline()
        if not first_line:
            print("[1] File is empty")
            self._write_header()
            return
        with open(logPath, "r") as log_file:
            header = next(csv.reader(log_file, delimiter='\t'))
        if len(header) > 1 and header[0] == "Event Time" and header[1] == "Identity":
            print("File correctly formatted")
        else:
            print("[1] File has an incorrectly formatted first line")
            print("Please fix the headers and run again")
        # TODO: check if the data within the file is formatted correctly w/ FIELD_AMT
        # TODO: clean incorrectly formatted data

    def addLog(self, name, time):
        """Append one ``time<TAB>name`` row; both arguments must be non-None."""
        if time is None or name is None:
            print("[ERROR] no name or time to add to log file!")
        else:
            with open(logPath, "a") as log_file:
                log_file.write(time + "\t" + name + "\n")
            print("Added log")

    # parses through the log and returns time when person was last seen
    def checkLastSeen(self, name):
        """Return the timestamp of the last row logged for ``name`` ('' if never)."""
        lastSeen = ""
        with open(logPath, "r") as log_file:
            for row in csv.reader(log_file, delimiter='\t'):
                # Guard against blank or short rows (the original indexed
                # row[1] unconditionally and could raise IndexError)
                if len(row) > 1 and row[1] == name:
                    lastSeen = row[0]
        return lastSeen
# Smoke test: append one entry for "blake_edwards" and read it back.
logger = Logger()
# Fixed: the original rebound the name `time` to a string here, shadowing the
# imported `time` module for the rest of the program.
timestamp = str(int(time.time()))
logger.addLog("blake_edwards", timestamp)
print("blake_edwards last seen @: " + logger.checkLastSeen("blake_edwards"))
| 3.296875 | 3 |
te/TE_CLASS.py | priyadarshitathagat/te-ns | 43 | 12766646 | #**********************************************************************************************
# Traffic Emulator for Network Services
# Copyright 2020 VMware, Inc
# The BSD-2 license (the "License") set forth below applies to all parts of
# the Traffic Emulator for Network Services project. You may not use this file
# except in compliance with the License.
#
# BSD-2 License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE
#**********************************************************************************************
from collections import defaultdict
class Singleton(type):
    """Metaclass caching exactly one instance per class.

    The first instantiation of a class using this metaclass constructs the
    object normally; every later call returns that same cached object.
    """
    # Shared cache mapping each class to its single instance
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            # First instantiation: build and cache the instance
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class TE(object):
__metaclass__ = Singleton
def __init__(self, daemon_ip, flask_port, redis_port, nginx_port, \
postgres_port, zmq_port, grafana_port, loglevel):
self.__daemon_ip = daemon_ip
self.__flask_port = flask_port
self.__nginx_port = nginx_port
self.__redis_port = redis_port
self.__postgres_port = postgres_port
self.__zmq_port = zmq_port
self.__grafana_port = grafana_port
self.__loglevel = loglevel
self.__te_dp_dict = {}
self.__resource_config = None
self.__session_config = None
self.__instance_profile_config = None
self.__client_cert_bundle = None
#Stats Collection Purpose
self.ses_time_stamps = defaultdict(list)
def add_ses_time_stamp(self, ses_tag, timestamp):
self.ses_time_stamps[ses_tag].append(timestamp)
def clear_ses_time_stamp(self):
self.ses_time_stamps.clear()
def get_daemon_ip(self):
return self.__daemon_ip
def get_flask_port(self):
return self.__flask_port
def get_nginx_port(self):
return self.__nginx_port
def get_redis_port(self):
return self.__redis_port
def get_postgres_port(self):
return self.__postgres_port
def get_zmq_port(self):
return self.__zmq_port
def get_grafana_port(self):
return self.__grafana_port
def get_loglevel(self):
return self.__loglevel
def set_te_dp(self, te_dp_dict):
self.__te_dp_dict = te_dp_dict
def unset_te_dp(self):
self.__te_dp_dict = {}
def get_te_dp(self):
return self.__te_dp_dict
def set_resource_config(self, resource_config):
self.__resource_config = resource_config
def unset_resource_config(self):
self.__resource_config = None
def get_resource_config(self):
return self.__resource_config
def set_session_config(self, session_config):
self.__session_config = session_config
def unset_session_config(self):
self.__session_config = None
def get_session_config(self):
return self.__session_config
def set_instance_profile_config(self, instance_profile_config):
self.__instance_profile_config = instance_profile_config
def unset_instance_profile_config(self):
self.__instance_profile_config = None
def get_instance_profile_config(self):
return self.__instance_profile_config
def set_client_cert_bundle(self, client_cert_bundle):
self.__client_cert_bundle = client_cert_bundle
def unset_client_cert_bundle(self):
self.__client_cert_bundle = None
def get_client_cert_bundle(self):
    """Return the client certificate bundle, or None if unset."""
    return self.__client_cert_bundle
| 0.945313 | 1 |
corehq/apps/userreports/tests/test_data_source_validaton.py | kkrampa/commcare-hq | 1 | 12766647 | <gh_stars>1-10
from __future__ import absolute_import, unicode_literals
from django.test import SimpleTestCase
from corehq.apps.userreports.exceptions import ValidationError
from corehq.apps.userreports.models import Validation
from corehq.apps.userreports.tests.utils import get_sample_data_source, get_sample_doc_and_indicators
class DataSourceValidationTest(SimpleTestCase):
    """Checks that data-source document validations fire as configured."""

    @staticmethod
    def _starred_validation(name, error_message):
        # Build a Validation requiring is_starred to be "yes" or "no".
        return Validation.wrap({
            "name": name,
            "error_message": error_message,
            "expression": {
                "type": "boolean_expression",
                "expression": {
                    "type": "property_name",
                    "property_name": "is_starred"
                },
                "operator": "in",
                "property_value": ["yes", "no"]
            }
        })

    def setUp(self):
        self.config = get_sample_data_source()
        self.config.validations = [
            self._starred_validation(
                "is_starred_valid", "is_starred has unexpected value"),
        ]

    def test_is_starred_validation(self):
        """A valid document passes; a bad is_starred value raises."""
        sample_doc, _ = get_sample_doc_and_indicators()
        self.assertIsNone(self.config.validate_document(sample_doc))

        sample_doc['is_starred'] = 'what is a star?'
        with self.assertRaisesRegexp(ValidationError, "is_starred has unexpected value"):
            self.config.validate_document(sample_doc)

    def test_multiple_validations(self):
        """Every failing validation is collected into one ValidationError."""
        self.config.validations = self.config.validations + [
            self._starred_validation(
                "a_second_validation", "another starred validation"),
        ]

        sample_doc, _ = get_sample_doc_and_indicators()
        self.assertIsNone(self.config.validate_document(sample_doc))

        sample_doc['is_starred'] = 'what is a star?'
        try:
            self.config.validate_document(sample_doc)
        except ValidationError as e:
            self.assertEquals(len(e.errors), 2)
        else:
            self.fail("There were no validation errors returned")
| 2.25 | 2 |
examples/tensorboard/projector_demo.py | dwolfschlaeger/guildai | 694 | 12766648 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf # Requires Tensorflow >=2.1
from tensorboard.plugins import projector
import tensorflow_datasets as tfds
# This demo expands upon the word embeddings tutorial found
# here: https://www.tensorflow.org/tutorials/text/word_embeddings
# and is intended to demonstrate the use of the embedding projector.

# Where TensorBoard reads logs from; overridable via the LOGDIR env var.
LOG_DIR = os.getenv("LOGDIR") or "/tmp/projector_demo"
METADATA_FNAME = "meta.tsv"  # Labels (one per embedding row) stored here
# NOTE(review): STEP is never read anywhere in this file — presumably a
# leftover; confirm before removing.
STEP = 0

# Load the subword-encoded IMDB reviews dataset together with its metadata.
(train_data, test_data), info = tfds.load(
    "imdb_reviews/subwords8k",
    split=(tfds.Split.TRAIN, tfds.Split.TEST),
    with_info=True,
    as_supervised=True,
)
# Subword text encoder; its vocabulary sizes the embedding layer below.
encoder = info.features["text"].encoder

# Shuffle and pad the variable-length reviews into batches of 10.
train_batches = train_data.shuffle(1000).padded_batch(10, padded_shapes=((None,), ()))

test_batches = test_data.shuffle(1000).padded_batch(10, padded_shapes=((None,), ()))

train_batch, train_labels = next(iter(train_batches))
embedding_dim = 16

# Basic embedding layer: one embedding_dim-sized vector per subword.
embedding = tf.keras.layers.Embedding(encoder.vocab_size, embedding_dim)
model = tf.keras.Sequential(
    [
        embedding,
        tf.keras.layers.GlobalAveragePooling1D(),
        tf.keras.layers.Dense(16, activation="relu"),
        tf.keras.layers.Dense(1),
    ]
)

# Compile as a binary sentiment classifier (logits + binary cross-entropy).
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

# Train for a single epoch — enough for demo-quality embeddings.
history = model.fit(
    train_batches, epochs=1, validation_data=test_batches, validation_steps=20
)

# Fetch the embedding layer and get the weights.
# Make sure to remove the first element, as it is padding.
weights = tf.Variable(model.layers[0].get_weights()[0][1:])
def register_embedding(weights, labels, log_dir) -> None:
    """Save embedding weights and labels, and configure the Embedding Projector.

    Writes a checkpoint derived from *weights* and a line-per-label metadata
    file into *log_dir*, then writes the projector config so TensorBoard
    knows where to find both.

    Args:
        weights: tf.Variable with the weights of the embedding layer to be displayed.
        labels: iterable of labels corresponding to the weight rows.
        log_dir: Directory into which to store the files, as a `str`.
    """
    # Create a checkpoint from the embedding; the attribute name
    # ("embedding") becomes part of the tensor name in the checkpoint.
    checkpoint = tf.train.Checkpoint(embedding=weights)
    # Bug fix: save into the log_dir parameter, not the module-level
    # LOG_DIR, so the function actually honors its argument.
    checkpoint.save(os.path.join(log_dir, "embedding.ckpt"))

    # Save labels separately in a line-by-line manner.
    with open(os.path.join(log_dir, METADATA_FNAME), "w") as f:
        for label in labels:
            f.write("{}\n".format(label))

    # Point the projector config at the checkpointed tensor and metadata.
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    # The name of the tensor will be suffixed by `/.ATTRIBUTES/VARIABLE_VALUE`
    embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
    embedding.metadata_path = METADATA_FNAME
    projector.visualize_embeddings(log_dir, config)
# Register the trained embedding and its subword vocabulary with the projector.
register_embedding(weights, encoder.subwords, LOG_DIR)
| 2.1875 | 2 |
bugbuzz_service/db/tables/files.py | fangpenlin/bugbuzz-api | 1 | 12766649 | <filename>bugbuzz_service/db/tables/files.py<gh_stars>1-10
from __future__ import unicode_literals
from sqlalchemy import Column
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.schema import ForeignKey
from . import metadata
from . import now_func
from ...utils import GUIDFactory
from .utc_dt import UTCDateTime
# SQLAlchemy table of files attached to a bugbuzz session.
files = Table(
    'files',
    metadata,
    # Primary key: generated GUID prefixed with "FL".
    Column('guid', Unicode(64), primary_key=True, default=GUIDFactory('FL')),
    # Owning session; rows are removed/updated with their session (CASCADE).
    Column('session_guid', Unicode(64), ForeignKey(
        'sessions.guid',
        ondelete='CASCADE',
        onupdate='CASCADE',
    ), nullable=False, index=True),
    # name of file
    Column('filename', Unicode, nullable=False),
    # mime type of file
    Column('mime_type', Unicode, nullable=False),
    # TODO: save in amazon S3 instead?
    # file content (raw bytes)
    Column('content', BYTEA, nullable=False),
    # AES 256 encryption IV; nullable — presumably NULL means the content
    # is stored unencrypted. TODO confirm against the writer of this table.
    Column('aes_iv', BYTEA),
    # TODO: add a hash column for querying files?
    # creation / last-update timestamps (UTC)
    Column('created_at', UTCDateTime, default=now_func),
    Column(
        'updated_at',
        UTCDateTime,
        default=now_func,
        onupdate=now_func,
    ),
)
| 2.046875 | 2 |
tests/test_gender.py | madduck/pytcnz | 0 | 12766650 | # -*- coding: utf-8 -*-
#
# Copyright © 2021–2022 <NAME> <<EMAIL>>
# Released under the MIT Licence
#
import pytest
from pytcnz.gender import Gender, InvalidGenderError
@pytest.fixture(
    params=[
        "M", "W",
        "Man", "Men", "Women", "Woman",
        "male", "female",
        None, "NONE", "N",
        "w", "m", "f",
    ]
)
def valid_gender(request):
    """Yield, one at a time, each spelling Gender.from_string must accept."""
    return request.param
def test_valid_genders(valid_gender):
    """Every accepted spelling parses without raising."""
    Gender.from_string(valid_gender)
@pytest.fixture(params=[1, "X", True, "bar"])
def invalid_gender(request):
    """Yield values Gender.from_string must reject."""
    return request.param
def test_invalid_genders(invalid_gender):
    """Rejected values raise InvalidGenderError."""
    with pytest.raises(InvalidGenderError):
        Gender.from_string(invalid_gender)
@pytest.fixture(
    params=[(Gender.M, "Male"), (Gender.W, "Female"), (Gender.N, "None")]
)
def gender_sex_pair(request):
    """Yield (gender, expected sex string) pairs for to_sex conversion."""
    return request.param
def test_gender_to_sex_conversion(gender_sex_pair):
    """Gender.to_sex maps each gender onto its expected sex string."""
    gender, expected_sex = gender_sex_pair
    assert Gender.to_sex(gender) == expected_sex
| 2.703125 | 3 |