max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
cfgov/sheerlike/apps.py | Mario-Kart-Felix/cfgov-refresh | 1 | 12757851 |
from django.apps import AppConfig
from django.conf import settings
class SheerlikeConfig(AppConfig):
name = 'sheerlike'
verbose_name = 'Sheerlike'
def ready(self):
for app, directory in settings.SHEER_SITES.items():
if directory.exists():
                engine_config = {
                    'NAME': app,
                    'BACKEND': 'django.template.backends.jinja2.Jinja2',
                    'DIRS': [
                        str(directory),
                        str(directory.child('_includes')),
                        str(directory.child('_layouts')),
                    ],
                    'OPTIONS': {
                        'environment': 'v1.environment',
                        'site_slug': app,
                        'extensions': [
                            'wagtail.wagtailcore.jinja2tags.core',
                            'wagtail.wagtailadmin.jinja2tags.userbar',
                            'wagtail.wagtailimages.jinja2tags.images',
                        ],
                    },
                }
                settings.TEMPLATES.append(engine_config)
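                # Illustrative note (not from this repo): SHEER_SITES is assumed
                # to map a site slug to a unipath.Path-like object, since the
                # code above relies on .exists() and .child().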
| 2.015625 | 2 |
bindings/python/examples/dbr_iterate.py | cmisale/data-broker | 19 | 12757852 |
#
# Copyright (C) 2018, 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from _dbr_interface import ffi
from dbr_module import dbr
dbr_name = "DBRtestname"
level = dbr.DBR_PERST_VOLATILE_SIMPLE
group_list = ffi.new('DBR_GroupList_t')
dbr_hdl = ffi.new('DBR_Handle_t*')
dbr_hdl = dbr.create(dbr_name, level, group_list)
group = dbr.DBR_GROUP_EMPTY
# query the DB to see if successful
dbr_state = ffi.new('DBR_State_t*')
res = dbr.query(dbr_hdl, dbr_state, dbr.DBR_STATE_MASK_ALL)
# put a set of keys
for i in range(10):
key = "simple_key_"+str(i)
print('Putting key: ' + key)
res = dbr.put(dbr_hdl, "test-value-"+str(i), key, dbr.DBR_GROUP_EMPTY)
iterator = dbr.DBR_ITERATOR_NEW
key, iterator = dbr.iterator(dbr_hdl, iterator, dbr.DBR_GROUP_EMPTY, "")
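# Walk the namespace: each dbr.iterator() call advances the iterator and yields
# the next key, until the iterator equals dbr.DBR_ITERATOR_DONE.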
while iterator != dbr.DBR_ITERATOR_DONE:
value, res = dbr.get(dbr_hdl, key, "", dbr.DBR_GROUP_EMPTY, dbr.DBR_FLAGS_NONE)
print('On key ' + key + ' Get returned: ' + value)
key, iterator = dbr.iterator(dbr_hdl, iterator, dbr.DBR_GROUP_EMPTY, "")
print('Delete Data Broker')
res = dbr.delete(dbr_name)
print('Exit Status: ' + dbr.getErrorMessage(res))
| 2.703125 | 3 |
PythonCode/PW9.1.py | aredshaw/CodePractice | 1 | 12757853 |
class Car:
def __init__(self, pMake, pModel, pColor, pPrice):
self.make = pMake
self.model = pModel
self.color = pColor
self.price = pPrice
def __str__(self):
return 'Make = %s, Model = %s, Color = %s, Price = %s' %(self.make, self.model, self.color, self.price)
    def selectColor(self):
self.color = input('What is the new color? ')
def calculateTax(self):
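        # applies an assumed flat 10% tax rate (price * 1.1)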
priceWithTax = 1.1*self.price
return priceWithTax
myFirstCar = Car(pMake = 'Honda', pModel = 'Civic', pColor = 'White', pPrice = 15000)
print(myFirstCar)
# changing the price from 15000 to 18000
myFirstCar.price = 18000
print(myFirstCar)
myFirstCar.color = 'Orange'
print(myFirstCar)
finalPrice = myFirstCar.calculateTax()
print('The final price is $%s' %(finalPrice)) | 3.734375 | 4 |
examples/test-all-low-mem/Prox.py | LuciMouse/EnviroPlus-FeatherWing | 10 | 12757854 | def Prox(tests, num_test_bytes, write_tests_to_nvm, reset):
try:
import board
from pimoroni_circuitpython_adapter import not_SMBus
from pimoroni_ltr559 import LTR559
i2c = board.I2C()
i2c_dev = not_SMBus(I2C=i2c)
ltr559 = LTR559(i2c_dev=i2c_dev)
        reading = ltr559.get_proximity()
        if 0 <= reading <= 2047:
            tests["Prox"]["Passed"] = True
            print("Passed with", reading)
else:
tests["Prox"]["Passed"] = False
print("Failed")
except Exception as e:
tests["Prox"]["Passed"] = False
print("Failed with ", e)
finally:
tests["Prox"]["Test Run"] = True
write_tests_to_nvm(tests, num_test_bytes)
reset() | 2.3125 | 2 |
hivehoney/hh.py | hive-scripts/hivehoney | 4 | 12757855 |
from __future__ import with_statement
"""
set PROXY_HOST=your_bastion_host
set SERVICE_USER=your_func_user
set LINUX_USER=your_SOID
set LINUX_PWD=<PASSWORD>
"""
import click
import paramiko
import time, sys, os
from pprint import pprint
t_time1 = time.time()
e=sys.exit
import argparse
env=dict(os.environ)
assert 'SERVICE_USER' in env
SERVICE_USER=env['SERVICE_USER']
assert 'LINUX_USER' in env
LINUX_USER=env['LINUX_USER']
assert 'LINUX_PWD' in env
LINUX_PWD=env['LINUX_PWD']
assert 'PROXY_HOST' in env
PROXY_HOST=env['PROXY_HOST']
parser = argparse.ArgumentParser(description='Query file.')
parser.add_argument('--query_file', dest='query_file', type=str,
help='Query file')
parser.add_argument('--out_file', dest='out_file', type=str, default='data_dump.csv',
help='Output csv file')
args = parser.parse_args()
if 1:
assert os.path.isfile(args.query_file), 'Query file does not exists.'
lstat=os.stat(args.query_file)
assert lstat.st_size>0, "Query file is empty"
assert not os.path.isfile(args.out_file), 'Dump file already exists.'
nbytes = 1024*16
BASTION_HOST = PROXY_HOST #'bdgtproxyhad01h2d'
port = 22
#SERVICE_USER='gfocnnsg '
DATA_DUMP_FILE=args.out_file
DIR_REMOTE='/tmp'
QUERY_FILE = args.query_file
REMOTE_QUERY_FILE= DIR_REMOTE + '/' + QUERY_FILE
PB_FILE='pbrun.exp'
REMOTE_PB_FILE=DIR_REMOTE + '/' + PB_FILE
command = '%s %s %s %s && exit;' % (REMOTE_PB_FILE, SERVICE_USER, LINUX_PWD, REMOTE_QUERY_FILE)
print(command)
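# The command above runs the uploaded expect script (pbrun.exp) on the bastion
# host, passing the service user, its password and the remote query file.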
#command = 'history && exit;'
total_bytes=0
def put_file(sftp, local_file, remote_file, mode=None):
#local_file = os.path.join(DIR_LOCAL, fname)
#remote_file = DIR_REMOTE + '/' + fname
print(local_file, remote_file)
assert os.path.isfile(local_file), 'Dump file is missing.'
try:
sftp.unlink(remote_file)
print('Remote file deleted')
except FileNotFoundError as not_found:
print ('Ignoring remote FileNotFoundError.')
#e(0)
try:
print('start transport...')
sftp.put(local_file, remote_file)
except :
raise
rstat=sftp.stat(remote_file)
lstat=os.stat(local_file)
print(lstat.st_size, rstat.st_size)
assert lstat.st_size == rstat.st_size, "File size mismatch (%d<>%d)" % (lstat.st_size, rstat.st_size)
if mode:
sftp.chmod(remote_file, mode)
def get_data(extract_to_file):
global total_bytes
if 1:
client = paramiko.Transport((BASTION_HOST, port))
client.window_size = pow(2,35)
client.REKEY_BYTES = pow(2, 40)
client.REKEY_PACKETS = pow(2, 40)
    else:
        # NOTE: FastTransport is not defined anywhere in this script; this
        # branch is dead code because the condition above is always true.
        client = FastTransport((BASTION_HOST, port))
    client.connect(username=LINUX_USER, password=LINUX_PWD)  # the password argument was redacted upstream; LINUX_PWD (read from the environment above) is the evident intent
sftp = paramiko.SFTPClient.from_transport(client)
print(REMOTE_QUERY_FILE)
put_file(sftp,QUERY_FILE, REMOTE_QUERY_FILE)
import stat
put_file(sftp,PB_FILE, REMOTE_PB_FILE, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
#e(0)
stdout_data = []
stderr_data = []
session = client.open_channel(kind='session')
session.settimeout(10)
session.exec_command(command)
while True:
if session.recv_ready():
data=session.recv(nbytes)
print(data.decode("ascii"))
if data.startswith(b'Java HotSpot(TM) 64-Bit Server VM warning:'):
#print (123)
break
if session.recv_stderr_ready():
stderr_data.append(session.recv_stderr(nbytes))
if session.exit_status_ready():
break
with open(extract_to_file, 'ab') as the_file:
while True:
if session.recv_ready():
data=session.recv(nbytes)
#print('>%s<' % data.decode("ascii"))
if not data.strip():
print ('Ignoring newline.')
else: #write first block
the_file.write(data)
#print('%d\tbytes written.' % len(data))
total_bytes +=len(data)
break
if session.recv_stderr_ready():
stderr_data.append(session.recv_stderr(nbytes))
if session.exit_status_ready():
break
time1 = time.time()
while True: #write the rest of the blocks
if session.recv_ready():
if 1:
#print(session.recv(nbytes).decode("ascii"))
data=session.recv(nbytes)
the_file.write(data)
#print('%d\tbytes written.' % len(data))
total_bytes +=len(data)
else:
stdout_data.append(session.recv(nbytes))
if session.recv_stderr_ready():
stderr_data.append(session.recv_stderr(nbytes))
if session.exit_status_ready():
break
time2 = time.time()
print('TOTAL BYTES:\t%d' % total_bytes)
    print ('Elapsed: %0.3f s' % ( (time2-time1)))
print ('exit status: ', session.recv_exit_status())
print(len(stdout_data))
print (stderr_data)
session.close()
client.close()
t_time2 = time.time()
    print ('TOTAL Elapsed: %0.3f s' % ( (t_time2-t_time1)))
def get_tail(dump_file, n):
with open(dump_file, 'rb') as f:
assert n >= 0
pos, lines = n+1, []
while len(lines) <= n:
try:
f.seek(-pos, 2)
except IOError:
f.seek(0)
break
finally:
lines = list(f)
pos *= 2
return lines[-n:]
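# e.g. get_tail('dump.csv', 3) returns the last three lines of the file, as bytes.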
def truncate_file(fh):
fh.seek(0, os.SEEK_END)
pos = fh.tell() - len(os.linesep.encode())
while pos > 0 and fh.read(1) != "\n":
pos -= 1
fh.seek(pos, os.SEEK_SET)
if pos > 0:
fh.seek(pos+1, os.SEEK_SET)
fh.truncate()
if __name__=='__main__':
if 1:
get_data(DATA_DUMP_FILE)
if 1:
tail=get_tail(DATA_DUMP_FILE, n=7 )
tail.reverse()
if 1:
with open(DATA_DUMP_FILE, "r+", encoding = "utf-8") as fh:
if 1:
assert tail[0].endswith(b'logout'+os.linesep.encode()), "Wrong tail format (logout)"
truncate_file(fh)
if 1:
assert tail[1].endswith(b'exit'+os.linesep.encode()), "Wrong tail format (exit)"
truncate_file(fh)
if 1:
assert tail[2]==os.linesep.encode(), "Wrong tail format (new line)"
truncate_file(fh)
if 1:
assert tail[3]==os.linesep.encode(), "Wrong tail format (new line)"
truncate_file(fh)
if 1:
assert tail[4]==os.linesep.encode(), "Wrong tail format (new line)"
truncate_file(fh)
| 2.171875 | 2 |
20200602_homework_week8/jarek/assignment1/sripts_and_files_for_homework/webscraper.py | DTRademaker/DL_tutorials_2020 | 0 | 12757856 |
import urllib.request
import re
import os
__author__ = '<NAME>', '<NAME>'
website = 'https://emojipedia.org/apple/'
img_locations = []
def getHTML(website): # Retrieve the source code from any website
req = urllib.request.Request(website)
req.add_header('User-Agent', '')
response = urllib.request.urlopen(req)
html = response.read() # Read the source
return html
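# Note: urlopen().read() returns bytes, hence the str() conversions before
# regex matching below.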
html = str(getHTML(website)) # get the source code
print(html)
refs = re.findall('<a href="/(.*?)/">\n', html)# Retrieve all emoji classes
refs = refs[:92] # only interested in 'face' emojis
refs.remove('money-mouth-face') # Remove too strange faces
refs.remove('upside-down-face') # Remove too strange faces
refs.remove('rolling-on-the-floor-laughing')# Remove too strange faces
refs.remove('exploding-head')# Remove too strange faces
refs.remove('face-screaming-in-fear')# Remove too strange faces
base_ref = 'https://emojipedia.org/%s/' # The base site all faces have in common
for emoji in refs:
print(emoji)
    html = str(getHTML(base_ref % emoji)) # get html per emoji class (str() because getHTML returns bytes)
img_locations += re.findall('<img src="(.*?).png"', html) # extract all image locations
# Create a directory to save the images in
try: os.mkdir('imgs')
except FileExistsError: pass
# Download the actual png images
for i, n in enumerate(img_locations):
if 'docomo' not in n and 'au-kddi' not in n:
name = '%s_%i.png' % (n.split('/')[-1], i)
os.system('cd imgs; wget -q --output-document=%s %s.png' % (name, n))
| 3.15625 | 3 |
python/ray/rllib/examples/test_invese_model.py | hxkk/ray | 0 | 12757857 | import pickle

from ray.rllib.agents.bco.inverse_dynamics_model import InverseDynamicsModel
from osim.env import ProstheticsEnv
env = ProstheticsEnv(visualize=True)
# env.change_model(model='3D', prosthetic=False)
print(env.action_space) # Returns `Box(19,)`
print(env.action_space.low) # Returns list of 19 zeroes
print(env.action_space.high) # Returns list of 19 ones
env_model = InverseDynamicsModel(env_creator, config, True)  # NOTE: env_creator and config are not defined in this snippet; they must be supplied by the surrounding project
env_model_data = pickle.load(open("/data/nips/ckpt/checkpoint-1128.env_mdoel_data", "rb"))
env_model.set_weights(env_model_data)
actions = env_model.test_model()
observation = env.reset()
for i in range(300):
# action = df.loc[i][1:].tolist()
action = actions[i]
observation, reward, done, info = env.step(action)
print(reward) | 2.390625 | 2 |
Helpers/nodes.py | bascoe10/data-structures-and-algorithms-python | 2 | 12757858 | class SimpleTreeNode:
def __init__(self, val, left=None, right=None) -> None:
self.val = val
self.left = left
self.right = right
class AdvanceTreeNode:
def __init__(self, val, parent=None, left=None, right=None) -> None:
self.val = val
self.parent: AdvanceTreeNode = parent
self.left: AdvanceTreeNode = left
self.right: AdvanceTreeNode = right
class SingleListNode:
def __init__(self, val, next=None) -> None:
self.val = val
self.next = next
class DoubleListNode:
def __init__(self, val, next=None, prev=None) -> None:
self.val = val
self.next = next
self.prev = prev
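# Illustrative usage (not part of the original file):
#   head = SingleListNode(1, SingleListNode(2, SingleListNode(3)))
#   root = SimpleTreeNode(2, SimpleTreeNode(1), SimpleTreeNode(3))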
| 3.46875 | 3 |
chexpert_labeler/__init__.py | stmharry/interpretable-report-gen | 2 | 12757859 |
import functools
from pathlib import Path
from chexpert_labeler.loader import Loader
from chexpert_labeler.stages import Extractor, Classifier, Aggregator
from chexpert_labeler.constants import CATEGORIES
working_dir = Path(__file__).parent
Loader = functools.partial(
Loader,
extract_impression=False,
)
Extractor = functools.partial(
Extractor,
mention_phrases_dir=working_dir.joinpath('phrases', 'mention'),
unmention_phrases_dir=working_dir.joinpath('phrases', 'unmention'),
)
Classifier = functools.partial(
Classifier,
pre_negation_uncertainty_path=working_dir.joinpath('patterns', 'pre_negation_uncertainty.txt'),
negation_path=working_dir.joinpath('patterns', 'negation.txt'),
post_negation_uncertainty_path=working_dir.joinpath('patterns', 'post_negation_uncertainty.txt'),
)
Aggregator = functools.partial(
Aggregator,
CATEGORIES,
)
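# functools.partial pre-binds the phrase and pattern paths above, so callers
# can construct e.g. Extractor() or Classifier() without repeating them; any
# remaining constructor arguments are supplied at call time.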
| 2.046875 | 2 |
support_contracts_plugin/__init__.py | Haeki/netbox_support_contracts_plugin | 7 | 12757860 |
from extras.plugins import PluginConfig
class SupportContractsConfig(PluginConfig):
"""
This class defines attributes for the NetBox Support Contracts plugin.
"""
# Plugin package name
name = 'support_contracts_plugin'
# Human-friendly name and description
verbose_name = 'Support Contracts'
description = 'Plugin that allows to create support contracts and add them to devices'
# Plugin version
version = '0.2'
# Plugin author
author = '<NAME>'
author_email = '<EMAIL>'
# Configuration parameters that MUST be defined by the user (if any)
required_settings = []
# Default configuration parameter values, if not set by the user
#default_settings = {
# 'loud': True
#}
# Base URL path. If not set, the plugin name will be used.
base_url = 'support-contracts'
# Caching config
caching_config = {}
config = SupportContractsConfig
| 2.453125 | 2 |
scripts/separate_gene_assignments.py | klaricch/Transposons2 | 1 | 12757861 | #!/usr/bin/env python
# this script classifies TE positions as genic or intergenic
# it also outputs the sequence name if a TE was found in a gene, and whether that TE was in the "border" region of a gene (within 10 bp of the end)
# or in an "internal" region
# USE: separate_gene_assignments.py
import re
internal_TEs={}
full_ins_TEs={}
all_TEs={}
'''
chr X
start_TE X
end_TE x
start_gene/NA X
TE X
orient X
RS X
part X
gene_name/NA X
gene class protein_coding/pseduogene/NA X
'''
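# Example TE identifier that the regexes below are written against (illustrative
# only, not taken from the data):
#   I_100_200_Tc1_reference_non_ref_3_0.5_+_absence_JU258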
# put TEs overlapped with genes into dict
with open("all_window.txt", 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split("[\t]", line)
TE=items[12]
#gene info:
gene_start=items[3]
gene_end=items[4]
gene_info=items[8]
match=re.search("sequence_name=([A-za-z\d\.]+);", gene_info)
gene_name=match.group(1)
match=re.search("biotype=([A-za-z]+);", gene_info)
gene_class=match.group(1)
#TE info:
match = re.search("([A-Z]+)_(\d+)_(\d+)_(([A-Za-z\d+_-]+))_((\w+-)?reference)_+([a-z]+)_([a-z]+)_(\d+)_([\d\.]+)_([\+-])_([A-Za-z]+)_(\w+)", TE) #([A-Za-z\d+_])_((\w+-)?reference)\w+_\d+_\d+
chromosome = match.group(1)
start = match.group(2)
start2 = match.group(3)
te = match.group(4)
RS = match.group(11)
orient = match.group(12)
method=match.group(13)
sample = match.group(14)
new_info="{chromosome}\t{start}\t{start2}\t{method}\t{gene_start}\t{gene_end}\t{te}\t{orient}\t{RS}\t{gene_name}\t{gene_class}".format(**locals())
internal_TEs[TE]=new_info
# put insertion TEs overlapped with full genes giff into dict
with open("insertions_full_window.txt", 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split("[\t]", line)
TE=items[12]
#gene info:
gene_start=items[3]
gene_end=items[4]
gene_info=items[8]
match=re.search("sequence_name=([A-za-z\d\.]+);", gene_info)
gene_name=match.group(1)
match=re.search("biotype=([A-za-z]+);", gene_info)
gene_class=match.group(1)
#TE info:
match = re.search("([A-Z]+)_(\d+)_(\d+)_(([A-Za-z\d+_-]+))_((\w+-)?reference)_+([a-z]+)_([a-z]+)_(\d+)_([\d\.]+)_([\+-])_([A-Za-z]+)_(\w+)", TE) #([A-Za-z\d+_])_((\w+-)?reference)\w+_\d+_\d+
chromosome = match.group(1)
start = match.group(2)
start2 = match.group(3)
te = match.group(4)
RS = match.group(11)
orient = match.group(12)
method=match.group(13)
sample = match.group(14)
new_info="{chromosome}\t{start}\t{start2}\t{method}\t{gene_start}\t{gene_end}\t{te}\t{orient}\t{RS}\t{gene_name}\t{gene_class}".format(**locals())
full_ins_TEs[TE]=new_info
# put all TEs into dict
with open("all.bed", 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split("[\t]", line)
TE=items[3]
#TE info:
match = re.search("([A-Z]+)_(\d+)_(\d+)_(([A-Za-z\d+_-]+))_((\w+-)?reference)_+([a-z]+)_([a-z]+)_(\d+)_([\d\.]+)_([\+-])_([A-Za-z]+)_(\w+)", TE) #([A-Za-z\d+_])_((\w+-)?reference)\w+_\d+_\d+
chromosome = match.group(1)
start = match.group(2)
start2 = match.group(3)
te = match.group(4)
RS = match.group(11)
orient = match.group(12)
method=match.group(13)
sample = match.group(14)
gene_start="NA"
gene_end="NA"
gene_name="NA"
gene_class="NA"
new_info="{chromosome}\t{start}\t{start2}\t{method}\t{gene_start}\t{gene_end}\t{TE}\t{orient}\t{RS}\t{gene_name}\t{gene_class}".format(**locals())
all_TEs[TE]=new_info
OUT=open("TE_gene_interrupt_output.txt", 'w')
for key, value in all_TEs.items():
if key in full_ins_TEs.keys() and key not in internal_TEs.keys():
part="border"
overall="Genic"
value=full_ins_TEs[key]
elif key in internal_TEs.keys():
part="internal"
overall="Genic"
value=internal_TEs[key]
else:
part="intergenic"
overall="Intergenic"
value="{value}\t{part}\t{overall}".format(**locals())
OUT.write(value)
OUT.write('\n')
OUT.close()
| 2.90625 | 3 |
src/hepdash/funcs/make_tree.py | ethansimpson285/HEPDash | 0 | 12757862 |
'''
HEP-Dash
<NAME>
December 10th 2021
Notes:
- The primary function for creating the web-app must be called main()
- How to re-factor this into something more user friendly?
- Just now, the only thing the user is required to do is edit the input dict - this could come from a config file
- Or it could come from a parser but probably complex
'''
# Imports
# Base imports
import sys
from dataclasses import dataclass
# Third-party imports
import streamlit as st
from streamlit import cli as stcli
# Package imports
from hepdash.apps.Tree_Apps import Preset, General
app_type = sys.argv[1]
config_file = sys.argv[2]
if app_type=="preset":
app_func = Preset
elif app_type=="general":
app_func = General
elif app_type=="specific":
pass
def main():
    # Initialise the streamlit web-app object
st.set_page_config(layout='wide')
st.title("HEP Dash")
# Import the data
App1 = app_func.make_from_config(config_file)
print("ROOT files loaded")
App1.add_object_pages()
print("Pages written")
App1.make_multipage()
print("Construction complete")
if __name__ == '__main__':
if st._is_running_with_streamlit:
# file_name = sys.argv[1]
# tree_name = sys.argv[2]
# branch_name = sys.argv[3]
main()
else:
sys.argv = ["streamlit", "run", sys.argv[0],sys.argv[1],sys.argv[2]]#,sys.argv[3]]
sys.exit(stcli.main())
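# When executed directly (not under Streamlit), the script re-invokes itself
# via "streamlit run <script> <app_type> <config_file>"; on that second pass
# st._is_running_with_streamlit is true and main() runs inside the server.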
| 2.328125 | 2 |
true_house/activities/views.py | gAmadorH/true-house-be | 0 | 12757863 |
from datetime import datetime, timedelta
from rest_framework import status, mixins, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from activities.serializers import (
ActivitySerializer,
StatusActivitySerializer,
ScheduleActivitySerializer,
PropertySerializer,
SurveySerializer,
)
from activities.models import Activity, Property, Survey
class ActivityViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet,
):
serializer_class = ActivitySerializer
def get_queryset(self):
queryset = Activity.objects.all()
q_from = self.request.query_params.get("from", None)
q_to = self.request.query_params.get("to", None)
q_status = self.request.query_params.get("status", None)
if self.action == "list":
if (q_from and q_to) or q_status:
if q_from and q_to:
                    queryset = queryset.filter(schedule__range=(q_from, q_to))
if q_status:
                    queryset = queryset.filter(status=q_status)
else:
now = datetime.now()
high_l = now + timedelta(weeks=2)
low_l = now - timedelta(days=3)
queryset = queryset.filter(schedule__range=(low_l, high_l))
return queryset
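    # Default window for list(): activities scheduled between 3 days in the
    # past and 2 weeks in the future, unless explicit filters are supplied.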
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
serializer = ActivitySerializer(
queryset, context={"request": request}, many=True
)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True, methods=["patch"])
def schedule(self, request, pk=None):
activity = self.get_object()
serializer = ScheduleActivitySerializer(activity, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=["patch"])
def status(self, request, pk=None):
activity = self.get_object()
serializer = StatusActivitySerializer(activity, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class PropertyViewSet(viewsets.ModelViewSet):
serializer_class = PropertySerializer
queryset = Property.objects.all()
class SurveyViewSet(viewsets.ModelViewSet):
serializer_class = SurveySerializer
queryset = Survey.objects.all()
| 2.0625 | 2 |
examples/createTable.py | parasailteam/coconet | 5 | 12757864 | import re
f = open("adam-results-128-gpus-all-algos", "r")
ourAdam = f.read()
f.close()
f = open("/philly/rr3/msrhyperprojvc2_scratch/saemal/abhinav/nccl-manual/samples/optim-bench-results-128GPUs", "r")
otherAdams = f.read()
f.close()
adamResults = {"FusedAdam":{}, "PyTorchAdam":{}, "OurAdam":{}} #dictionary of [FusedAdam, PyTorchAdam, OurAdam]x[Sizes]x[Times]
allSizes = []
for size, time in re.findall(r'\(null\) (\d+) ([\d\.]+)', ourAdam):
adamResults["OurAdam"][int(size)] = float(time)
allSizes += [int(size)]
for size, time in re.findall(r'fusedadam (\d+) \d+ ([\d\.]+)', otherAdams):
adamResults["FusedAdam"][int(size)] = float(time)
for size, time in re.findall(r'adam (\d+) \d+ ([\d\.]+)', otherAdams):
adamResults["PyTorchAdam"][int(size)] = float(time)
print ("{:<15} {:<15} {:<15} {:<15} {:<15} {:<15}".format("Size", "FusedAdam", "PyTorchAdam", "OurAdam", "Speedup Over FusedAdam", "Speedup Over PytorchAdam"))
for sz in allSizes:
print("{:<15} {:<15.2f} {:<15.2f} {:<15.2f} {:<20.2f} {:<20.2f}".format(sz, adamResults["FusedAdam"][sz], adamResults["PyTorchAdam"][sz], adamResults["OurAdam"][sz],
adamResults["FusedAdam"][sz]/adamResults["OurAdam"][sz], adamResults["PyTorchAdam"][sz]/adamResults["OurAdam"][sz]))
| 2.609375 | 3 |
imgproc/processing/chroma_key.py | Semior001/mdcd-travelhack | 0 | 12757865 | import cv2
import rest
import numpy as np
class ChromaKeyServiceImpl(rest.ChromaKeyingService):
def replace(self, src_image_str, bg_image_str) -> bytes:
bg = cv2.imdecode(np.frombuffer(bg_image_str, np.uint8), cv2.IMREAD_COLOR)
img = cv2.imdecode(np.frombuffer(src_image_str, np.uint8), cv2.IMREAD_COLOR)
RED, GREEN, BLUE = (2, 1, 0)
reds = img[:, :, RED]
greens = img[:, :, GREEN]
blues = img[:, :, BLUE]
        # z = np.zeros(shape=img.shape, dtype=int)
mask = (greens < 70) | (reds > greens) | (blues > greens)
mask = mask.astype("uint8") * 255
# print(mask)
mask_inv = cv2.bitwise_not(mask)
# cv2.imshow("Mask", mask)
# cv2.imshow("Mask inv", mask_inv)
# converting mask 2d to 3d
result = cv2.bitwise_and(img, img, mask=mask)
        bg = cv2.resize(bg, (1280, 720))  # assumes the source image is 1280x720 so mask and background shapes match
bg = cv2.bitwise_and(bg, bg, mask=mask_inv)
res = cv2.add(result, bg)
is_success, im_buf_arr = cv2.imencode(".jpg", res)
return im_buf_arr.tobytes()
# cv2.imshow("Result", res)
# # cv2.imshow("Bg", bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
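        # Illustrative usage (file names hypothetical):
        #   svc = ChromaKeyServiceImpl()
        #   with open('subject.jpg', 'rb') as f, open('background.jpg', 'rb') as g:
        #       jpg_bytes = svc.replace(f.read(), g.read())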
| 2.90625 | 3 |
seatable_api/column.py | schattian/seatable-api-python | 12 | 12757866 |
from datetime import datetime
from .constants import ColumnTypes
# NULL_LIST (below) distinguishes genuinely empty values from the numbers 0 and
# 0.00, which are meaningful values in a number-type column.
NULL_LIST = ['', [], None]
# Column Value related classes handle the compare computation of the table data
class ColumnValue(object):
"""
This is for the computation of the comparison between the input value and cell value from table
such as >, <, =, >=, <=, !=, which is supposed to fit different column types
"""
def __init__(self, column_value, column_type=None):
self.column_value = column_value
self.column_type = column_type
def equal(self, value):
if value == '':
return self.column_value in NULL_LIST
return self.column_value == value
def unequal(self, value):
if value == '':
return self.column_value not in NULL_LIST
return self.column_value != value
def greater_equal_than(self, value):
raise ValueError("%s type column does not support the query method '%s'" % (self.column_type, '>='))
def greater_than(self, value):
raise ValueError("%s type column does not support the query method '%s'" % (self.column_type, '>'))
def less_equal_than(self, value):
raise ValueError("%s type column does not support the query method '%s'" % (self.column_type, '<='))
def less_than(self, value):
raise ValueError("%s type column does not support the query method '%s'" % (self.column_type, '<'))
def like(self, value):
'''fuzzy search'''
raise ValueError("%s type column does not support the query method '%s'" % (self.column_type, 'like'))
class StringColumnValue(ColumnValue):
"""
the return data of string column value is type of string, including column type of
text, creator, single-select, url, email,....., and support the computation of
= ,!=, and like(fuzzy search)
"""
def like(self, value):
if "%" in value:
column_value = self.column_value or ""
# 1. abc% pattern, start with abc
if value[0] != '%' and value[-1] == '%':
start = value[:-1]
return column_value.startswith(start)
# 2. %abc pattern, end with abc
elif value[0] == '%' and value[-1] != '%':
end = value[1:]
return column_value.endswith(end)
# 3. %abc% pattern, contains abc
elif value[0] == '%' and value[-1] == '%':
middle = value[1:-1]
return middle in column_value
# 4. a%b pattern, start with a and end with b
else:
value_split_list = value.split('%')
start = value_split_list[0]
end = value_split_list[-1]
return column_value.startswith(start) and column_value.endswith(end)
else:
raise ValueError('There is no patterns found in "like" phrases')
class NumberDateColumnValue(ColumnValue):
"""
the returned data of number-date-column is digit number, or datetime obj, including the
type of number, ctime, date, mtime, support the computation of =, > ,< ,>=, <=, !=
"""
def greater_equal_than(self, value):
if value == "":
self.raise_error()
return self.column_value >= value if self.column_value not in NULL_LIST else False
def greater_than(self, value):
if value == "":
self.raise_error()
return self.column_value > value if self.column_value not in NULL_LIST else False
def less_equal_than(self, value):
if value == "":
self.raise_error()
return self.column_value <= value if self.column_value not in NULL_LIST else False
def less_than(self, value):
if value == "":
self.raise_error()
return self.column_value < value if self.column_value not in NULL_LIST else False
def raise_error(self):
raise ValueError("""The token ">", ">=", "<", "<=" does not support the null query string "".""")
class ListColumnValue(ColumnValue):
"""
the returned data of list-column value is a list like data structure, including the
type of multiple-select, image, collaborator and so on, support the computation of
=, != which should be decided by in or not in expression
"""
def equal(self, value):
if not value:
return self.column_value in NULL_LIST
column_value = self.column_value or []
return value in column_value
def unequal(self, value):
if not value:
return self.column_value not in NULL_LIST
column_value = self.column_value or []
return value not in column_value
class BoolColumnValue(ColumnValue):
"""
the returned data of bool-column value is should be True or False, such as check-box
type column. If the value from table shows None, treat it as False
"""
def equal(self, value):
return bool(self.column_value) == value
def unequal(self, value):
return bool(self.column_value) != value
# Column classes handle parsing of both the input values supplied by users in
# query statements and the cell values from the table, which use different
# data structures across column types
class BaseColumn(object):
def parse_input_value(self, value):
return value
def parse_table_value(self, value):
return ColumnValue(value)
class TextColumn(BaseColumn):
def __init__(self):
self.column_type = ColumnTypes.TEXT.value
def __str__(self):
return "SeaTable Text Column"
def parse_table_value(self, value):
return StringColumnValue(value, self.column_type)
class LongTextColumn(TextColumn):
def __init__(self):
super(LongTextColumn, self).__init__()
self.column_type = ColumnTypes.LONG_TEXT.value
def __str__(self):
return "SeaTable Long Text Column"
def parse_table_value(self, value):
value = value.strip('\n')
return StringColumnValue(value, self.column_type)
class NumberColumn(BaseColumn):
def __init__(self):
self.column_type = ColumnTypes.NUMBER.value
def __str__(self):
return "SeaTable Number Column"
def parse_input_value(self, value):
if value == "":
return value
if '.' in value:
value = float(value)
else:
try:
value = int(value)
except:
self.raise_input_error(value)
return value
def parse_table_value(self, value):
return NumberDateColumnValue(value, self.column_type)
def raise_input_error(self, value):
raise ValueError("""%s type column does not support the query string as "%s",
please use "" or digital numbers
""" % (self.column_type, value))
class DateColumn(BaseColumn):
def __init__(self):
self.column_type = ColumnTypes.DATE.value
def __str__(self):
return "SeaTable Date Column"
def parse_input_value(self, time_str):
if not time_str:
return ""
try:
time_str_list = time_str.split(' ')
datetime_obj = None
if len(time_str_list) == 1:
ymd = time_str_list[0]
datetime_obj = datetime.strptime(ymd, '%Y-%m-%d')
elif len(time_str_list) == 2:
h, m, s = 0, 0, 0
ymd, hms_str = time_str_list
hms_str_list = hms_str.split(':')
if len(hms_str_list) == 1:
h = hms_str_list[0]
elif len(hms_str_list) == 2:
h, m = hms_str_list
elif len(hms_str_list) == 3:
h, m, s = hms_str_list
datetime_obj = datetime.strptime("%s %s" % (
ymd, "%s:%s:%s" % (h, m, s)), '%Y-%m-%d %H:%M:%S')
return datetime_obj
except:
return self.raise_error(time_str)
def parse_table_value(self, time_str):
return NumberDateColumnValue(self.parse_input_value(time_str), self.column_type)
def raise_error(self, value):
raise ValueError(""" %s type column does not support the query string as "%s",
the supported query string pattern like:
"YYYY-MM-DD" or
"YYYY-MM-DD hh" or
"YYYY-MM-DD hh:mm" or
"YYYY-MM-DD hh:mm:ss" or
""" % (self.column_type, value))
class CTimeColumn(DateColumn):
def __init__(self):
super(CTimeColumn, self).__init__()
self.column_type = ColumnTypes.CTIME.value
def __str__(self):
return "SeaTable CTime Column"
def get_local_time(self, time_str):
utc_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f+00:00')
delta2utc = datetime.now() - datetime.utcnow()
local_time = utc_time + delta2utc
return local_time
def parse_table_value(self, time_str):
return NumberDateColumnValue(self.get_local_time(time_str), self.column_type)
class MTimeColumn(CTimeColumn):
def __init__(self):
super(MTimeColumn, self).__init__()
self.column_type = ColumnTypes.MTIME.value
def __str__(self):
return "SeaTable MTime Column"
def parse_table_value(self, time_str):
return NumberDateColumnValue(super(MTimeColumn, self).get_local_time(time_str), self.column_type)
class CheckBoxColumn(BaseColumn):
def __init__(self):
super(CheckBoxColumn, self).__init__()
self.column_type = ColumnTypes.CHECKBOX.value
def __str__(self):
return "SeaTable Checkbox Column"
def parse_input_value(self, value):
if not value:
return False
elif value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
self.raise_error(value)
def parse_table_value(self, value):
return BoolColumnValue(value, self.column_type)
def raise_error(self, value):
raise ValueError(""" %s type column does not support the query string as "%s",
the supported query string pattern like:
"true" or "false", case insensitive
""" % (self.column_type, value))
class MultiSelectColumn(BaseColumn):
def __init__(self):
super(MultiSelectColumn, self).__init__()
self.column_type = ColumnTypes.MULTIPLE_SELECT.value
def parse_table_value(self, value):
return ListColumnValue(value, self.column_type)
COLUMN_MAP = {
ColumnTypes.NUMBER.value: NumberColumn(), # 1. number type
ColumnTypes.DATE.value: DateColumn(), # 2. date type
ColumnTypes.CTIME.value: CTimeColumn(), # 3. ctime type, create time
ColumnTypes.MTIME.value: MTimeColumn(), # 4. mtime type, modify time
ColumnTypes.CHECKBOX.value: CheckBoxColumn(), # 5. checkbox type
ColumnTypes.TEXT.value: TextColumn(), # 6. text type
ColumnTypes.MULTIPLE_SELECT.value: MultiSelectColumn(), # 7. multi-select type
ColumnTypes.LONG_TEXT.value: LongTextColumn(), # 8. long-text type
}
def get_column_by_type(column_type):
return COLUMN_MAP.get(column_type, TextColumn())
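# Illustrative usage (not part of the original module):
#   column = get_column_by_type(ColumnTypes.NUMBER.value)
#   cell = column.parse_table_value(42)
#   cell.greater_than(column.parse_input_value("10"))  # -> True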
| 3.328125 | 3 |
if_example.py | jakeflo88/pythonClass | 0 | 12757867 |
name = 'Bob'
if name == 'Alice':
print('Hi Alice')
print('Done')
| 2.46875 | 2 |
src/django-nonrel/django/forms/extras/__init__.py | adamjmcgrath/glancydesign | 790 | 12757868 |
from widgets import *
| 1.070313 | 1 |
3. Algorithms/grovers_algorithms_examples.py | apcarrik/qiskit | 0 | 12757869 | import pylab
import numpy as np
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit.tools.visualization import plot_histogram
from qiskit.algorithms import Grover, AmplificationProblem
from qiskit.circuit.library.phase_oracle import PhaseOracle
### Finding Solutions to 3-SAT Problems
input_3sat_instance = '''
c example DIMACS-CNF 3-SAT
p cnf 3 5
-1 -2 -3 0
1 -2 3 0
1 2 -3 0
1 -2 -3 0
-1 2 3 0
''' # example problem has 3 solutions: (1 -2 3), (-1 -2 -3), (1 2 -3)
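# DIMACS-CNF format: "p cnf 3 5" declares 3 variables and 5 clauses; each
# clause line lists literals (a negative number is a negated variable) and is
# terminated by 0.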
# Create corresponding oracle for Grover search using PhaseOracle (supports DIMACS-CNF format strings)
import os
import tempfile
from qiskit.exceptions import MissingOptionalLibraryError
fp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
fp.write(input_3sat_instance)
file_name = fp.name
fp.close()
oracle = None
try:
oracle = PhaseOracle.from_dimacs_file(file_name)
except MissingOptionalLibraryError as ex:
print(ex)
finally:
os.remove(file_name)
# create Grover instance using oracle
problem = None
if oracle is not None:
problem = AmplificationProblem(oracle, is_good_state=oracle.evaluate_bitstring)
# configure backend & run Grover instance to obtain result
backend = Aer.get_backend('aer_simulator')
quantum_instance = QuantumInstance(backend, shots=1024)
grover = Grover(quantum_instance=quantum_instance)
result = None
if problem is not None:
result = grover.amplify(problem)
print(result.assignment)
plot_histogram(result.circuit_results[0]).show()
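# Grover's search amplifies the amplitudes of the satisfying assignments, so
# the histogram should show the three solutions listed above measured far more
# often than the other five bitstrings.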
### Boolean Logical Expressions
# Construct oracle using arbitrary Boolean logic expression
expression = '(w ^ x) & ~(y ^ z) & (x & y & z)'
try:
oracle = PhaseOracle(expression)
problem = AmplificationProblem(oracle, is_good_state=oracle.evaluate_bitstring)
grover = Grover(quantum_instance=QuantumInstance(Aer.get_backend('aer_simulator'),
shots=1024))
result = grover.amplify(problem)
plot_histogram(result.circuit_results[0]).show()
except MissingOptionalLibraryError as ex:
print(ex) | 2.390625 | 2 |
tests/test_allauth_2f2a.py | jeremyagray/django-allauth-2f2a | 0 | 12757870 | # ******************************************************************************
#
# test_allauth_2f2a.py: allauth_2f2a tests
#
# SPDX-License-Identifier: Apache-2.0
#
# django-allauth-2f2a, a 2fa adapter for django-allauth.
#
# ******************************************************************************
#
# django-allauth-2f2a, a 2fa adapter for django-allauth.
#
# Copyright 2021 <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# ******************************************************************************
#
"""allauth_2f2a tests."""
import base64
import re
from urllib.parse import parse_qsl
from urllib.parse import urlencode
from urllib.parse import urlparse
from urllib.parse import urlunparse
from allauth.account.signals import user_logged_in
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.forms import ValidationError
from django.test import TestCase
from django.test import override_settings
from django.urls import reverse
from django_otp.oath import TOTP
from pyfakefs.fake_filesystem_unittest import patchfs
from allauth_2f2a import app_settings
from allauth_2f2a.middleware import BaseRequire2FAMiddleware
def normalize_url(url):
"""Sort the URL query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse(
(
scheme,
netloc,
path,
params,
urlencode(query_parts),
fragment,
)
)
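    # e.g. normalize_url("/path?b=2&a=1") -> "/path?a=1&b=2"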
class Test2Factor(TestCase):
"""2fa tests."""
def setUp(self):
"""Set up Test2Factor()."""
self.user_logged_in_count = 0
user_logged_in.connect(self._login_callback)
def tearDown(self):
"""Reset after each test."""
# Set TWOFA_FORMS to default.
setattr(
app_settings,
"TWOFA_FORMS",
{
"authenticate": "allauth_2f2a.forms.TOTPAuthenticateForm",
"device": "allauth_2f2a.forms.TOTPDeviceForm",
"remove": "allauth_2f2a.forms.TOTPDeviceRemoveForm",
},
)
def _login_callback(self, sender, **kwargs):
"""Increment the login count."""
self.user_logged_in_count += 1
def test_standard_login(self):
"""Should login if 2fa is not configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_2fa_login(self):
"""Should login when 2fa is configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": totp.token()},
)
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_2fa_setup(self):
"""Should setup device and redirect to backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
response = self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
def test_2fa_already_setup(self):
"""Should redirect to backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# GET the setup page again.
self.client.get(
reverse("two-factor-setup"),
)
# Since 2FA is configured, should redirect to backup token
# generator.
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
def test_2fa_generate_backup_tokens(self):
"""Should generate backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# POST to make new tokens.
response = self.client.post(
reverse("two-factor-backup"),
)
self.assertContains(
response,
"Two-Factor Authentication Backup Tokens",
)
def test_2fa_setup_bad_token(self):
"""Should raise ``django.forms.ValidationError``."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
self.client.get(
reverse("two-factor-setup"),
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": "123456",
},
)
# Should rerender the form successfully.
self.assertEqual(
response.status_code,
200,
)
# Should contain the error message.
self.assertContains(
response,
"Setup Two-Factor Authentication",
)
def test_2fa_remove(self):
"""Should remove device and redirect to backup tokens."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
response = self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
# Should redirect to 2FA backup token generator.
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# POST a device removal request.
response = self.client.post(
reverse("two-factor-remove"),
)
# Should redirect to 2FA setup.
self.assertRedirects(
response,
reverse("two-factor-setup"),
)
def test_2fa_login_custom_form(self):
"""Should login when 2fa is configured."""
setattr(
app_settings,
"TWOFA_FORMS",
{
"authentication": "tests.forms.CustomTOTPAuthenticateForm",
"device": "allauth_2f2a.forms.TOTPDeviceForm",
"remove": "allauth_2f2a.forms.TOTPDeviceRemoveForm",
},
)
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": totp.token()},
)
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_invalid_2fa_login(self):
"""Should not login when wrong 2fa code is provided."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Ensure that logging in does not work with invalid token
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": "invalid"},
)
self.assertEqual(resp.status_code, 200)
def test_2fa_redirect(self):
"""Should redirect if 2fa is not necessry."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Not logged in.
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp,
reverse("account_login"),
fetch_redirect_response=False,
)
# Logged in.
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp,
reverse("account_login"),
fetch_redirect_response=False,
)
def test_2fa_reset_flow(self):
"""Should redirect to login on 2fa interruption."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"), {"login": "john", "password": "<PASSWORD>"}
)
self.assertRedirects(
resp, reverse("two-factor-authenticate"), fetch_redirect_response=False
)
# The user ID should be in the session.
self.assertIn("allauth_2f2a_user_id", self.client.session)
# Navigate to a different page.
self.client.get(reverse("account_login"))
# The middleware should reset the login flow.
self.assertNotIn("allauth_2f2a_user_id", self.client.session)
# Trying to continue with two-factor without logging in again will
# redirect to login.
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp, reverse("account_login"), fetch_redirect_response=False
)
def test_2fa_login_forwarding_get_parameters(self):
"""Should pass route parameters through 2fa views."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
# Add a next to unnamed-view.
resp = self.client.post(
reverse("account_login") + "?existing=param&next=unnamed-view",
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# Ensure that the unnamed-view is still being forwarded to.
resp.redirect_chain[-1] = (
normalize_url(resp.redirect_chain[-1][0]),
resp.redirect_chain[-1][1],
)
self.assertRedirects(
resp,
normalize_url(
reverse("two-factor-authenticate")
+ "?existing=param&next=unnamed-view",
),
fetch_redirect_response=False,
)
def test_2fa_login_forwarding_next_via_post(self):
"""Should respect ``next`` parameter on POST."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
# Add a next to unnamed-view.
resp = self.client.post(
reverse("account_login") + "?existing=param",
{"login": "john", "password": "<PASSWORD>", "next": "unnamed-view"},
follow=True,
)
# Ensure that the unnamed-view is still being forwarded to,
# preserving existing query params.
resp.redirect_chain[-1] = (
normalize_url(resp.redirect_chain[-1][0]),
resp.redirect_chain[-1][1],
)
self.assertRedirects(
resp,
normalize_url(
reverse("two-factor-authenticate") + "?existing=param&next=unnamed-view"
),
fetch_redirect_response=False,
)
def test_anonymous(self):
"""Anonymous users should not access 2fa views."""
# The authentication page redirects to the login page.
url = reverse("two-factor-authenticate")
resp = self.client.get(url)
self.assertRedirects(
resp, reverse("account_login"), fetch_redirect_response=False
)
# Some pages redirect to the login page and then will redirect back.
for url in [
"two-factor-setup",
"two-factor-backup",
"two-factor-remove",
]:
url = reverse(url)
resp = self.client.get(url)
self.assertRedirects(
resp,
reverse("account_login") + "?next=" + url,
fetch_redirect_response=False,
)
def test_unnamed_view(self):
"""Should reset login if 2fa is interrupted."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"), {"login": "john", "password": "<PASSWORD>"}
)
self.assertRedirects(
resp, reverse("two-factor-authenticate"), fetch_redirect_response=False
)
# The user ID should be in the session.
self.assertIn("allauth_2f2a_user_id", self.client.session)
# Navigate to a different (unnamed) page.
resp = self.client.get("/unnamed-view")
# The middleware should reset the login flow.
self.assertNotIn("allauth_2f2a_user_id", self.client.session)
# Trying to continue with two-factor without logging in again
# will redirect to login.
resp = self.client.get(reverse("two-factor-authenticate"))
self.assertRedirects(
resp, reverse("account_login"), fetch_redirect_response=False
)
def test_backwards_compatible_url(self):
"""Should still work."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
# The old URL doesn't have a trailing slash.
url = reverse("two-factor-authenticate").rstrip("/")
resp = self.client.post(url, {"otp_token": totp.token()})
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
# Ensure the signal is received as expected.
self.assertEqual(self.user_logged_in_count, 1)
def test_not_configured_redirect(self):
"""Should redirect if 2fa is not configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# The 2FA pages should redirect.
for url_name in ["two-factor-backup", "two-factor-remove"]:
resp = self.client.get(reverse(url_name))
self.assertRedirects(
resp,
reverse("two-factor-setup"),
fetch_redirect_response=False,
)
class Require2FA(BaseRequire2FAMiddleware):
    """Always require 2fa."""
    def require_2fa(self, request):
        """Require 2fa for every request."""
        return True
class NoRequire2FA(BaseRequire2FAMiddleware):
    """Never require 2fa."""
    def require_2fa(self, request):
        """2fa is not required for any request."""
        return False
class Require2FANonexistentAllowed(BaseRequire2FAMiddleware):
    """Always require 2fa, with some allowed pages (one of them nonexistent)."""
allowed_pages = [
"bob-is-your-uncle",
"account_logout",
"account_change_password",
"account_reset_password",
"two-factor-setup",
]
def require_2fa(self, request):
"""Determine if 2fa is required if configured."""
return True
class Require2FAWithMessage(BaseRequire2FAMiddleware):
    """Always require 2fa and add a message."""
def require_2fa(self, request):
"""Determine if 2fa is required and add a message."""
messages.info(
request,
"2fa required",
extra_tags="2fa_required",
)
return True
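# BaseRequire2FAMiddleware subclasses implement require_2fa(request) -> bool;
# allowed_pages lists URL names that stay reachable without 2FA (the
# nonexistent name above exists to exercise the warning path tested below).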
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE
+ ("allauth_2f2a.middleware.BaseRequire2FAMiddleware",),
)
class TestRequire2FAMiddlewareNotConfigured(TestCase):
"""Unconfigured 2fa middleware tests."""
def test_require2fa_not_implemented(self):
"""Should raise ``NotImplementedError``."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
self.assertRaises(
NotImplementedError,
self.client.post,
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE + ("tests.test_allauth_2f2a.Require2FA",),
)
class TestRequire2FAMiddleware(TestCase):
"""2fa middleware tests."""
def test_no_2fa(self):
"""Should redirect to setup if 2fa is not configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to the 2FA setup page.
self.assertRedirects(
resp,
reverse("two-factor-setup"),
fetch_redirect_response=False,
)
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE + ("tests.test_allauth_2f2a.NoRequire2FA",),
)
def test_no_2fa_not_required(self):
"""Should redirect to ``LOGIN_REDIRECT_URL``."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to ``LOGIN_REDIRECT_URL``.
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
@override_settings(
# Don't redirect to an "allowed" URL.
LOGIN_REDIRECT_URL="/unnamed-view",
# Add the middleware that requires 2FA.
MIDDLEWARE=settings.MIDDLEWARE
+ ("tests.test_allauth_2f2a.Require2FANonexistentAllowed",),
)
def test_no_2fa_nonexistent_allowed(self):
"""Should warn on nonexistent URL."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
with self.assertWarns(UserWarning):
self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
def test_2fa(self):
"""Should login when 2fa is configured."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
totp_model = user.totpdevice_set.create()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
self.assertRedirects(
resp,
reverse("two-factor-authenticate"),
fetch_redirect_response=False,
)
# Now ensure that logging in actually works.
totp = TOTP(
totp_model.bin_key,
totp_model.step,
totp_model.t0,
totp_model.digits,
)
resp = self.client.post(
reverse("two-factor-authenticate"),
{"otp_token": totp.token()},
)
# The user ends up on the normal redirect login page.
self.assertRedirects(
resp,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
def test_2fa_already_configured(self):
"""Should access all URLs.."""
# Create a user.
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
# Login.
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
)
# GET the setup page.
response = self.client.get(
reverse("two-factor-setup"),
)
# Find the device created by GET.
device = user.totpdevice_set.filter(confirmed=False).first()
# Calculate the token.
totp = TOTP(
device.bin_key,
device.step,
device.t0,
device.digits,
)
# POST the token to the setup page.
response = self.client.post(
reverse("two-factor-setup"),
{
"token": totp.token(),
},
)
# Should redirect to 2FA backup token generator.
self.assertRedirects(
response,
reverse("two-factor-backup"),
)
# Load a 2fa protected URL.
response = self.client.get(
settings.LOGIN_REDIRECT_URL,
)
# Should load successfully.
self.assertEqual(
response.status_code,
200,
)
@override_settings(
INSTALLED_APPS=settings.INSTALLED_APPS + ("django.contrib.messages",),
# This doesn't seem to stack nicely with the class-based one,
# so add the middleware here.
MIDDLEWARE=settings.MIDDLEWARE
+ (
"tests.test_allauth_2f2a.Require2FA",
"django.contrib.messages.middleware.MessageMiddleware",
),
)
def test_no_2fa_messages(self):
"""Should redirect to 2fa setup."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
resp = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to the 2FA setup page.
self.assertRedirects(
resp, reverse("two-factor-setup"), fetch_redirect_response=False
)
@override_settings(
INSTALLED_APPS=settings.INSTALLED_APPS + ("django.contrib.messages",),
MIDDLEWARE=settings.MIDDLEWARE
+ (
"tests.test_allauth_2f2a.Require2FAWithMessage",
"django.contrib.messages.middleware.MessageMiddleware",
),
)
def test_with_2fa_messages(self):
"""Should redirect to 2fa setup."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
response = self.client.post(
reverse("account_login"),
{"login": "john", "password": "<PASSWORD>"},
follow=True,
)
# The user is redirected to the 2FA setup page.
self.assertRedirects(
response,
reverse("two-factor-setup"),
fetch_redirect_response=False,
)
class TestQRCodeGeneration(TestCase):
"""Tests for QR code generation via file or data: protocol."""
def tearDown(self):
"""Reset settings to default."""
setattr(app_settings, "QRCODE_TYPE", "data")
def test_2fa_setup_data(self):
"""Test 2FA setup using 'data:' protocol."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
self.client.post(reverse("account_login"), {"login": "john", "password": "<PASSWORD>"})
response = self.client.get(reverse("two-factor-setup"))
# Should have the data: URI.
self.assertRegex(
response.content.decode(),
r"data:image\/svg\+xml;base64,",
)
# Should have a valid SVG image in the base64 string.
# Get the base64 string.
svg_match = re.search(
r"\"data:image\/svg\+xml;base64,(.*?)\"",
response.content.decode(),
)
# Assert the string is base64 encoded.
self.assertEqual(
svg_match.group(1),
base64.b64encode(base64.b64decode(svg_match.group(1))).decode(),
)
# Assert the string is a valid SVG image. Well, SVGish at least.
self.assertRegex(
base64.b64decode(svg_match.group(1)).decode(),
r"<svg.*?>",
)
@patchfs
def test_2fa_setup_file(self, fs):
"""Test 2FA setup using an SVG file."""
# Create the fake qrcodes directory.
fs.create_dir("qrcodes")
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
setattr(app_settings, "QRCODE_TYPE", "file")
self.client.post(reverse("account_login"), {"login": "john", "password": "<PASSWORD>"})
response = self.client.get(reverse("two-factor-setup"))
self.assertRegex(
response.content.decode(),
r"qrcodes\/[a-f0-9]{32}\.svg",
)
def test_2fa_setup_file_no_dir(self):
"""Test 2FA setup using an SVG file without the qr code directory."""
user = get_user_model().objects.create(username="john")
user.set_password("<PASSWORD>")
user.save()
setattr(app_settings, "QRCODE_TYPE", "file")
self.client.post(
reverse("account_login"),
{
"login": "john",
"password": "<PASSWORD>",
},
)
self.assertRaises(
ImproperlyConfigured,
self.client.get,
reverse("two-factor-setup"),
)
| 1.578125 | 2 |
src/ram/service/input.py | bootforce-dev/ram-framework | 1 | 12757871 | import cPickle as pickle
from ram.classes.module import UnitService
from ram.classes.module import UseFilename
from ram.classes import DumbResults
import ram.process
from ram.osutils import setenv
class __api__(UnitService):
"""runs dialogs to interact with user
To run dialogs for the unit:
$ ram input <namepath> [<param>] ...
"""
_wrapper = UseFilename('input', required=True)
_results = DumbResults
def __call__(self, ctx, *args, **kwargs):
setenv('RAMARGS', pickle.dumps(args))
if ctx.filename:
ram.process.invoke(ctx.filename, *args, environ=ctx._environ())
| 2.203125 | 2 |
spiders/baike/node_manager.py | aollio/toys | 0 | 12757872 |
#!/usr/bin/env python3
from os import path
from multiprocessing.managers import BaseManager
from multiprocessing import Process, Queue
import time
from url_manager import UrlManager
from data_output import DataOutput
__author__ = '<NAME>'
__email__ = '<EMAIL>'
class NodeManager:
def start_manager(self, url_q, result_q):
"""
        Create a distributed manager.
        :param url_q: URL queue
        :param result_q: result queue
:return:
"""
BaseManager.register('get_task_queue', callable=lambda: url_q)
BaseManager.register('get_result_queue', callable=lambda: result_q)
        # Bind port 8001 and set the auth key 'baike'; this is effectively the manager's initialization
manager = BaseManager(address=('127.0.0.1', 8001), authkey=b'baike')
return manager
def url_manager_proc(self, url_q: Queue, conn_q: Queue, root_url):
print('url manager process start...')
url_manager = UrlManager()
url_manager.add_new_url(root_url)
print('url manager process started...')
while True:
while url_manager.has_new_url():
new_url = url_manager.get_new_url()
print('new_url', new_url)
                # Send the new URL to the worker nodes
url_q.put(new_url)
            # Stop after 2000 links have been crawled and save the progress
            if url_manager.old_url_size() > 2000:
                # Notify the crawler nodes that the work is finished
                url_q.put('end')
                print('Control node issued the end notification')
                # Shut down the manager node and persist the URL set state
url_manager.save_process(path.join('dist', 'new_urls.txt'), url_manager.new_urls)
url_manager.save_process(path.join('dist', 'old_urls.txt'), url_manager.old_urls)
return
            # Add the URLs received from result_solve_proc to the URL manager
try:
if not conn_q.empty():
urls = conn_q.get()
url_manager.add_new_urls(urls)
            except Exception:
                time.sleep(0.1)
def result_solve_proc(self, result_q: Queue, conn_q: Queue, store_q: Queue):
while True:
try:
if not result_q.empty():
content = result_q.get()
if content['new_urls'] == 'end':
                        print('Result analysis process received the end notification, exiting!')
store_q.put('end')
return
                    conn_q.put(content['new_urls'])  # new_urls is a set
                    store_q.put(content['data'])  # the parsed data is a dict
else:
time.sleep(0.1)
            except Exception:
                time.sleep(0.1)
def store_proc(self, store_q: Queue):
output = DataOutput()
while True:
if not store_q.empty():
data = store_q.get()
if data == 'end':
                    print('Storage process received the end notification, exiting!')
output.flush_data()
output.output_end(output.filepath)
return
output.store_data(data)
else:
time.sleep(0.1)
def main():
print('init...')
    # Initialize the communication channels needed by the manager processes
    # url_q passes URLs from the URL manager process to the crawler nodes
    url_q = Queue()
    # result_q returns data from the crawler nodes to the data extraction process
    result_q = Queue()
    # conn_q submits new URLs from the data extraction process to the URL manager process
    conn_q = Queue()
    # store_q passes parsed data from the data extraction process to the storage process
    store_q = Queue()
    # Create the distributed manager
node = NodeManager()
manager = node.start_manager(url_q, result_q)
    # Create the URL manager, data extraction, and data storage processes
root_url = "https://baike.baidu.com/item/网络爬虫/5162711"
url_manager_proc = Process(target=node.url_manager_proc, args=(url_q, conn_q, root_url))
result_solve_proc = Process(target=node.result_solve_proc, args=(result_q, conn_q, store_q))
store_proc = Process(target=node.store_proc, args=(store_q,))
    # Start the three processes and the distributed manager
url_manager_proc.start()
result_solve_proc.start()
store_proc.start()
print('init finish.')
manager.get_server().serve_forever()
if __name__ == '__main__':
main()
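
# A minimal sketch of the matching crawler-node side (illustrative only; the
# registration names, address and authkey come from start_manager above,
# everything else is an assumption about the worker implementation):
#
#     from multiprocessing.managers import BaseManager
#
#     BaseManager.register('get_task_queue')
#     BaseManager.register('get_result_queue')
#     m = BaseManager(address=('127.0.0.1', 8001), authkey=b'baike')
#     m.connect()
#     task_q, result_q = m.get_task_queue(), m.get_result_queue()
#     while True:
#         url = task_q.get()
#         if url == 'end':
#             break
#         # ... crawl `url`, then report back:
#         result_q.put({'new_urls': set(), 'data': {}})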
| 2.546875 | 3 |
geomdl/multi.py | Maik93/NURBS-Python | 382 | 12757873 | """
.. module:: Multi
:platform: Unix, Windows
:synopsis: Provides container classes for spline geoemtries
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import abc
import warnings
from functools import partial
from multiprocessing import Value, Lock
from . import abstract
from . import vis
from . import voxelize
from . import utilities
from . import tessellate
from . import _utilities as utl
from .exceptions import GeomdlException
@utl.add_metaclass(abc.ABCMeta)
class AbstractContainer(abstract.GeomdlBase):
""" Abstract class for geometry containers.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`sample_size`
"""
def __init__(self, *args, **kwargs):
self._pdim = 0 if not hasattr(self, '_pdim') else self._pdim # number of parametric dimensions
self._dinit = 0.01 if not hasattr(self, '_dinit') else self._dinit # delta initialization value
super(AbstractContainer, self).__init__(**kwargs)
self._geometry_type = "container"
self._name = self._geometry_type
self._delta = [float(self._dinit) for _ in range(self._pdim)] # evaluation delta
self._elements = [] # list of elements contained
self._vis_component = None # visualization component
self._cache['evalpts'] = []
def __iter__(self):
self._iter_index = 0
return self
def next(self):
return self.__next__()
def __next__(self):
try:
result = self._elements[self._iter_index]
except IndexError:
raise StopIteration
self._iter_index += 1
return result
def __reversed__(self):
return reversed(self._elements)
def __getitem__(self, index):
return self._elements[index]
def __len__(self):
return len(self._elements)
def __add__(self, other):
if not isinstance(other, self.__class__):
raise GeomdlException("Cannot add non-matching container types")
self.add(other)
return self
@property
def pdimension(self):
""" Parametric dimension.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the parametric dimension
:type: int
"""
return self._pdim
@property
def evalpts(self):
""" Evaluated points.
        Since a container holds multiple geometry objects, the evaluated points are returned as a list of
        per-geometry point lists, where each point is a list of Cartesian coordinates.
The following code example illustrates these details:
.. code-block:: python
:linenos:
multi_obj = multi.SurfaceContainer() # it can also be multi.CurveContainer()
# Add geometries to multi_obj via multi_obj.add() method
# Then, the following loop will print all the evaluated points of the Multi object
for idx, mpt in enumerate(multi_obj.evalpts):
print("Shape", idx+1, "contains", len(mpt), "points. These points are:")
for pt in mpt:
line = ", ".join([str(p) for p in pt])
print(line)
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the evaluated points of all contained geometries
"""
if not self._cache['evalpts']:
for elem in self._elements:
elem.delta = self._delta[0] if self._pdim == 1 else self._delta
evalpts = elem.evalpts
self._cache['evalpts'] += evalpts
return self._cache['evalpts']
@property
def bbox(self):
""" Bounding box.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the bounding box of all contained geometries
"""
all_box = []
for elem in self._elements:
all_box += list(elem.bbox)
return utilities.evaluate_bounding_box(all_box)
@property
def vis(self):
""" Visualization component.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the visualization component
:setter: Sets the visualization component
"""
return self._vis_component
@vis.setter
def vis(self, value):
if not isinstance(value, vis.VisAbstract):
warnings.warn("Visualization component is NOT an instance of the vis.VisAbstract class")
return
self._vis_component = value
@property
def delta(self):
""" Evaluation delta (for all parametric directions).
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta value, the smoother the shape.
The following figure illustrates the working principles of the delta property:
.. math::
\\left[{{u_{start}},{u_{start}} + \\delta ,({u_{start}} + \\delta ) + \\delta , \\ldots ,{u_{end}}} \\right]
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
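
        A short usage sketch (``container`` stands for any geometry container
        instance):

        .. code-block:: python

            container.delta = 0.05   # same step size for every parametric direction
            pts = container.evalpts  # points are re-evaluated with the new delta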
:getter: Gets the delta value
:setter: Sets the delta value
"""
return self._delta[0] if self._pdim == 1 else self._delta
@delta.setter
def delta(self, value):
if self._pdim == 1 and isinstance(value, (int, float)):
delta_vals = [value]
else:
if isinstance(value, (list, tuple)):
if len(value) != self._pdim:
raise ValueError("The input must be a list of a tuple with a length of " + str(self._pdim))
delta_vals = value
elif isinstance(value, (int, float)):
delta_vals = [value for _ in range(self._pdim)]
else:
raise TypeError("Unsupported input type for evaluation delta. Use float, list or tuple")
# Set delta values
for idx, dval in enumerate(delta_vals):
self._delta_setter_common(idx, dval)
# Reset the cache
self.reset()
def _delta_setter_common(self, idx, value):
# Check and set the delta value corresponding to the idx-th parametric dimension
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Evaluation delta should be between 0.0 and 1.0. You are trying to set it to " + str(value)
+ " for the " + str(idx + 1) + "st parametric dimension.")
self._delta[idx] = float(value)
@property
def sample_size(self):
""" Sample size (for all parametric directions).
Sample size defines the number of points to evaluate. It also sets the ``delta`` property.
The following figure illustrates the working principles of sample size property:
.. math::
\\underbrace {\\left[ {{u_{start}}, \\ldots ,{u_{end}}} \\right]}_{{n_{sample}}}
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size
:setter: Sets sample size
"""
ssz = [self._sample_size_getter_common(idx) for idx in range(self._pdim)]
return ssz[0] if self._pdim == 1 else ssz
@sample_size.setter
def sample_size(self, value):
if self._pdim == 1 and isinstance(value, (int, float)):
ssz = [value]
else:
if isinstance(value, (list, tuple)):
if len(value) != self._pdim:
raise ValueError("The input must be a list of a tuple with a length of " + str(self._pdim))
ssz = value
elif isinstance(value, (int, float)):
ssz = [value for _ in range(self._pdim)]
else:
raise TypeError("Unsupported input type for sample size. Use float, list or tuple")
# Set sample size
for idx, sval in enumerate(ssz):
self._sample_size_setter_common(idx, sval)
# Reset the cache
self.reset()
def _sample_size_getter_common(self, idx):
return int(1 / self._delta[idx]) + 1
def _sample_size_setter_common(self, idx, value):
# Check and set the delta value corresponding to the idx-th parametric dimension
        if not isinstance(value, int):
            raise GeomdlException("Sample size must be an integer value of 2 or bigger")
        if value < 2:
            raise GeomdlException("Sample size must be an integer value of 2 or bigger")
self._delta[idx] = 1.0 / float(value - 1)
@property
def data(self):
""" Returns a dict which contains the geometry data.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
"""
return tuple([e.data for e in self._elements])
def add(self, element):
""" Adds geometry objects to the container.
The input can be a single geometry, a list of geometry objects or a geometry container object.
:param element: geometry object
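
        A short example (``surf1``, ``surf2`` and ``surf3`` are illustrative
        surface objects):

        .. code-block:: python

            container = SurfaceContainer()
            container.add(surf1)           # a single geometry
            container.add([surf2, surf3])  # or a list/tuple of geometries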
"""
if isinstance(element, (self.__class__, list, tuple)):
for elem in element:
self.add(elem)
elif hasattr(self, '_pdim'):
if element.pdimension == self.pdimension:
if self.dimension == 0:
self._dimension = element.dimension
else:
if self.dimension != element.dimension:
raise GeomdlException("The spatial dimensions of the container and the input must be the same")
self._elements.append(element)
else:
raise GeomdlException("Cannot add the element to the container")
# Reset the cache
self.reset()
# Make container look like a list
append = add
def reset(self):
""" Resets the cache. """
self._cache['evalpts'][:] = []
    # Runs the visualization component to render the geometry
@abc.abstractmethod
def render(self, **kwargs):
""" Renders plots using the visualization component.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
pass
@utl.export
class CurveContainer(AbstractContainer):
""" Container class for storing multiple curves.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`sample_size`
The following code example illustrates the usage of the Python properties:
.. code-block:: python
# Create a multi-curve container instance
mcrv = multi.CurveContainer()
# Add single or multi curves to the multi container using mcrv.add() command
# Addition operator, e.g. mcrv1 + mcrv2, also works
# Set the evaluation delta of the multi-curve
mcrv.delta = 0.05
# Get the evaluated points
curve_points = mcrv.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 1 # number of parametric dimensions
self._dinit = 0.01 # evaluation delta
super(CurveContainer, self).__init__(*args, **kwargs)
for arg in args:
self.add(arg)
def render(self, **kwargs):
""" Renders the curves.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points grid
* ``evalcolor``: sets the color of the surface
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the curves inside the container. *Default: False*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
"""
if not self._vis_component:
warnings.warn("No visualization component has set")
return
# Get the color values from keyword arguments
cpcolor = kwargs.get('cpcolor')
evalcolor = kwargs.get('evalcolor')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
animate_plot = kwargs.get('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.get('delta', True)
reset_names = kwargs.get('reset_names', False)
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
if len(cpcolor) < len(self._elements):
raise ValueError("The number of color values in 'cpcolor' (" + str(len(cpcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
if len(evalcolor) < len(self._elements):
raise ValueError("The number of color values in 'evalcolor' (" + str(len(evalcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
if update_delta:
elem.delta = self.delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "curve"
# Fix element name
if elem.name == "curve":
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(cpcolor, evalcolor, idx=idx)
self._vis_component.add(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
self._vis_component.add(ptsarr=elem.evalpts, name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
@utl.export
class SurfaceContainer(AbstractContainer):
""" Container class for storing multiple surfaces.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`delta_u`
* :py:attr:`delta_v`
* :py:attr:`sample_size`
* :py:attr:`sample_size_u`
* :py:attr:`sample_size_v`
* :py:attr:`tessellator`
* :py:attr:`vertices`
* :py:attr:`faces`
The following code example illustrates the usage of these Python properties:
.. code-block:: python
# Create a multi-surface container instance
msurf = multi.SurfaceContainer()
# Add single or multi surfaces to the multi container using msurf.add() command
# Addition operator, e.g. msurf1 + msurf2, also works
# Set the evaluation delta of the multi-surface
msurf.delta = 0.05
# Get the evaluated points
surface_points = msurf.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 2 # number of parametric dimensions
self._dinit = 0.05 # evaluation delta
super(SurfaceContainer, self).__init__(*args, **kwargs)
self._cache['vertices'] = []
self._cache['faces'] = []
for arg in args:
self.add(arg)
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the u-direction
:setter: Sets the delta value for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
self._delta_setter_common(0, value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the v-direction
:setter: Sets the delta value for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
self._delta_setter_common(1, value)
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return self._sample_size_getter_common(0)
@sample_size_u.setter
def sample_size_u(self, value):
self._sample_size_setter_common(0, value)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return self._sample_size_getter_common(1)
@sample_size_v.setter
def sample_size_v(self, value):
self._sample_size_setter_common(1, value)
@property
def tessellator(self):
""" Tessellation component of the surfaces inside the container.
Please refer to :doc:`Tessellation <module_tessellate>` documentation for details.
.. code-block:: python
:linenos:
from geomdl import multi
from geomdl import tessellate
# Create the surface container
surf_container = multi.SurfaceContainer(surf_list)
# Set tessellator component
surf_container.tessellator = tessellate.TrimTessellate()
:getter: gets the tessellation component
:setter: sets the tessellation component
"""
tsl_comps = []
for idx in range(len(self._elements)):
tsl_comps.append(self._elements[idx].tessellator)
return tsl_comps
@tessellator.setter
def tessellator(self, value):
# Set tessellation component
for idx in range(len(self._elements)):
self._elements[idx].tessellator = value.__class__()
@property
def vertices(self):
""" Vertices generated by the tessellation operation.
If the tessellation component is set to None, the result will be an empty list.
:getter: Gets the vertices
"""
if not self._cache['vertices']:
self.tessellate()
return self._cache['vertices']
@property
def faces(self):
""" Faces (triangles, quads, etc.) generated by the tessellation operation.
If the tessellation component is set to None, the result will be an empty list.
:getter: Gets the faces
"""
if not self._cache['faces']:
self.tessellate()
return self._cache['faces']
def tessellate(self, **kwargs):
""" Tessellates the surfaces inside the container.
Keyword arguments are directly passed to the tessellation component.
The following code snippet illustrates getting the vertices and faces of the surfaces inside the container:
.. code-block:: python
:linenos:
# Tessellate the surfaces inside the container
surf_container.tessellate()
# Vertices and faces are stored inside the tessellator component
tsl = surf_container.tessellator
# Loop through all tessellator components
for t in tsl:
# Get the vertices
vertices = t.tessellator.vertices
# Get the faces (triangles, quads, etc.)
faces = t.tessellator.faces
Keyword Arguments:
* ``num_procs``: number of concurrent processes for tessellating the surfaces. *Default: 1*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``force``: flag to force tessellation. *Default: False*
"""
# Keyword arguments
force_tsl = kwargs.get('force', False)
update_delta = kwargs.pop('delta', True)
# Don't re-tessellate if everything is in place
if all((self._cache['vertices'], self._cache['faces'])) and not force_tsl:
return
# Tessellate the surfaces in the container
num_procs = kwargs.pop('num_procs', 1)
new_elems = []
if num_procs > 1:
with utl.pool_context(processes=num_procs) as pool:
tmp_elem = pool.map(partial(process_tessellate, delta=self.delta, update_delta=update_delta, **kwargs),
self._elements)
new_elems += tmp_elem
else:
for idx in range(len(self._elements)):
tmp_elem = process_tessellate(self._elements[idx], delta=self.delta, update_delta=update_delta, **kwargs)
new_elems.append(tmp_elem)
self._elements = new_elems
# Update caches
verts = []
faces = []
v_offset = 0
f_offset = 0
for elem in self._elements:
v = elem.vertices
for i in range(len(v)):
v[i].id += v_offset
verts += v
f = elem.faces
for i in range(len(f)):
f[i].id += f_offset
# for j in range(len(f[i]._data)):
# f[i]._data[j].id += v_offset
faces += f
v_offset += len(v)
f_offset += len(f)
self._cache['vertices'] = verts
self._cache['faces'] = faces
def reset(self):
""" Resets the cache. """
super(SurfaceContainer, self).reset()
self._cache['vertices'][:] = []
self._cache['faces'][:] = []
def render(self, **kwargs):
""" Renders the surfaces.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points grids
* ``evalcolor``: sets the color of the surface
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``colormap``: sets the colormap of the surfaces
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the surfaces inside the container. *Default: False*
* ``num_procs``: number of concurrent processes for rendering the surfaces. *Default: 1*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
        Please note that the ``colormap`` argument only works with visualization classes that support colormaps. As an
        example, please see the :py:class:`.VisMPL.VisSurfTriangle()` class documentation. This method expects multiple
        colormap inputs as a list or tuple, preferably with the same length as the number of surfaces contained in the
        container. If the number of surfaces is bigger than the number of input colormaps, this method will
        automatically assign a random color to the remaining surfaces.
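
        A short usage sketch (the colormap values are illustrative):

        .. code-block:: python

            from geomdl.visualization import VisMPL
            from matplotlib import cm

            surf_container.vis = VisMPL.VisSurfTriangle()
            surf_container.render(colormap=[cm.coolwarm, cm.viridis])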
"""
# Validation
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
# Get the color values from keyword arguments
cpcolor = kwargs.get('cpcolor')
evalcolor = kwargs.get('evalcolor')
trimcolor = kwargs.get('trimcolor', 'black')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
animate_plot = kwargs.get('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.get('delta', True)
reset_names = kwargs.get('reset_names', False)
# Number of parallel processes
num_procs = kwargs.get('num_procs', 1)
force_tsl = bool(kwargs.pop('force', False)) # flag to force re-tessellation
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
            if len(cpcolor) != len(self._elements):
                raise ValueError("The number of colors in 'cpcolor' (" + str(len(cpcolor)) +
                                 ") must be equal to the number of geometries contained (" +
                                 str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
            if len(evalcolor) != len(self._elements):
                raise ValueError("The number of colors in 'evalcolor' (" + str(len(evalcolor)) +
                                 ") must be equal to the number of geometries contained (" +
                                 str(len(self._elements)) + ")")
# Get colormaps as a list
surf_cmaps = kwargs.get('colormap', [])
if not isinstance(surf_cmaps, (list, tuple)):
warnings.warn("Expecting a list of colormap values, not " + str(type(surf_cmaps)))
surf_cmaps = []
# Run the visualization component
self._vis_component.clear()
vis_list = []
if num_procs > 1:
mp_lock = Lock()
mp_val = Value('i', 0)
with utl.pool_context(initializer=mp_init, initargs=(mp_lock, mp_val), processes=num_procs) as pool:
tmp = pool.map(partial(process_elements_surface, mconf=self._vis_component.mconf,
colorval=(cpcolor, evalcolor, trimcolor), idx=-1, force_tsl=force_tsl,
update_delta=update_delta, delta=self.delta, reset_names=reset_names),
self._elements)
vis_list += tmp
else:
for idx, elem in enumerate(self._elements):
tmp = process_elements_surface(elem, self._vis_component.mconf, (cpcolor, evalcolor, trimcolor),
idx, force_tsl, update_delta, self.delta, reset_names)
vis_list += tmp
for vl in vis_list:
if isinstance(vl, dict):
self._vis_component.add(**vl)
else:
for v in vl:
self._vis_component.add(**v)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmaps)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmaps)
@utl.export
class VolumeContainer(AbstractContainer):
""" Container class for storing multiple volumes.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type`
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`delta_u`
* :py:attr:`delta_v`
* :py:attr:`delta_w`
* :py:attr:`sample_size`
* :py:attr:`sample_size_u`
* :py:attr:`sample_size_v`
* :py:attr:`sample_size_w`
The following code example illustrates the usage of these Python properties:
.. code-block:: python
# Create a multi-volume container instance
mvol = multi.VolumeContainer()
# Add single or multi volumes to the multi container using mvol.add() command
# Addition operator, e.g. mvol1 + mvol2, also works
# Set the evaluation delta of the multi-volume
mvol.delta = 0.05
# Get the evaluated points
volume_points = mvol.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 3 # number of parametric dimensions
self._dinit = 0.1 # evaluation delta
        super(VolumeContainer, self).__init__(*args, **kwargs)
for arg in args:
self.add(arg)
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the u-direction
:setter: Sets the delta value for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
self._delta_setter_common(0, value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the v-direction
:setter: Sets the delta value for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
self._delta_setter_common(1, value)
@property
def delta_w(self):
""" Evaluation delta for the w-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
        Therefore, the smaller the delta, the smoother the shape.
Please note that ``delta_w`` and ``sample_size_w`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_w`` will also set ``sample_size_w``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the w-direction
:setter: Sets the delta value for the w-direction
:type: float
"""
return self._delta[2]
@delta_w.setter
def delta_w(self, value):
self._delta_setter_common(2, value)
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return self._sample_size_getter_common(0)
@sample_size_u.setter
def sample_size_u(self, value):
self._sample_size_setter_common(0, value)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return self._sample_size_getter_common(1)
@sample_size_v.setter
def sample_size_v(self, value):
self._sample_size_setter_common(1, value)
@property
def sample_size_w(self):
""" Sample size for the w-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_w`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the w-direction
:setter: Sets sample size for the w-direction
:type: int
"""
return self._sample_size_getter_common(2)
@sample_size_w.setter
def sample_size_w(self, value):
self._sample_size_setter_common(2, value)
def render(self, **kwargs):
""" Renders the volumes.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points plot
* ``evalcolor``: sets the color of the volume
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the volumes inside the container. *Default: False*
* ``grid_size``: grid size for voxelization. *Default: (16, 16, 16)*
* ``num_procs``: number of concurrent processes for voxelization. *Default: 1*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
"""
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
cpcolor = kwargs.pop('cpcolor', None)
evalcolor = kwargs.pop('evalcolor', None)
filename = kwargs.pop('filename', None)
plot_visible = kwargs.pop('plot', True)
animate_plot = kwargs.pop('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.pop('delta', True)
reset_names = kwargs.get('reset_names', False)
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
            if len(cpcolor) != len(self._elements):
                raise ValueError("The number of colors in 'cpcolor' (" + str(len(cpcolor)) +
                                 ") must be equal to the number of geometries contained (" +
                                 str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
            if len(evalcolor) != len(self._elements):
                raise ValueError("The number of colors in 'evalcolor' (" + str(len(evalcolor)) +
                                 ") must be equal to the number of geometries contained (" +
                                 str(len(self._elements)) + ")")
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
if update_delta:
elem.delta = self.delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "volume"
# Fix element name
if elem.name == "volume":
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(cpcolor, evalcolor, idx=idx)
# Add control points
if self._vis_component.mconf['ctrlpts'] == 'points':
self._vis_component.add(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
# Add evaluated points
if self._vis_component.mconf['evalpts'] == 'points':
self._vis_component.add(ptsarr=elem.evalpts, name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Add evaluated points as voxels
if self._vis_component.mconf['evalpts'] == 'voxels':
grid, filled = voxelize.voxelize(elem, **kwargs)
polygrid = voxelize.convert_bb_to_faces(grid)
self._vis_component.add(ptsarr=[polygrid, filled], name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
def select_color(cpcolor, evalcolor, idx=0):
""" Selects item color for plotting.
:param cpcolor: color for control points grid item
:type cpcolor: str, list, tuple
:param evalcolor: color for evaluated points grid item
:type evalcolor: str, list, tuple
:param idx: index of the current geometry object
:type idx: int
:return: a list of color values
:rtype: list
"""
# Random colors by default
color = utilities.color_generator()
# Constant color for control points grid
if isinstance(cpcolor, str):
color[0] = cpcolor
# User-defined color for control points grid
if isinstance(cpcolor, (list, tuple)):
color[0] = cpcolor[idx]
# Constant color for evaluated points grid
if isinstance(evalcolor, str):
color[1] = evalcolor
# User-defined color for evaluated points grid
if isinstance(evalcolor, (list, tuple)):
color[1] = evalcolor[idx]
return color
def process_tessellate(elem, update_delta, delta, **kwargs):
""" Tessellates surfaces.
.. note:: Helper function required for ``multiprocessing``
:param elem: surface
:type elem: abstract.Surface
:param update_delta: flag to control evaluation delta updates
:type update_delta: bool
:param delta: evaluation delta
:type delta: list, tuple
:return: updated surface
:rtype: abstract.Surface
"""
if update_delta:
elem.delta = delta
elem.evaluate()
elem.tessellate(**kwargs)
return elem
def process_elements_surface(elem, mconf, colorval, idx, force_tsl, update_delta, delta, reset_names):
""" Processes visualization elements for surfaces.
.. note:: Helper function required for ``multiprocessing``
:param elem: surface
:type elem: abstract.Surface
:param mconf: visualization module configuration
:type mconf: dict
:param colorval: color values
:type colorval: tuple
:param idx: index of the surface
:type idx: int
:param force_tsl: flag to force re-tessellation
:type force_tsl: bool
:param update_delta: flag to update surface delta
:type update_delta: bool
:param delta: new surface evaluation delta
:type delta: list, tuple
:param reset_names: flag to reset names
:type reset_names: bool
:return: visualization element (as a dict)
:rtype: list
"""
if idx < 0:
lock.acquire()
idx = counter.value
counter.value += 1
lock.release()
if update_delta:
elem.delta = delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "surface"
# Fix element name
if elem.name == "surface" and idx >= 0:
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(colorval[0], colorval[1], idx=idx)
# Initialize the return list
rl = []
# Add control points
if mconf['ctrlpts'] == 'points':
ret = dict(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
rl.append(ret)
# Add control points as quads
if mconf['ctrlpts'] == 'quads':
qtsl = tessellate.QuadTessellate()
qtsl.tessellate(elem.ctrlpts, size_u=elem.ctrlpts_size_u, size_v=elem.ctrlpts_size_v)
ret = dict(ptsarr=[qtsl.vertices, qtsl.faces], name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
rl.append(ret)
# Add surface points
if mconf['evalpts'] == 'points':
ret = dict(ptsarr=elem.evalpts, name=(elem.name, idx), color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add surface points as quads
if mconf['evalpts'] == 'quads':
qtsl = tessellate.QuadTessellate()
qtsl.tessellate(elem.evalpts, size_u=elem.sample_size_u, size_v=elem.sample_size_v)
ret = dict(ptsarr=[qtsl.vertices, qtsl.faces],
name=elem.name, color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add surface points as vertices and triangles
if mconf['evalpts'] == 'triangles':
elem.tessellate(force=force_tsl)
ret = dict(ptsarr=[elem.tessellator.vertices, elem.tessellator.faces],
name=elem.name, color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add the trim curves
for itc, trim in enumerate(elem.trims):
ret = dict(ptsarr=elem.evaluate_list(trim.evalpts), name=("trim", itc),
color=colorval[2], plot_type='trimcurve', idx=idx)
rl.append(ret)
# Return the list
return rl
def mp_init(l, c):
""" Initialization function for multi-threaded operations.
:param l: lock
:param c: value for common counter
"""
global lock
global counter
lock = l
counter = c
| 2.296875 | 2 |
autoperf/tools/caliper.py | xdai/autoperf | 5 | 12757874 |
import logging
import os
import subprocess
from typing import Tuple

from .interface import *  # provides AbstractTool, config, etc.
class Tool(AbstractTool):
def __init__(self, experiment):
self.name = "caliper"
self.longname = "Tool.caliper.%s" % experiment.name
self.experiment = experiment
self.logger = logging.getLogger(__name__)
def setup(self):
self.platform = self.experiment.platform
self.analyses = self.experiment.analyses
def build_env(self):
return dict()
def setup_str(self) -> str:
return ""
    def wrap_command(self, exe_cmd, exe_opt) -> Tuple[str, str]:
datadir = self.experiment.datadirs[self.experiment.iteration]
metrics = self.experiment.parted_metrics[self.experiment.iteration]
measurement = "%s/measurement" % datadir
_execmd = "hpcrun -o %s" % measurement
for metric in metrics.split(':'):
_execmd += " -e %s@%s" % (metric, self.experiment.metric_set.interval[metric])
_execmd += " %s" % exe_cmd
return (_execmd, exe_opt)
def aggregate(self):
"""
Aggregate data collected by all iterations of the current
experiment. We assume that iterations have all been finished.
"""
execmd = config.get("%s.exe_cmd" % self.experiment.longname)
execmd = os.path.expanduser(execmd)
exebin = os.path.basename(execmd)
appsrc = config.get("%s.appsrc" % self.longname)
self.logger.info("Aggregating all collected data")
hpcstruct = "%s/%s.hpcstruct" % (self.experiment.insname, exebin)
cmd = ["hpcstruct",
"-o",
hpcstruct,
execmd]
self.logger.info("HPCToolkit: run hpcstruct")
self.logger.cmd(' '.join(cmd))
subprocess.call(cmd)
# This could be stupid, but it is the only way I know to
# aggregate HPCToolkit collected data:
for datadir in self.experiment.datadirs:
measurement = "%s/measurement" % datadir
database = "%s/database" % datadir
# 1. convert to ppk (paraprof -f hpc --pack)
cmd = ["hpcprof",
"-o",
database,
"-S",
hpcstruct,
"-I",
"%s/'*'" % appsrc,
measurement]
self.logger.info("HPCToolkit: run hpcprof")
self.logger.cmd(' '.join(cmd))
subprocess.call(cmd)
cmd = ["%s/bin/paraprof" % self.experiment.tauroot,
"-f",
"hpc",
"--pack",
"%s/data.ppk" % datadir,
"%s/experiment.xml" % database]
self.logger.info("Pack collected data to TAU .ppk package")
self.logger.cmd(' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
# 2. dump as tau profile (paraprof --dump)
cwd = os.getcwd()
self.logger.cmd("cd %s/profiles", datadir)
os.chdir("%s/profiles" % datadir)
cmd = ["%s/bin/paraprof" % self.experiment.tauroot,
"--dump",
"../data.ppk"]
self.logger.info("Unpack .ppk to TAU profiles")
self.logger.cmd(' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
self.logger.cmd("cd %s", cwd)
os.chdir(cwd)
# 3. aggregate tau profiles
for metric in os.listdir("%s/profiles" % datadir):
target = os.path.relpath("%s/profiles/%s" % (datadir, metric),
"%s/profiles" % self.experiment.insname)
link_name = "%s/profiles/%s" % (self.experiment.insname, metric)
self.logger.cmd("ln -s %s %s", target, link_name)
                # A link error will happen if different iterations share
                # some metrics; in this case we just ignore the error
try:
os.symlink(target, link_name)
                except OSError:
pass
self.logger.newline()
| 2.296875 | 2 |
2019/aoc2019_4b.py | ByteCommander/AdventOfCode | 2 | 12757875 | # Advent Of Code 2019, day 4, part 2)
# http://adventofcode.com/2019/day/4
# solution by ByteCommander, 2019-12-04
import re
def main():
    with open("inputs/aoc2019_4.txt") as file:
        a, b = map(int, file.read().split("-"))

    count = sum(
        1 for x in range(a, b + 1) if (
            # at least one run of repeated digits has length exactly two
            # (e.g. 112233 and 111122 qualify, 123444 does not)
            any(len(match.group()) == 2 for match in re.finditer(r"(\d)\1+", str(x))) and
            # digits never decrease from left to right
            all(int(d1) <= int(d2) for d1, d2 in zip(str(x), str(x)[1:]))
)
)
print(f"There are {count} possible passwords in the range.")
if __name__ == "__main__":
main()
| 3.375 | 3 |
notebooks/algo/algo02_v0.py | bmwant/chemister | 0 | 12757876 |
import argparse
import statistics
from datetime import datetime, timedelta
import pandas as pd
from tables import Table
from download_rates import DATE_FMT
class Transaction(object):
def __init__(self, amount, rate_buy, rate_sale, date, verbose=True):
self.amount = amount # amount of currency we bought
self.rate_buy = rate_buy # rate when trading
# selling rate when trading to calculate future profit
self.rate_sale = rate_sale
self.date = date
self._sold = False
self.verbose = verbose
def log(self, message):
if self.verbose:
print(message)
def sale(self, rate_sale, dry_run=False):
amount = self.amount * rate_sale
if not dry_run:
profit = amount - self.price # what we gain
self.log(
'Selling {amount:.2f}({rate_buy:.2f}) at {rate_sale:.2f}; '
'total: {total:.2f}; profit: {profit:.2f}'.format(
amount=self.amount,
rate_buy=self.rate_buy,
rate_sale=rate_sale,
total=amount,
profit=profit,
)
)
self._sold = True
return amount
@property
def price(self):
return self.amount * self.rate_buy
@property
def sold(self):
return self._sold
def __str__(self):
return '{}: {:.2f} at {:.2f}'.format(
self.date.strftime(DATE_FMT),
self.amount,
self.rate_buy
)
class ShiftTrader(object):
def __init__(self, starting_amount, shift, verbose=True):
self.transactions = [] # history of all transactions
self.amount = starting_amount # operation money we use to buy currency
self.shift = shift # days we wait between buying/selling
self._min_debt = 0
self._success = [] # success periods
self._fail = [] # fail periods
self._strike_data = []
self._strike = 0 # length of the period
self._flag = False # whether we incrementing same period
self.verbose = verbose
def log(self, message):
if self.verbose:
print(message)
def trade(self, daily_data):
date = daily_data['date']
        # From our perspective: we sell at the rate the bank buys for,
        # and we buy at the rate the bank sells for
        rate_sale = daily_data['buy']
        rate_buy = daily_data['sale']
is_success = False # if today's trade is successful
for t in self.transactions:
if (
t.date + timedelta(days=self.shift) == date and
rate_sale > t.rate_buy
):
self.amount += t.sale(rate_sale)
is_success = True
# handle expired transactions
expired_sold = self.handle_expired(date, rate_sale)
if is_success or expired_sold:
if self._flag is True:
self._strike += 1
else:
self._flag = True
if self._strike:
self._fail.append(self._strike)
self._strike_data.append(-self._strike)
self._strike = 1
else:
if self._flag is False:
self._strike += 1
else:
self._flag = False
self._success.append(self._strike)
self._strike_data.append(self._strike)
self._strike = 1
# buy some amount of currency
t = Transaction(
rate_buy=rate_buy,
rate_sale=rate_sale,
amount=self.daily_amount,
date=date,
verbose=self.verbose,
)
debt = self.amount - t.price
# if debt < 0:
# raise ValueError(
# 'Cannot buy {:.2f}$. Available: {:.2f}UAH'.format(self.daily_amount, self.amount))
self._min_debt = min(debt, self._min_debt)
self.amount -= t.price
self.transactions.append(t)
        self.log('Amount at the end of the day: {:.2f}'.format(self.amount))
def handle_expired(self, date, rate_sale):
expired_sold = False # any expired transaction was sold
for t in self.transactions:
if (
t.date + timedelta(days=self.shift) < date and
rate_sale > t.rate_buy and
not t.sold
):
self.log('Selling expired {}'.format(t))
self.amount += t.sale(rate_sale)
expired_sold = True
return expired_sold
def close(self, rate_sale_closing):
"""Sell all hanging transaction for the rate specified"""
self.log('Closing trading for {} transactions'.format(len(self.hanging)))
for t in self.hanging:
self.amount += t.sale(rate_sale_closing)
@property
def daily_amount(self):
return 100
def get_potential(self, rate_sale):
return self.amount + sum([t.sale(rate_sale, dry_run=True)
for t in self.hanging])
@property
def hanging(self):
return list(filter(lambda t: not t.sold, self.transactions))
def launch_trading(*, year, starting_amount_uah, shift, verbose=True):
"""
:param year: year we want to launch our algorithm on
:param starting_amount_uah: how much we initially invest
:param shift: days shift to wait before selling transaction
:return:
"""
currency = 'usd'
filename = 'data/uah_to_{}_{}.csv'.format(currency, year)
df = pd.read_csv(filename)
df['date'] = pd.to_datetime(df['date'], format=DATE_FMT)
sd = datetime.strptime('01.01.{}'.format(year), DATE_FMT)
ed = datetime.strptime('07.01.{}'.format(year), DATE_FMT)
# Get end date
# last_date_value = df.iloc[[-1]]['date'].item()
# pd_date = pd.to_datetime(last_date_value)
# ed = pd_date.to_pydatetime()
print('Trading at period: [{} - {}]'.format(sd, ed))
trader = ShiftTrader(
starting_amount=starting_amount_uah,
shift=shift,
verbose=verbose,
)
i = 0
s = { # stats
'year': year,
'shift': shift,
'k1_return': None,
'k1_return_soft': None,
'k5_return': None,
'k5_return_soft': None,
'p10_return': None,
'p10_return_soft': None,
'exit_period': None,
# strikes
'success': None,
'fail': None,
'strikes': None,
'starting_amount': starting_amount_uah,
'end_amount': None,
'debt': None,
# transactions
'transactions': None, # atomic bank operations
        'hanging': None,  # transactions not yet sold
}
current_date = sd # starting date
while current_date <= ed: # until end date
rate_sale = df.loc[df['date'] == current_date]['sale'].item()
rate_buy = df.loc[df['date'] == current_date]['buy'].item()
if verbose:
print(
'\n==>{}: {:.2f}/{:.2f}'.format(
current_date.strftime(DATE_FMT),
rate_buy,
rate_sale,
)
)
daily_data = {
'date': current_date,
'buy': rate_sale, # we buy currency, bank sale currency
'sale': rate_buy, # we sale currency, bank buy currency
}
potential = trader.get_potential(rate_buy)
if verbose:
print('Potential = {:.2f}'.format(potential))
trader.trade(daily_data)
days_passed = current_date - sd # how many days passed since start
if s['exit_period'] is None and potential > starting_amount_uah:
s['exit_period'] = days_passed
if s['k1_return'] is None and trader.amount >= starting_amount_uah + 1000:
s['k1_return'] = days_passed
if s['k1_return_soft'] is None and potential >= starting_amount_uah + 1000:
s['k1_return_soft'] = days_passed
if s['k5_return'] is None and trader.amount >= starting_amount_uah + 5000:
s['k5_return'] = days_passed
if s['k5_return_soft'] is None and potential >= starting_amount_uah + 5000:
s['k5_return_soft'] = days_passed
if s['p10_return'] is None and trader.amount >= 1.1 * starting_amount_uah:
s['p10_return'] = days_passed
if s['p10_return_soft'] is None and potential >= 1.1 * starting_amount_uah:
s['p10_return_soft'] = days_passed
i += 1
current_date += timedelta(days=1)
s['hanging'] = len(trader.hanging)
# close period at the last day no matter which rate
# in order to calculate raw profit
trader.close(rate_buy)
# sell every purchased transaction
s['transactions'] = 2 * len(trader.transactions)
s['strikes'] = trader._strike_data
s['success'] = trader._success
s['fail'] = trader._fail
s['debt'] = trader._min_debt
s['end_amount'] = trader.amount
if verbose:
print_stats(s)
return s # return statistics for trading period
def print_stats(stats):
starting_amount = stats['starting_amount']
debt = stats['debt']
print(
'\n#### Report for {year} year. '
'Shift: {shift} ####\n'.format(**stats)
)
    print('Minimal investment needed: {:.2f} UAH'.format(starting_amount - debt))
print('\n#### Return/exit periods ####\n')
if stats['k1_return'] is not None:
print('1K profit period: {} days'.format(stats['k1_return'].days))
else:
print('1K HARD is unreachable within given period')
if stats['k1_return_soft'] is not None:
print('1K gain soft period: {} days'.format(stats['k1_return_soft'].days))
else:
print('1K SOFT is unreachable within given period')
if stats['k5_return'] is not None:
print('5K profit period: {} days'.format(stats['k5_return'].days))
else:
print('5K HARD is unreachable within given period')
if stats['k5_return_soft'] is not None:
print('5K gain soft period: {} days'.format(stats['k5_return_soft'].days))
else:
print('5K SOFT is unreachable within given period')
if stats['p10_return'] is not None:
print('10% profit period: {} days'.format(stats['p10_return'].days))
else:
print('10% HARD is unreachable within given period')
if stats['p10_return_soft'] is not None:
print('10% gain soft period: {} days'.format(stats['p10_return_soft'].days))
else:
print('10% SOFT is unreachable within given period')
if stats['exit_period'] is not None:
print('Exit period: {} days\n'.format(stats['exit_period'].days))
else:
print('Cannot exit within given period\n')
print('\n#### Strikes ####\n')
print('Periods: {}'.format(len(stats['strikes'])))
print('Success: {}'.format(len(stats['success'])))
print('\tShortest: {}'.format(min(stats['success'])))
print('\tLongest: {}'.format(max(stats['success'])))
print('\tMean: {:.2f}'.format(statistics.mean(stats['success'])))
print('Fail: {}'.format(len(stats['fail'])))
print('\tShortest: {}'.format(min(stats['fail'])))
print('\tLongest: {}'.format(max(stats['fail'])))
print('\tMean: {:.2f}'.format(statistics.mean(stats['fail'])))
print('\n#### Transactions ####\n')
print('Total transactions: {}'.format(stats['transactions']))
print('Hanging transactions: {}'.format(stats['hanging']))
print('\n#### Profits ####\n')
end_amount = stats['end_amount']
print('Initial invested amount: {} UAH'.format(starting_amount))
print('Amount we have in the end: {:.2f} UAH'.format(end_amount))
    print('Raw profit: {:.2f} UAH'.format(end_amount - starting_amount))
print('Profit, %: {:.2f}'.format(end_amount / starting_amount * 100))
def build_shift_comparison_table(year):
header = [
'year',
'shift',
'minimal investment',
'raw profit, uah',
'profit, %',
]
data = []
for s in range(0, 31):
shift = s+1
stats = launch_trading(
year=year,
shift=shift,
starting_amount_uah=0,
verbose=False,
)
min_investment = -stats['debt']
row = [
year,
shift,
'{:.2f}'.format(min_investment),
'{:.2f}'.format(stats['end_amount']),
'{:.2f}'.format(stats['end_amount'] / min_investment * 100),
]
data.append(row)
t = Table(header=header, data=data)
t.print()
def parse_args():
parser = argparse.ArgumentParser(description='TradeAlgo#02v0')
parser.add_argument(
'--year',
required=True,
type=int,
help='which year you want to analyze',
)
parser.add_argument(
'--shift',
required=False,
default=1,
type=int,
help='minimal delay between buying and selling',
)
parser.add_argument(
'--amount',
required=False,
default=10000,
type=int,
help='amount of money you want to initially invest',
)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
launch_trading(
year=args.year,
shift=args.shift,
starting_amount_uah=args.amount,
)
# build_shift_comparison_table(year=args.year)
| 2.921875 | 3 |
eventgraphs/motif.py | empiricalstateofmind/eventgraphs | 17 | 12757877 | """
Copyright (C) 2018 <NAME> (<EMAIL>)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections.abc import Iterable
# Check that undirected motifs are being processed correctly.
class Motif(object):
def __init__(self, e1, e2, condensed, directed):
"""
"""
self.directed = directed
for s, nodes in zip(['U1', 'U2', 'V1', 'V2'],
[e1[0], e2[0], e1[1], e2[1]]):
if isinstance(nodes, Iterable) and not isinstance(nodes, str):
setattr(self, s, set(nodes))
else:
setattr(self, s, {nodes})
motif = (len(self.U1 & self.U2),
len(self.V1 & self.U2),
len(self.U2 - (self.U1 | self.V1)),
len(self.U1 & self.V2),
len(self.V1 & self.V2),
len(self.V2 - (self.U1 | self.V1)))
if condensed:
motif = tuple(int(bool(entry)) for entry in motif)
if len(e1) == 4:
self._motif = (*motif, e1[-1], e2[-1])
else:
self._motif = motif
# Cleanup - keep the object lightweight
for attr in ['U1', 'U2', 'V1', 'V2']:
delattr(self, attr)
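    # Illustrative sketch: with plain (source, target) events,
    #   Motif(('a', 'b'), ('b', 'c'), condensed=False, directed=True)
    # has signature (0, 1, 0, 0, 0, 1) and prints as '○|+'.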
def __str__(self):
return self._iconify_motif()
def __hash__(self):
return hash(self._motif)
def __repr__(self):
return "< Motif {} {} >".format(self._motif, self._iconify_motif())
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self._motif == other._motif
        elif isinstance(other, str):
            # compare against the icon string (the original compared the
            # bound __str__ method itself, which was always False)
            return str(self) == other
        return NotImplemented
def _iconify_motif(self):
"""
Input:
Returns:
None
"""
icons = ['●', '○', '+']
string = ''
for ix, entry in enumerate(self._motif[:3]):
string += icons[ix] * entry
if self.directed:
string += '|'
for ix, entry in enumerate(self._motif[3:6]):
string += icons[ix] * entry
if len(self._motif) == 8:
# We can shorten this or alter it.
string += " ({},{})".format(self._motif[-2][0] + self._motif[-2][-1],
self._motif[-1][0] + self._motif[-1][-1])
return string
| 2.3125 | 2 |
docs/example/example_anndata_to_tsv.py | erikadudki/de_analysis | 0 | 12757878 | <reponame>erikadudki/de_analysis
from de_analysis.anndata_to_tsv import anndata_to_tsv
# example script to transform your anndata file (*.h5ad) to the required form
# of .tsv files
# working directory path (path which contains the subfolder 'data')
wd = './de_analysis_clean/docs/example/'
# name of your anndata- file
filename = 'myDataset'
# pick which layer/assay of normalized data should be used, usually:
# 'logcounts' / 'cpm'
user_layer = 'logcounts'
anndata_to_tsv(wd, filename, user_layer)
| 2.140625 | 2 |
iterator/unzip/impl.py | cunitac/procon-rs-lib | 0 | 12757879 | def tup(name, len):
ret = '('
for i in range(len):
ret += f'{name}{i}, '
return ret + ')'
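# e.g. tup("T", 2) == "(T0, T1, )", so the i == 2 iteration prints:
#   impl_unzip!((T0, T1, ), (A0, A1, ), (s0, s1, ), (t0, t1, ));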
for i in range(10):
print(
f'impl_unzip!({tup("T",i)}, {tup("A",i)}, {tup("s",i)}, {tup("t",i)});')
| 3.25 | 3 |
paddlespeech/s2t/io/dataset.py | gongel/DeepSpeech | 0 | 12757880 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
# Modified from wenet(https://github.com/wenet-e2e/wenet)
import os
from typing import Optional
from paddle.io import Dataset
from yacs.config import CfgNode
from paddlespeech.s2t.frontend.utility import read_manifest
from paddlespeech.s2t.utils.log import Log
__all__ = ["ManifestDataset", "TransformDataset"]
logger = Log(__name__).getlog()
class ManifestDataset(Dataset):
@classmethod
def params(cls, config: Optional[CfgNode]=None) -> CfgNode:
default = CfgNode(
dict(
manifest="",
max_input_len=27.0,
min_input_len=0.0,
max_output_len=float('inf'),
min_output_len=0.0,
max_output_input_ratio=float('inf'),
min_output_input_ratio=0.0, ))
if config is not None:
config.merge_from_other_cfg(default)
return default
@classmethod
def from_config(cls, config):
"""Build a ManifestDataset object from a config.
Args:
config (yacs.config.CfgNode): configs object.
Returns:
ManifestDataset: dataet object.
"""
assert 'manifest' in config.data
assert config.data.manifest
dataset = cls(
manifest_path=config.data.manifest,
max_input_len=config.data.max_input_len,
min_input_len=config.data.min_input_len,
max_output_len=config.data.max_output_len,
min_output_len=config.data.min_output_len,
max_output_input_ratio=config.data.max_output_input_ratio,
min_output_input_ratio=config.data.min_output_input_ratio, )
return dataset
def __init__(self,
manifest_path,
max_input_len=float('inf'),
min_input_len=0.0,
max_output_len=float('inf'),
min_output_len=0.0,
max_output_input_ratio=float('inf'),
min_output_input_ratio=0.0):
"""Manifest Dataset
Args:
manifest_path (str): manifest josn file path
max_input_len ([type], optional): maximum output seq length,
in seconds for raw wav, in frame numbers for feature data. Defaults to float('inf').
min_input_len (float, optional): minimum input seq length,
in seconds for raw wav, in frame numbers for feature data. Defaults to 0.0.
max_output_len (float, optional): maximum input seq length,
in modeling units. Defaults to 500.0.
min_output_len (float, optional): minimum input seq length,
in modeling units. Defaults to 0.0.
max_output_input_ratio (float, optional): maximum output seq length/output seq length ratio.
Defaults to 10.0.
min_output_input_ratio (float, optional): minimum output seq length/output seq length ratio.
Defaults to 0.05.
"""
super().__init__()
# read manifest
self._manifest = read_manifest(
manifest_path=manifest_path,
max_input_len=max_input_len,
min_input_len=min_input_len,
max_output_len=max_output_len,
min_output_len=min_output_len,
max_output_input_ratio=max_output_input_ratio,
min_output_input_ratio=min_output_input_ratio)
self._manifest.sort(key=lambda x: x["feat_shape"][0])
def __len__(self):
return len(self._manifest)
def __getitem__(self, idx):
return self._manifest[idx]
class TransformDataset(Dataset):
"""Transform Dataset.
Args:
data: list object from make_batchset
converter: batch function
reader: read data
"""
def __init__(self, data, converter, reader):
"""Init function."""
super().__init__()
self.data = data
self.converter = converter
self.reader = reader
def __len__(self):
"""Len function."""
return len(self.data)
def __getitem__(self, idx):
"""[] operator."""
return self.converter([self.reader(self.data[idx], return_uttid=True)])
class AudioDataset(Dataset):
def __init__(self,
data_file,
max_length=10240,
min_length=0,
token_max_length=200,
token_min_length=1,
batch_type='static',
batch_size=1,
max_frames_in_batch=0,
sort=True,
raw_wav=True,
stride_ms=10):
"""Dataset for loading audio data.
Attributes::
data_file: input data file
            Plain text data file; each line contains the following 7 fields,
            split by tab characters:
utt:utt1
feat:tmp/data/file1.wav or feat:tmp/data/fbank.ark:30
feat_shape: 4.95(in seconds) or feat_shape:495,80(495 is in frames)
text:i love you
token: i <space> l o v e <space> y o u
tokenid: int id of this token
token_shape: M,N # M is the number of token, N is vocab size
max_length: drop utterance which is greater than max_length(10ms), unit 10ms.
min_length: drop utterance which is less than min_length(10ms), unit 10ms.
token_max_length: drop utterance which is greater than token_max_length,
especially when use char unit for english modeling
token_min_length: drop utterance which is less than token_max_length
batch_type: static or dynamic, see max_frames_in_batch(dynamic)
batch_size: number of utterances in a batch,
it's for static batch size.
max_frames_in_batch: max feature frames in a batch,
when batch_type is dynamic, it's for dynamic batch size.
Then batch_size is ignored, we will keep filling the
batch until the total frames in batch up to max_frames_in_batch.
sort: whether to sort all data, so the utterance with the same
length could be filled in a same batch.
            raw_wav: use raw waveform or extracted features.
                if raw waveform is used, dynamic waveform-level augmentation could be used
                and the feature is extracted by torchaudio.
                if extracted features (e.g. by kaldi) are used, only feature-level
                augmentation such as specaug could be used.
"""
assert batch_type in ['static', 'dynamic']
# read manifest
data = read_manifest(data_file)
if sort:
data = sorted(data, key=lambda x: x["feat_shape"][0])
        if raw_wav:
            # raw-wav mode expects audio paths, not kaldi .ark/.scp features
            assert os.path.splitext(data[0]['feat'].split(':')[0])[-1] not in (
                '.ark', '.scp')
            # convert duration (seconds) into frame counts for the length filters
            for utt in data:
                utt['feat_shape'][0] = float(utt['feat_shape'][0]) * 1000 / stride_ms
self.input_dim = data[0]['feat_shape'][1]
self.output_dim = data[0]['token_shape'][1]
# with open(data_file, 'r') as f:
# for line in f:
# arr = line.strip().split('\t')
# if len(arr) != 7:
# continue
# key = arr[0].split(':')[1]
# tokenid = arr[5].split(':')[1]
# output_dim = int(arr[6].split(':')[1].split(',')[1])
# if raw_wav:
# wav_path = ':'.join(arr[1].split(':')[1:])
# duration = int(float(arr[2].split(':')[1]) * 1000 / 10)
# data.append((key, wav_path, duration, tokenid))
# else:
# feat_ark = ':'.join(arr[1].split(':')[1:])
# feat_info = arr[2].split(':')[1].split(',')
# feat_dim = int(feat_info[1].strip())
# num_frames = int(feat_info[0].strip())
# data.append((key, feat_ark, num_frames, tokenid))
# self.input_dim = feat_dim
# self.output_dim = output_dim
valid_data = []
for i in range(len(data)):
length = data[i]['feat_shape'][0]
token_length = data[i]['token_shape'][0]
            # remove utterances that are too long or too short on either
            # input or output, to prevent running out of memory
if length > max_length or length < min_length:
# logging.warn('ignore utterance {} feature {}'.format(
# data[i][0], length))
pass
elif token_length > token_max_length or token_length < token_min_length:
pass
else:
valid_data.append(data[i])
data = valid_data
self.minibatch = []
num_data = len(data)
# Dynamic batch size
if batch_type == 'dynamic':
assert (max_frames_in_batch > 0)
self.minibatch.append([])
num_frames_in_batch = 0
for i in range(num_data):
length = data[i]['feat_shape'][0]
num_frames_in_batch += length
if num_frames_in_batch > max_frames_in_batch:
self.minibatch.append([])
num_frames_in_batch = length
self.minibatch[-1].append(data[i])
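            # e.g. with max_frames_in_batch=1000 and lengths [400, 500, 300],
            # greedy filling yields minibatches [[400, 500], [300]]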
# Static batch size
else:
cur = 0
while cur < num_data:
end = min(cur + batch_size, num_data)
item = []
for i in range(cur, end):
item.append(data[i])
self.minibatch.append(item)
cur = end
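            # e.g. with batch_size=2 and 5 utterances, batch sizes are [2, 2, 1]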
def __len__(self):
return len(self.minibatch)
def __getitem__(self, idx):
return self.minibatch[idx]
| 1.84375 | 2 |
gallery/migrations/0012_auto_20200526_1129.py | BlessedAssurance/gallery | 0 | 12757881 | <filename>gallery/migrations/0012_auto_20200526_1129.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-05-26 08:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gallery', '0011_auto_20200525_2302'),
]
operations = [
migrations.AlterModelOptions(
name='location',
options={'verbose_name_plural': 'categories'},
),
migrations.RemoveField(
model_name='location',
name='locs',
),
migrations.AddField(
model_name='location',
name='cate',
field=models.CharField(choices=[('Flowers', 'Flowers'), ('Places', 'Places'), ('Animals', 'Animals'), ('People', 'People')], default='True', max_length=255),
),
]
| 1.703125 | 2 |
PyRAI2MD/Quantum_Chemistry/qc_bagel.py | lijingbai2009/PyRAI2MD | 1 | 12757882 | <reponame>lijingbai2009/PyRAI2MD<filename>PyRAI2MD/Quantum_Chemistry/qc_bagel.py
######################################################
#
# PyRAI2MD 2 module for BAGEL interface
#
# Author <NAME>
# Sep 20 2021
#
######################################################
import os, sys, subprocess, shutil, json
import numpy as np
from PyRAI2MD.Utils.coordinates import S2F
class BAGEL:
""" BEGEL single point calculation interface
Parameters: Type:
keywords dict keywords dict
id int calculation index
Attribute: Type:
natom int number of atoms.
nstate int number of electronic state
nnac int number of non-adiabatic couplings
nac_coupling list non-adibatic coupling pairs
state int current state
activestate int only compute gradient for current state
keep_tmp int keep the BAGEL calculation folders (1) or not (0).
verbose int print level.
project str calculation name.
workdir str calculation folder.
bagel str BAGEL executable folder
nproc int number of CPUs for parallelization
mpi str path to mpi library
blas str path to blas library
lapack str path to lapack library
boost str path to boost library
mkl str path to mkl library
arch str CPU architecture
threads int number of threads for OMP parallelization.
use_hpc int use HPC (1) for calculation or not(0), like SLURM.
use_mpi int use MPI (1) for calculation or not(0).
Functions: Returns:
train self fake function
load self fake function
appendix self fake function
evaluate self run single point calculation
"""
def __init__(self, keywords = None, id = None, runtype = 'qm'):
self.runtype = runtype
self.nstate = 0
self.nnac = 0
self.nac_coupling = []
self.state = 0
self.activestate = 0
variables = keywords['bagel']
self.keep_tmp = variables['keep_tmp']
self.verbose = variables['verbose']
self.project = variables['bagel_project']
self.workdir = variables['bagel_workdir']
self.archive = variables['bagel_archive']
self.bagel = variables['bagel']
self.nproc = variables['bagel_nproc']
self.mpi = variables['mpi']
self.blas = variables['blas']
self.lapack = variables['lapack']
self.boost = variables['boost']
self.mkl = variables['mkl']
self.arch = variables['arch']
self.threads = variables['omp_num_threads']
self.use_mpi = variables['use_mpi']
self.use_hpc = variables['use_hpc']
## check calculation folder
## add index when running in adaptive sampling
        if id == 'Read':
            ## 'Read' must be checked first; it previously fell into the id != None branch
            self.workdir = self.workdir
        elif id is not None:
            self.workdir = '%s/tmp_BAGEL-%s' % (self.workdir, id)
        else:
            self.workdir = '%s/tmp_BAGEL' % (self.workdir)
## initialize runscript
self.runscript = """
export BAGEL_PROJECT=%s
export BAGEL=%s
export BLAS=%s
export LAPACK=%s
export BOOST=%s
export MPI=%s
export BAGEL_WORKDIR=%s
export OMP_NUM_THREADS=%s
export MKL_NUM_THREADS=%s
export BAGEL_NUM_THREADS=%s
export MV2_ENABLE_AFFINITY=0
export LD_LIBRARY_PATH=$MPI/lib:$BAGEL/lib:$BLAS:$LAPACK:$BOOST/lib:$LD_LIBRARY_PATH
export PATH=$MPI/bin:$PATH
source %s %s
cd $BAGEL_WORKDIR
""" % ( self.project,
self.bagel,
self.blas,
self.lapack,
self.boost,
self.mpi,
self.workdir,
self.threads,
self.threads,
self.threads,
self.mkl,
self.arch)
if self.use_mpi == 0:
self.runscript += '$BAGEL/bin/BAGEL $BAGEL_WORKDIR/$BAGEL_PROJECT.json > $BAGEL_WORKDIR/$BAGEL_PROJECT.log\n'
else:
self.runscript += 'mpirun -np $SLURM_NTASKS $BAGEL/bin/BAGEL $BAGEL_WORKDIR/$BAGEL_PROJECT.json > $BAGEL_WORKDIR/$BAGEL_PROJECT.log\n'
def _setup_hpc(self):
## setup calculation using HPC
## read slurm template from .slurm files
if os.path.exists('%s.slurm' % (self.project)) == True:
with open('%s.slurm' % (self.project)) as template:
submission = template.read()
else:
sys.exit('\n FileNotFoundError\n BAGEL: looking for submission file %s.slurm' % (self.project))
submission += '\n%s' % (self.runscript)
with open('%s/%s.sbatch' % (self.workdir, self.project), 'w') as out:
out.write(submission)
def _setup_bagel(self, x):
## make calculation folder and input file
if os.path.exists(self.workdir) == False:
os.makedirs(self.workdir)
## prepare .json .archive files
self._write_coord(x)
## save .archive file
if os.path.exists('%s.archive' % (self.project)) == False:
sys.exit('\n FileNotFoundError\n BAGEL: looking for orbital %s.archive' % (self.project))
if self.archive == 'default':
self.archive = self.project
if os.path.exists('%s/%s.archive' % (self.workdir, self.archive)) == False:
shutil.copy2('%s.archive' % (self.project), '%s/%s.archive' % (self.workdir, self.archive))
## clean calculation folder
os.system("rm %s/ENERGY*.out > /dev/null 2>&1" % (self.workdir))
os.system("rm %s/FORCE_*.out > /dev/null 2>&1" % (self.workdir))
os.system("rm %s/NACME_*.out > /dev/null 2>&1" % (self.workdir))
## write run script
with open('%s/%s.sh' % (self.workdir, self.project), 'w') as out:
out.write(self.runscript)
## setup HPC settings
if self.use_hpc == 1:
self._setup_hpc()
def _write_coord(self, x):
## write coordinate file
## convert xyz from array to bagel format (Bohr)
natom = len(x)
        a2b = 1 / 0.529177249  # angstrom to bohr
        jxyz = []
        for line in x:
            e, cx, cy, cz = line
            jxyz.append({"atom": e, "xyz": [float(cx) * a2b, float(cy) * a2b, float(cz) * a2b]})
## Read input template from current directory
with open('%s.bagel' % (self.project), 'r') as template:
input = json.load(template)
si_input = input.copy()
si_input['bagel'][0]['geometry'] = jxyz
## default is to use template force setting, replace with the current state if requested
if self.activestate == 1:
si_input['bagel'][2]['grads'] = [{'title': 'force', 'target': self.state - 1}]
## save xyz file
with open('%s/%s.json' % (self.workdir, self.project), 'w') as out:
json.dump(si_input, out)
def _run_bagel(self):
## run BAGEL calculation
maindir = os.getcwd()
os.chdir(self.workdir)
if self.use_hpc == 1:
subprocess.run(['sbatch', '-W', '%s/%s.sbatch' % (self.workdir, self.project)])
else:
subprocess.run(['bash', '%s/%s.sh' % (self.workdir, self.project)])
os.chdir(maindir)
def _read_data(self, natom):
## read BAGEL logfile and pack data
if os.path.exists('%s/%s.log' % (self.workdir, self.project)) == False:
return [], np.zeros(1), np.zeros(1), np.zeros(1), np.zeros(1)
with open('%s/%s.log' % (self.workdir, self.project), 'r') as out:
log = out.read().splitlines()
coord = []
for line in log:
if '"atom"' in line:
line=line.replace(',', ' ').replace('"', ' ').split()
coord.append(line[3: 4] + [float(i) * 0.529177 for i in line[7: 10]])
coord = coord[:natom]
## pack energy, only includes the requested states by self.nstate
energy = []
if os.path.exists('%s/ENERGY.out' % (self.workdir)) == True:
energy = np.loadtxt('%s/ENERGY.out' % (self.workdir))[0: self.nstate]
## pack force
gradient = []
for i in range(self.nstate):
if os.path.exists('%s/FORCE_%s.out' % (self.workdir, i)) == True:
with open('%s/FORCE_%s.out' % (self.workdir, i)) as force:
g = force.read().splitlines()[1: natom + 1]
g = S2F(g)
else:
g = [[0, 0, 0] for x in range(natom)]
gradient.append(g)
gradient = np.array(gradient)
## pack nac
nac = []
for pair in self.nac_coupling:
pa, pb = pair
if os.path.exists('%s/NACME_%s_%s.out' % (self.workdir, pa, pb)) == True:
with open('%s/NACME_%s_%s.out' % (self.workdir, pa, pb)) as nacme:
n = nacme.read().splitlines()[1: natom + 1]
n = S2F(n)
nac.append(n)
nac = np.array(nac)
soc = np.zeros(0)
return coord, energy, gradient, nac, soc
def _qmmm(self, traj):
## run BAGEL for QMMM calculation
## create qmmm model
traj = traj.applyqmmm()
xyz = np.concatenate((traj.qm_atoms, traj.qm_coord), axis=1)
nxyz = len(xyz)
## setup BAGEL calculation
self._setup_bagel(xyz)
## run BAGEL calculation
self._run_bagel()
## read BAGEL output files
coord, energy, gradient, nac, soc = self._read_data(nxyz)
## project force and coupling
jacob = traj.Hcap_jacob
gradient = np.array([np.dot(x, jacob) for x in gradient])
nac = np.array([np.dot(x, jacob) for x in nac])
return energy, gradient, nac
def _qm(self, traj):
## run BAGEL for QM calculation
xyz = np.concatenate((traj.atoms, traj.coord), axis=1)
nxyz = len(xyz)
## setup BAGEL calculation
self._setup_bagel(xyz)
## run BAGEL calculation
self._run_bagel()
## read BAGEL output files
coord, energy, gradient, nac, soc = self._read_data(nxyz)
return energy, gradient, nac
def appendix(self, addons):
## fake function
return self
def evaluate(self, traj):
## main function to run BAGEL calculation and communicate with other PyRAIMD modules
## load trajectory info
self.nstate = traj.nstate
self.nnac = traj.nnac
self.nac_coupling = traj.nac_coupling
self.state = traj.state
self.activestate = traj.activestate
## compute properties
energy = []
gradient = []
nac = []
completion = 0
if self.runtype == 'qm':
energy, gradient, nac = self._qm(traj)
elif self.runtype == 'qmmm':
energy, gradient, nac = self._qmmm(traj)
if len(energy) >= self.nstate and len(gradient) >= self.nstate and len(nac) >= self.nnac:
completion = 1
## clean up
if self.keep_tmp == 0:
shutil.rmtree(self.workdir)
# update trajectory
traj.energy = np.copy(energy)
traj.grad = np.copy(gradient)
traj.nac = np.copy(nac)
traj.soc = np.zeros(0)
traj.err_energy = None
traj.err_grad = None
traj.err_nac = None
traj.err_soc = None
traj.status = completion
return traj
def train(self):
## fake function
return self
def load(self):
## fake function
return self
| 1.734375 | 2 |
methcomp/glucose.py | wptmdoorn/methcomp | 3 | 12757883 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import Point, Polygon
try:
import importlib.resources as pkg_resources
from importlib.resources import path
def path_func(pkg, file):
with path(pkg, file) as p:
return p
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources # type: ignore
def path_func(pkg, file):
return pkg_resources.files(pkg).joinpath(file)
__all__ = ["clarke", "parkes", "seg", "clarkezones", "parkeszones", "segscores"]
class _Clarke(object):
"""Internal class for drawing a Clarke-Error grid plotting"""
def __init__(
self,
reference,
test,
units,
x_title,
y_title,
graph_title,
xlim,
ylim,
color_grid,
color_gridlabels,
color_points,
grid,
percentage,
point_kws,
grid_kws,
):
# variables assignment
self.reference: np.array = np.asarray(reference)
self.test: np.array = np.asarray(test)
self.units = units
self.graph_title: str = graph_title
self.x_title: str = x_title
self.y_title: str = y_title
self.xlim: list = xlim
self.ylim: list = ylim
self.color_grid: str = color_grid
self.color_gridlabels: str = color_gridlabels
self.color_points: str = color_points
self.grid: bool = grid
self.percentage: bool = percentage
self.point_kws = {} if point_kws is None else point_kws.copy()
self.grid_kws = {} if grid_kws is None else grid_kws.copy()
self._check_params()
self._derive_params()
def _check_params(self):
if len(self.reference) != len(self.test):
raise ValueError("Length of reference and test values are not equal")
if self.units not in ["mmol", "mg/dl", "mgdl"]:
raise ValueError(
"The provided units should be one of the following: "
"mmol, mgdl or mg/dl."
)
if any(
[
x is not None and not isinstance(x, str)
for x in [self.x_title, self.y_title]
]
):
raise ValueError("Axes labels arguments should be provided as a str.")
def _derive_params(self):
if self.x_title is None:
_unit = "mmol/L" if "mmol" else "mg/dL"
self.x_title = "Reference glucose concentration ({})".format(_unit)
if self.y_title is None:
_unit = "mmol/L" if "mmol" else "mg/dL"
self.y_title = "Predicted glucose concentration ({})".format(_unit)
self.xlim = self.xlim or [0, 400]
self.ylim = self.ylim or [0, 400]
def _calc_error_zone(self):
# ref, pred
ref = self.reference
pred = self.test
# calculate conversion factor if needed
n = 18 if self.units == "mmol" else 1
        # initialize the array with ones: any value not matched by the rules
        # below automatically ends up in zone B (index 1)
_zones = np.ones(len(ref))
# absolute relative error = abs(bias)/reference*100
bias = pred - ref
are = abs(bias) / ref * 100
eq1 = (7 / 5) * (ref - 130 / n)
eq2 = ref + 110 / n
# zone E: (ref <= 70 and test >= 180) or (ref >=180 and test <=70)
zone_e = ((ref <= 70 / n) & (pred >= 180 / n)) | (
(ref >= 180 / n) & (pred <= 70 / n)
)
_zones[zone_e] = 4
# zone D: ref < 70 and (test > 70 and test < 180) or
# ref > 240 and (test > 70 and test < 180)
test_d = (pred >= 70 / n) & (
pred < 180 / n
) # error corrected >=70 instead of >70
zone_d = ((ref < 70 / n) & test_d) | ((ref > 240 / n) & test_d)
_zones[zone_d] = 3
# zone C: (ref >= 130 and ref <= 180 and test < eq1) or
# (ref > 70 and ref > 180 and ref > eq2)
zone_c = ((ref >= 130 / n) & (ref <= 180 / n) & (pred < eq1)) | (
(ref > 70 / n) & (pred > 180 / n) & (pred > eq2)
)
_zones[zone_c] = 2
# zone A: are <= 20 or (ref < 58.3 and test < 70)
zone_a = (are <= 20) | ((ref < 70 / n) & (pred < 70 / n))
_zones[zone_a] = 0
return [int(i) for i in _zones]
def plot(self, ax):
_gridlines = [
([0, 400], [0, 400], ":"),
([0, 175 / 3], [70, 70], "-"),
([175 / 3, 400 / 1.2], [70, 400], "-"),
([70, 70], [84, 400], "-"),
([0, 70], [180, 180], "-"),
([70, 290], [180, 400], "-"),
([70, 70], [0, 56], "-"),
([70, 400], [56, 320], "-"),
([180, 180], [0, 70], "-"),
([180, 400], [70, 70], "-"),
([240, 240], [70, 180], "-"),
([240, 400], [180, 180], "-"),
([130, 180], [0, 70], "-"),
]
colors = ["#196600", "#7FFF00", "#FF7B00", "#FF5700", "#FF0000"]
_gridlabels = [
(30, 15, "A", colors[0]),
(370, 260, "B", colors[1]),
(280, 370, "B", colors[1]),
(160, 370, "C", colors[2]),
(160, 15, "C", colors[2]),
(30, 140, "D", colors[3]),
(370, 120, "D", colors[3]),
(30, 370, "E", colors[4]),
(370, 15, "E", colors[4]),
]
# calculate conversion factor if needed
n = 18 if self.units == "mmol" else 1
# plot individual points
if self.color_points == "auto":
ax.scatter(
self.reference,
self.test,
marker="o",
alpha=0.6,
c=[colors[i] for i in self._calc_error_zone()],
s=8,
**self.point_kws
)
else:
ax.scatter(
self.reference,
self.test,
marker="o",
color=self.color_points,
alpha=0.6,
s=8,
**self.point_kws
)
# plot grid lines
if self.grid:
for g in _gridlines:
ax.plot(
np.array(g[0]) / n,
np.array(g[1]) / n,
g[2],
color=self.color_grid,
**self.grid_kws
)
if self.percentage:
zones = [["A", "B", "C", "D", "E"][i] for i in self._calc_error_zone()]
for label in _gridlabels:
ax.text(
label[0] / n,
label[1] / n,
label[2],
fontsize=12,
fontweight="bold",
color=label[3]
if self.color_gridlabels == "auto"
else self.color_gridlabels,
)
ax.text(
label[0] / n + (8 / n),
label[1] / n + (8 / n),
"{:.1f}".format((zones.count(label[2]) / len(zones)) * 100),
fontsize=9,
fontweight="bold",
color=label[3]
if self.color_gridlabels == "auto"
else self.color_gridlabels,
)
else:
for label in _gridlabels:
ax.text(
label[0] / n,
label[1] / n,
label[2],
fontsize=12,
fontweight="bold",
color=label[3]
if self.color_gridlabels == "auto"
else self.color_gridlabels,
)
# limits and ticks
ax.set_xlim(self.xlim[0] / n, self.xlim[1] / n)
ax.set_ylim(self.ylim[0] / n, self.ylim[1] / n)
# graph labels
ax.set_ylabel(self.y_title)
ax.set_xlabel(self.x_title)
if self.graph_title is not None:
ax.set_title(self.graph_title)
def clarke(
reference,
test,
units,
x_label=None,
y_label=None,
title=None,
xlim=None,
ylim=None,
color_grid="#000000",
color_gridlabels="auto",
color_points="auto",
grid=True,
percentage=False,
point_kws=None,
grid_kws=None,
square=False,
ax=None,
):
"""Provide a glucose error grid analyses as designed by Clarke.
This is an Axis-level function which will draw the Clarke-error grid plot.
onto the current active Axis object unless ``ax`` is provided.
Parameters
----------
reference, test : array, or list
Glucose values obtained from the reference and predicted methods, preferably
provided in a np.array.
units : str
The SI units which the glucose values are provided in.
Options: 'mmol', 'mgdl' or 'mg/dl'.
x_label : str, optional
The label which is added to the X-axis. If None is provided, a standard
label will be added.
y_label : str, optional
The label which is added to the Y-axis. If None is provided, a standard
label will be added.
title : str, optional
Title of the Clarke error grid plot. If None is provided, no title will
be plotted.
xlim : list, optional
Minimum and maximum limits for X-axis. Should be provided as list or tuple.
If not set, matplotlib will decide its own bounds.
ylim : list, optional
Minimum and maximum limits for Y-axis. Should be provided as list or tuple.
If not set, matplotlib will decide its own bounds.
color_grid : str, optional
Color of the Clarke error grid lines.
color_gridlabels : str, optional
Color of the gridlabels (A, B, C, ..) that will be plotted. If set to 'auto',
it will plot the points according to their zones.
color_points : str, optional
Color of the individual differences that will be plotted. If set to 'auto',
it will plot the points according to their zones.
grid : bool, optional
Enable the grid lines of the Parkes error. Defaults to True.
percentage : bool, optional
If True, percentage of the zones will be depicted in the plot.
point_kws : dict of key, value mappings, optional
Additional keyword arguments for `plt.scatter`.
grid_kws : dict of key, value mappings, optional
Additional keyword arguments for the grid with `plt.plot`.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be square-shaped.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
Returns
-------
ax : matplotlib Axes
Axes object with the Clarke-error grid plot.
    References
    ----------
<NAME>., <NAME>., et al. Diabetes Care, vol. 10, no. 5, 1987, pp. 622–628.
"""
plotter: _Clarke = _Clarke(
reference,
test,
units,
x_label,
y_label,
title,
xlim,
ylim,
color_grid,
color_gridlabels,
color_points,
grid,
percentage,
point_kws,
grid_kws,
)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax)
return ax
def clarkezones(reference, test, units, numeric=False):
"""Provides the error zones as depicted by the
Clarke error grid analysis for each point in the reference and test datasets.
Parameters
----------
reference, test : array, or list
Glucose values obtained from the reference and predicted methods, preferably
provided in a np.array.
units : str
The SI units which the glucose values are provided in.
Options: 'mmol', 'mgdl' or 'mg/dl'.
numeric : bool, optional
If this is set to true, returns integers (0 to 4) instead of characters for
each of the zones.
Returns
-------
clarkezones : list
        Returns a list depicting the zones for each of the reference and test values.
"""
# obtain zones from a Clarke reference object
_zones = _Clarke(
reference,
test,
units,
None,
None,
None,
None,
None,
"#000000",
"auto",
"auto",
True,
False,
None,
None,
)._calc_error_zone()
if numeric:
return _zones
else:
labels = ["A", "B", "C", "D", "E"]
return [labels[i] for i in _zones]
class _Parkes(object):
"""Internal class for drawing a Parkes consensus error grid plot"""
def __init__(
self,
type,
reference,
test,
units,
x_title,
y_title,
graph_title,
xlim,
ylim,
color_grid,
color_gridlabels,
color_points,
grid,
percentage,
point_kws,
grid_kws,
):
# variables assignment
self.type: int = type
self.reference: np.array = np.asarray(reference)
self.test: np.array = np.asarray(test)
self.units = units
self.graph_title: str = graph_title
self.x_title: str = x_title
self.y_title: str = y_title
self.xlim: list = xlim
self.ylim: list = ylim
self.color_grid: str = color_grid
self.color_gridlabels: str = color_gridlabels
self.color_points: str = color_points
self.grid: bool = grid
self.percentage: bool = percentage
self.point_kws = {} if point_kws is None else point_kws.copy()
self.grid_kws = {} if grid_kws is None else grid_kws.copy()
self._check_params()
self._derive_params()
def _check_params(self):
if self.type != 1 and self.type != 2:
raise ValueError("Type of Diabetes should either be 1 or 2.")
if len(self.reference) != len(self.test):
raise ValueError("Length of reference and test values are not equal")
if self.units not in ["mmol", "mg/dl", "mgdl"]:
raise ValueError(
"The provided units should be one of the following:"
" mmol, mgdl or mg/dl."
)
if any(
[
x is not None and not isinstance(x, str)
for x in [self.x_title, self.y_title]
]
):
raise ValueError("Axes labels arguments should be provided as a str.")
def _derive_params(self):
if self.x_title is None:
_unit = "mmol/L" if "mmol" else "mg/dL"
self.x_title = "Reference glucose concentration ({})".format(_unit)
if self.y_title is None:
_unit = "mmol/L" if "mmol" else "mg/dL"
self.y_title = "Predicted glucose concentration ({})".format(_unit)
def _coef(self, x, y, xend, yend):
if xend == x:
raise ValueError("Vertical line - function inapplicable")
return (yend - y) / (xend - x)
def _endy(self, startx, starty, maxx, coef):
return (maxx - startx) * coef + starty
def _endx(self, startx, starty, maxy, coef):
return (maxy - starty) / coef + startx
def _calc_error_zone(self):
# ref, pred
ref = self.reference
pred = self.test
# calculate conversion factor if needed
n = 18 if self.units == "mmol" else 1
maxX = max(max(ref) + 20 / n, 550 / n)
maxY = max([*(np.array(pred) + 20 / n), maxX, 550 / n])
        # initialize the array with zeros: any value not matched by the
        # polygons below automatically ends up in zone A (index 0)
_zones = np.zeros(len(ref))
if self.type == 1:
ce = self._coef(35, 155, 50, 550)
cdu = self._coef(80, 215, 125, 550)
cdl = self._coef(250, 40, 550, 150)
ccu = self._coef(70, 110, 260, 550)
ccl = self._coef(260, 130, 550, 250)
cbu = self._coef(280, 380, 430, 550)
cbl = self._coef(385, 300, 550, 450)
limitE1 = Polygon(
[
(x, y)
for x, y in zip(
[0, 35 / n, self._endx(35 / n, 155 / n, maxY, ce), 0, 0],
[150 / n, 155 / n, maxY, maxY, 150 / n],
)
]
)
limitD1L = Polygon(
[
(x, y)
for x, y in zip(
[250 / n, 250 / n, maxX, maxX, 250 / n],
[0, 40 / n, self._endy(410 / n, 110 / n, maxX, cdl), 0, 0],
)
]
)
limitD1U = Polygon(
[
(x, y)
for x, y in zip(
[
0,
25 / n,
50 / n,
80 / n,
self._endx(80 / n, 215 / n, maxY, cdu),
0,
0,
],
[100 / n, 100 / n, 125 / n, 215 / n, maxY, maxY, 100 / n],
)
]
)
limitC1L = Polygon(
[
(x, y)
for x, y in zip(
[120 / n, 120 / n, 260 / n, maxX, maxX, 120 / n],
[
0,
30 / n,
130 / n,
self._endy(260 / n, 130 / n, maxX, ccl),
0,
0,
],
)
]
)
limitC1U = Polygon(
[
(x, y)
for x, y in zip(
[
0,
30 / n,
50 / n,
70 / n,
self._endx(70 / n, 110 / n, maxY, ccu),
0,
0,
],
[60 / n, 60 / n, 80 / n, 110 / n, maxY, maxY, 60 / n],
)
]
)
limitB1L = Polygon(
[
(x, y)
for x, y in zip(
[50 / n, 50 / n, 170 / n, 385 / n, maxX, maxX, 50 / n],
[
0,
30 / n,
145 / n,
300 / n,
self._endy(385 / n, 300 / n, maxX, cbl),
0,
0,
],
)
]
)
limitB1U = Polygon(
[
(x, y)
for x, y in zip(
[
0,
30 / n,
140 / n,
280 / n,
self._endx(280 / n, 380 / n, maxY, cbu),
0,
0,
],
[50 / n, 50 / n, 170 / n, 380 / n, maxY, maxY, 50 / n],
)
]
)
for i, points in enumerate(zip(ref, pred)):
for f, r in zip(
[
limitB1L,
limitB1U,
limitC1L,
limitC1U,
limitD1L,
limitD1U,
limitE1,
],
[1, 1, 2, 2, 3, 3, 4],
):
if f.contains(Point(points[0], points[1])):
_zones[i] = r
return [int(i) for i in _zones]
elif self.type == 2:
ce = self._coef(35, 200, 50, 550)
cdu = self._coef(35, 90, 125, 550)
cdl = self._coef(410, 110, 550, 160)
ccu = self._coef(30, 60, 280, 550)
ccl = self._coef(260, 130, 550, 250)
cbu = self._coef(230, 330, 440, 550)
cbl = self._coef(330, 230, 550, 450)
limitE2 = Polygon(
[
(x, y)
for x, y in zip(
[
0,
35 / n,
self._endx(35 / n, 200 / n, maxY, ce),
0,
0,
], # x limits E upper
[200 / n, 200 / n, maxY, maxY, 200 / n],
)
]
) # y limits E upper
limitD2L = Polygon(
[
(x, y)
for x, y in zip(
[
250 / n,
250 / n,
410 / n,
maxX,
maxX,
250 / n,
], # x limits D lower
[
0,
40 / n,
110 / n,
self._endy(410 / n, 110 / n, maxX, cdl),
0,
0,
],
)
]
) # y limits D lower
limitD2U = Polygon(
[
(x, y)
for x, y in zip(
[
0,
25 / n,
35 / n,
self._endx(35 / n, 90 / n, maxY, cdu),
0,
0,
], # x limits D upper
[80 / n, 80 / n, 90 / n, maxY, maxY, 80 / n],
)
]
) # y limits D upper
limitC2L = Polygon(
[
(x, y)
for x, y in zip(
[90 / n, 260 / n, maxX, maxX, 90 / n], # x limits C lower
[0, 130 / n, self._endy(260 / n, 130 / n, maxX, ccl), 0, 0],
)
]
) # y limits C lower
limitC2U = Polygon(
[
(x, y)
for x, y in zip(
[
0,
30 / n,
self._endx(30 / n, 60 / n, maxY, ccu),
0,
0,
], # x limits C upper
[60 / n, 60 / n, maxY, maxY, 60 / n],
)
]
) # y limits C upper
limitB2L = Polygon(
[
(x, y)
for x, y in zip(
[
50 / n,
50 / n,
90 / n,
330 / n,
maxX,
maxX,
50 / n,
], # x limits B lower
[
0,
30 / n,
80 / n,
230 / n,
self._endy(330 / n, 230 / n, maxX, cbl),
0,
0,
],
)
]
) # y limits B lower
limitB2U = Polygon(
[
(x, y)
for x, y in zip(
[
0,
30 / n,
230 / n,
self._endx(230 / n, 330 / n, maxY, cbu),
0,
0,
], # x limits B upper
[50 / n, 50 / n, 330 / n, maxY, maxY, 50 / n],
)
]
) # y limits B upper
for i, points in enumerate(zip(ref, pred)):
for f, r in zip(
[
limitB2L,
limitB2U,
limitC2L,
limitC2U,
limitD2L,
limitD2U,
limitE2,
],
[1, 1, 2, 2, 3, 3, 4],
):
if f.contains(Point(points[0], points[1])):
_zones[i] = r
return [int(i) for i in _zones]
def plot(self, ax):
# ref, pred
ref = self.reference
pred = self.test
# calculate conversion factor if needed
n = 18 if self.units == "mmol" else 1
maxX = self.xlim or max(max(ref) + 20 / n, 550 / n)
maxY = self.ylim or max([*(np.array(pred) + 20 / n), maxX, 550 / n])
if self.type == 1:
ce = self._coef(35, 155, 50, 550)
cdu = self._coef(80, 215, 125, 550)
cdl = self._coef(250, 40, 550, 150)
ccu = self._coef(70, 110, 260, 550)
ccl = self._coef(260, 130, 550, 250)
cbu = self._coef(280, 380, 430, 550)
cbl = self._coef(385, 300, 550, 450)
_gridlines = [
([0, min(maxX, maxY)], [0, min(maxX, maxY)], ":"),
([0, 30 / n], [50 / n, 50 / n], "-"),
([30 / n, 140 / n], [50 / n, 170 / n], "-"),
([140 / n, 280 / n], [170 / n, 380 / n], "-"),
(
[280 / n, self._endx(280 / n, 380 / n, maxY, cbu)],
[380 / n, maxY],
"-",
),
([50 / n, 50 / n], [0 / n, 30 / n], "-"),
([50 / n, 170 / n], [30 / n, 145 / n], "-"),
([170 / n, 385 / n], [145 / n, 300 / n], "-"),
(
[385 / n, maxX],
[300 / n, self._endy(385 / n, 300 / n, maxX, cbl)],
"-",
),
([0 / n, 30 / n], [60 / n, 60 / n], "-"),
([30 / n, 50 / n], [60 / n, 80 / n], "-"),
([50 / n, 70 / n], [80 / n, 110 / n], "-"),
(
[70 / n, self._endx(70 / n, 110 / n, maxY, ccu)],
[110 / n, maxY],
"-",
),
([120 / n, 120 / n], [0 / n, 30 / n], "-"),
([120 / n, 260 / n], [30 / n, 130 / n], "-"),
(
[260 / n, maxX],
[130 / n, self._endy(260 / n, 130 / n, maxX, ccl)],
"-",
),
([0 / n, 25 / n], [100 / n, 100 / n], "-"),
([25 / n, 50 / n], [100 / n, 125 / n], "-"),
([50 / n, 80 / n], [125 / n, 215 / n], "-"),
(
[80 / n, self._endx(80 / n, 215 / n, maxY, cdu)],
[215 / n, maxY],
"-",
),
([250 / n, 250 / n], [0 / n, 40 / n], "-"),
(
[250 / n, maxX],
[40 / n, self._endy(410 / n, 110 / n, maxX, cdl)],
"-",
),
([0 / n, 35 / n], [150 / n, 155 / n], "-"),
([35 / n, self._endx(35 / n, 155 / n, maxY, ce)], [155 / n, maxY], "-"),
]
elif self.type == 2:
ce = self._coef(35, 200, 50, 550)
cdu = self._coef(35, 90, 125, 550)
cdl = self._coef(410, 110, 550, 160)
ccu = self._coef(30, 60, 280, 550)
ccl = self._coef(260, 130, 550, 250)
cbu = self._coef(230, 330, 440, 550)
cbl = self._coef(330, 230, 550, 450)
_gridlines = [
([0, min(maxX, maxY)], [0, min(maxX, maxY)], ":"),
([0, 30 / n], [50 / n, 50 / n], "-"),
([30 / n, 230 / n], [50 / n, 330 / n], "-"),
(
[230 / n, self._endx(230 / n, 330 / n, maxY, cbu)],
[330 / n, maxY],
"-",
),
([50 / n, 50 / n], [0 / n, 30 / n], "-"),
([50 / n, 90 / n], [30 / n, 80 / n], "-"),
([90 / n, 330 / n], [80 / n, 230 / n], "-"),
(
[330 / n, maxX],
[230 / n, self._endy(330 / n, 230 / n, maxX, cbl)],
"-",
),
([0 / n, 30 / n], [60 / n, 60 / n], "-"),
([30 / n, self._endx(30 / n, 60 / n, maxY, ccu)], [60 / n, maxY], "-"),
([90 / n, 260 / n], [0 / n, 130 / n], "-"),
(
[260 / n, maxX],
[130 / n, self._endy(260 / n, 130 / n, maxX, ccl)],
"-",
),
([0 / n, 25 / n], [80 / n, 80 / n], "-"),
([25 / n, 35 / n], [80 / n, 90 / n], "-"),
([35 / n, self._endx(35 / n, 90 / n, maxY, cdu)], [90 / n, maxY], "-"),
([250 / n, 250 / n], [0 / n, 40 / n], "-"),
([250 / n, 410 / n], [40 / n, 110 / n], "-"),
(
[410 / n, maxX],
[110 / n, self._endy(410 / n, 110 / n, maxX, cdl)],
"-",
),
([0 / n, 35 / n], [200 / n, 200 / n], "-"),
([35 / n, self._endx(35 / n, 200 / n, maxY, ce)], [200 / n, maxY], "-"),
]
colors = ["#196600", "#7FFF00", "#FF7B00", "#FF5700", "#FF0000"]
_gridlabels = [
(600, 600, "A", colors[0]),
(360, 600, "B", colors[1]),
(600, 355, "B", colors[1]),
(165, 600, "C", colors[2]),
(600, 215, "C", colors[2]),
(600, 50, "D", colors[3]),
(75, 600, "D", colors[3]),
(5, 600, "E", colors[4]),
]
# plot individual points
if self.color_points == "auto":
ax.scatter(
self.reference,
self.test,
marker="o",
alpha=0.6,
c=[colors[i] for i in self._calc_error_zone()],
s=8,
**self.point_kws
)
else:
ax.scatter(
self.reference,
self.test,
marker="o",
color=self.color_points,
alpha=0.6,
s=8,
**self.point_kws
)
# plot grid lines
if self.grid:
for g in _gridlines:
ax.plot(
np.array(g[0]),
np.array(g[1]),
g[2],
color=self.color_grid,
**self.grid_kws
)
if self.percentage:
zones = [["A", "B", "C", "D", "E"][i] for i in self._calc_error_zone()]
for label in _gridlabels:
ax.text(
label[0] / n,
label[1] / n,
label[2],
fontsize=12,
fontweight="bold",
color=label[3]
if self.color_gridlabels == "auto"
else self.color_gridlabels,
)
ax.text(
label[0] / n + (18 / n),
label[1] / n + (18 / n),
"{:.1f}".format((zones.count(label[2]) / len(zones)) * 100),
fontsize=9,
fontweight="bold",
color=label[3]
if self.color_gridlabels == "auto"
else self.color_gridlabels,
)
else:
for label in _gridlabels:
ax.text(
label[0] / n,
label[1] / n,
label[2],
fontsize=12,
fontweight="bold",
color=label[3]
if self.color_gridlabels == "auto"
else self.color_gridlabels,
)
# limits and ticks
_ticks = [
70,
100,
150,
180,
240,
300,
350,
400,
450,
500,
550,
600,
650,
700,
750,
800,
850,
900,
950,
1000,
]
ax.set_xticks([round(x / n, 1) for x in _ticks])
ax.set_yticks([round(x / n, 1) for x in _ticks])
ax.set_xlim(0, maxX)
ax.set_ylim(0, maxY)
# graph labels
ax.set_ylabel(self.y_title)
ax.set_xlabel(self.x_title)
if self.graph_title is not None:
ax.set_title(self.graph_title)
def parkes(
type,
reference,
test,
units,
x_label=None,
y_label=None,
title=None,
xlim=None,
ylim=None,
color_grid="#000000",
color_gridlabels="auto",
color_points="auto",
grid=True,
percentage=False,
point_kws=None,
grid_kws=None,
square=False,
ax=None,
):
"""Provide a glucose error grid analyses as designed by Parkes.
This is an Axis-level function which will draw the Parke-error grid plot.
onto the current active Axis object unless ``ax`` is provided.
Parameters
----------
type : int
        The Parkes error grid differs for each type of diabetes. This should be
        either 1 or 2, corresponding to the type of diabetes.
reference, test : array, or list
Glucose values obtained from the reference and predicted methods, preferably
provided in a np.array.
units : str
The SI units which the glucose values are provided in.
Options: 'mmol', 'mgdl' or 'mg/dl'.
x_label : str, optional
The label which is added to the X-axis. If None is provided, a standard
label will be added.
y_label : str, optional
The label which is added to the Y-axis. If None is provided, a standard
label will be added.
title : str, optional
Title of the Parkes-error grid plot. If None is provided, no title will be
plotted.
xlim : list, optional
Minimum and maximum limits for X-axis. Should be provided as list or tuple.
If not set, matplotlib will decide its own bounds.
ylim : list, optional
Minimum and maximum limits for Y-axis. Should be provided as list or tuple.
If not set, matplotlib will decide its own bounds.
color_grid : str, optional
Color of the Clarke error grid lines. Defaults to #000000 which represents
the black color.
color_gridlabels : str, optional
Color of the grid labels (A, B, C, ..) that will be plotted.
Defaults to 'auto' which colors the points according to their relative zones.
color_points : str, optional
Color of the individual differences that will be plotted. Defaults to 'auto'
which colors the points according to their relative zones.
grid : bool, optional
Enable the grid lines of the Parkes error. Defaults to True.
percentage : bool, optional
If True, percentage of the zones will be depicted in the plot.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be square-shaped.
point_kws : dict of key, value mappings, optional
Additional keyword arguments for `plt.scatter`.
grid_kws : dict of key, value mappings, optional
Additional keyword arguments for the grid with `plt.plot`.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
Returns
-------
ax : matplotlib Axes
Axes object with the Parkes error grid plot.
References
----------
[parkes_2000] <NAME>., <NAME>. et al.
Diabetes Care, vol. 23, no. 8, 2000, pp. 1143-1148.
[pfutzner_2013] <NAME>., <NAME>., et al.
J Diabetes Sci Technol, vol. 7, no. 5, 2013, pp. 1275-1281.
"""
plotter: _Parkes = _Parkes(
type,
reference,
test,
units,
x_label,
y_label,
title,
xlim,
ylim,
color_grid,
color_gridlabels,
color_points,
grid,
percentage,
point_kws,
grid_kws,
)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax)
return ax
def parkeszones(type, reference, test, units, numeric=False):
"""Provides the error zones as depicted by the
Parkes error grid analysis for each point in the reference and test datasets.
Parameters
----------
type : int
        The Parkes error grid differs for each type of diabetes. This should be
        either 1 or 2, corresponding to the type of diabetes.
reference, test : array, or list
Glucose values obtained from the reference and predicted methods,
preferably provided in a np.array.
units : str
The SI units which the glucose values are provided in.
Options: 'mmol', 'mgdl' or 'mg/dl'.
numeric : bool, optional
If this is set to true, returns integers (0 to 4) instead of characters
for each of the zones.
Returns
-------
parkeszones : list
Returns a list depicting the zones for each of the reference and test values.
"""
    # obtain zones from a Parkes reference object
    # (positional arguments follow the _Parkes signature: grid colors come
    # before the grid/percentage flags)
    _zones = _Parkes(
        type,
        reference,
        test,
        units,
        None,
        None,
        None,
        None,
        None,
        "#000000",
        "auto",
        "auto",
        True,
        False,
        None,
        None,
    )._calc_error_zone()
if numeric:
return _zones
else:
labels = ["A", "B", "C", "D", "E"]
return [labels[i] for i in _zones]
class _SEG(object):
"""Internal class for drawing a surveillance error grid error grid plot"""
def __init__(
self,
reference,
test,
units,
x_title,
y_title,
graph_title,
xlim,
ylim,
percentage,
point_kws,
):
# variables assignment
self.reference: np.array = np.asarray(reference)
self.test: np.array = np.asarray(test)
self.units = units
self.graph_title: str = graph_title
self.x_title: str = x_title
self.y_title: str = y_title
self.xlim: list = xlim
self.ylim: list = ylim
self.percentage: bool = percentage
self.point_kws = {} if point_kws is None else point_kws.copy()
self._check_params()
self._derive_params()
def _check_params(self):
if len(self.reference) != len(self.test):
raise ValueError("Length of reference and test values are not equal")
if self.units not in ["mmol", "mg/dl", "mgdl"]:
raise ValueError(
"The provided units should be one of the following: "
"mmol, mgdl or mg/dl."
)
if any(
[
x is not None and not isinstance(x, str)
for x in [self.x_title, self.y_title]
]
):
raise ValueError("Axes labels arguments should be provided as a str.")
def _derive_params(self):
if self.x_title is None:
_unit = "mmol/L" if "mmol" else "mg/dL"
self.x_title = "Reference glucose concentration ({})".format(_unit)
if self.y_title is None:
_unit = "mmol/L" if "mmol" else "mg/dL"
self.y_title = "Predicted glucose concentration ({})".format(_unit)
def _calc_error_score(self):
n = 18 if self.units == "mmol" else 1
ref = self.reference * n
pred = self.test * n
_zones = []
from . import static # temporary fix
data = np.loadtxt(pkg_resources.open_text(static, "seg.csv"))
_zones = np.array([data.T[int(p), int(t)] for p, t in zip(pred, ref)])
return _zones
def plot(self, ax):
# ref, pred
# ref = self.reference NOT USED
# pred = self.test NOT USED
from . import static # temporary fix
# _data = np.loadtxt(pkg_resources.open_text(static, "seg.csv"))
# calculate conversion factor if needed
n = 18 if self.units == "mmol" else 1
maxX = self.xlim or 600
maxY = self.ylim or 600
# Define colormaps
_colors = [
(0, 165 / 256, 0),
(0, 255 / 256, 0),
(255 / 256, 255 / 256, 0),
(255 / 256, 0, 0),
(128 / 256, 0, 0),
]
_nodes = [0.0, 0.4375, 1.0625, 2.7500, 4.000]
cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
"", list(zip([x / 4 for x in _nodes], _colors))
)
# Plot color axes
# from . import static # temporary fix # ALREADY DONE
grid_path = str(path_func(static, "seg600.png"))
cax = ax.imshow(
np.flipud(np.array(plt.imread(grid_path))),
origin="lower",
cmap=cmap,
vmin=0,
vmax=4,
)
# Plot color bar
cbar = plt.colorbar(
cax,
ticks=[0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
orientation="vertical",
fraction=0.15,
aspect=6,
)
cbar.ax.tick_params(labelsize=8)
cbar.ax.yaxis.set_label_position("left")
cbar.ax.set_ylabel("Risk score")
# Separators
for s in [0, 0.5, 1.5, 2.5, 3.5, 4]:
cbar.ax.plot(
[6, 6.5], [s] * 2, "-", color="black", lw=1, alpha=1, clip_on=False
)
# Labels
for label in [
(0.25, "None"),
(1.0, "Slight"),
(2.0, "Moderate"),
(3.0, "High"),
(3.75, "Extreme"),
]:
cbar.ax.text(
6.2,
label[0] - 0.008,
label[1],
ha="left",
va="center",
rotation=0,
fontsize=10,
)
if self.percentage:
seg_scores = self._calc_error_score()
_zones_sub = [[] for _ in range(8)]
edges = list(np.arange(0, 4.5, 0.5))
for x in range(len(edges) - 1):
_zones_sub[x] = np.array(
seg_scores[(seg_scores >= edges[x]) & (seg_scores < edges[x + 1])]
)
perc_zones = [(len(x) / len(seg_scores)) * 100 for x in _zones_sub]
for i, x in enumerate(perc_zones):
cbar.ax.plot(
[0, 5], [(i * 0.5) + 0.5] * 2, "--", color="grey", lw=1, alpha=0.6
)
if x > 0:
if round(x, 2) == 0:
_str = "<0.01%"
else:
_str = "{:.2f}%".format(x)
cbar.ax.text(
2, (i * 0.5) + 0.25, _str, ha="center", va="center", fontsize=9
)
ax.scatter(
self.reference * n,
self.test * n,
marker="o",
edgecolors="black",
facecolors="white",
alpha=0.8,
s=8,
**self.point_kws
)
# limits and ticks
_ticks = [0, 90, 180, 270, 360, 450, 540]
ax.set_xticks([round(x, 1) for x in _ticks])
ax.set_yticks([round(x, 1) for x in _ticks])
ax.set_xticklabels([round(x / n, 1) for x in _ticks])
ax.set_yticklabels([round(x / n, 1) for x in _ticks])
ax.set_xlim(0, maxX)
ax.set_ylim(0, maxY)
# graph labels
ax.set_ylabel(self.y_title)
ax.set_xlabel(self.x_title)
if self.graph_title is not None:
ax.set_title(self.graph_title)
def seg(
reference,
test,
units,
x_label=None,
y_label=None,
title=None,
xlim=None,
ylim=None,
percentage=False,
point_kws=None,
square=False,
ax=None,
):
"""Provide a glucose error grid analyses as designed by the surveillance error grid.
This is an Axis-level function which will draw the surveillance error grid plot.
onto the current active Axis object unless ``ax`` is provided.
Parameters
----------
reference, test : array, or list
Glucose values obtained from the reference and predicted methods,
preferably provided in a np.array.
units : str
The SI units which the glucose values are provided in.
Options: 'mmol', 'mgdl' or 'mg/dl'.
x_label : str, optional
The label which is added to the X-axis. If None is provided, a standard
label will be added.
y_label : str, optional
The label which is added to the Y-axis. If None is provided, a standard
label will be added.
title : str, optional
Title of the plot. If None is provided, no title will be plotted.
xlim : list, optional
Minimum and maximum limits for X-axis. Should be provided as list or tuple.
If not set, matplotlib will decide its own bounds.
ylim : list, optional
Minimum and maximum limits for Y-axis. Should be provided as list or tuple.
If not set, matplotlib will decide its own bounds.
percentage : bool, optional
If True, percentage of the zones will be depicted in the plot.
point_kws : dict of key, value mappings, optional
Additional keyword arguments for `plt.scatter`.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be square-shaped.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
Returns
-------
ax : matplotlib Axes
Axes object with the Surveillance error grid plot.
References
    ----------
[klonoff_2014] <NAME>., <NAME>., et al.
J Diabetes Sci Technol, vol. 8, no. 4, 2014, pp 658-672.
[kovatchev_2014] <NAME>., <NAME>., et al.
J Diabetes Sci Technol, vol 8, no. 4, 2014, pp. 673-684.
"""
plotter: _SEG = _SEG(
reference,
test,
units,
x_label,
y_label,
title,
xlim,
ylim,
percentage,
point_kws,
)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax)
return ax
def segscores(reference, test, units):
"""Provides the raw error values as depicted by the
surveillance error grid analysis for each point in the reference and test datasets.
Parameters
----------
reference, test : array, or list
Glucose values obtained from the reference and predicted methods,
preferably provided in a np.array.
units : str
The SI units which the glucose values are provided in.
Options: 'mmol', 'mgdl' or 'mg/dl'.
Returns
-------
segscores : list
Returns a list with a SEG score for each test, reference pair.
"""
# obtain zones from a Clarke reference object
_zones = _SEG(
reference, test, units, None, None, None, None, None, None, None
)._calc_error_score()
return _zones
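# A minimal usage sketch of the two entry points above; the glucose arrays
# are synthetic illustrations, not data from the original module.
if __name__ == "__main__":
    _ref = np.array([90, 120, 180, 250, 320])    # reference glucose, mg/dl
    _est = np.array([95, 110, 200, 240, 350])    # estimated glucose, mg/dl
    _ax = seg(_ref, _est, units="mgdl", percentage=True, square=True)
    print(segscores(_ref, _est, units="mgdl"))   # one SEG risk score per pair
    plt.show()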
| 2.375 | 2 |
onepercentclub/tests/factory_models/donation_factories.py | jfterpstra/onepercentclub-site | 7 | 12757884 | import datetime
import factory
import uuid
from apps.fund.models import Donation, Order
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from onepercentclub.tests.factory_models.project_factories import OnePercentProjectFactory
def random_order_number(length=30):
return unicode(uuid.uuid4().hex)[0:length]
class OrderFactory(factory.DjangoModelFactory):
FACTORY_FOR = Order
user = factory.SubFactory(BlueBottleUserFactory)
order_number = factory.LazyAttribute(lambda t: random_order_number())
class DonationFactory(factory.DjangoModelFactory):
FACTORY_FOR = Donation
user = factory.SubFactory(BlueBottleUserFactory)
amount = 20
project = factory.SubFactory(OnePercentProjectFactory)
order = factory.SubFactory(OrderFactory)
status = 'pending'
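# A brief usage sketch (assumes a Django test context with database access,
# as is usual for DjangoModelFactory subclasses; the field values below are
# illustrative only):
# donation = DonationFactory.create(amount=50, status='paid')
# order = OrderFactory.create()
# donations = DonationFactory.create_batch(3, order=order)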
| 2.3125 | 2 |
1401-1500/1476-Subrectangle Queries/1476-Subrectangle Queries.py | jiadaizhao/LeetCode | 49 | 12757885 | class SubrectangleQueries:
def __init__(self, rectangle: List[List[int]]):
self.rec = rectangle
self.newRec = []
def updateSubrectangle(self, row1: int, col1: int, row2: int, col2: int, newValue: int) -> None:
self.newRec.append((row1, col1, row2, col2, newValue))
def getValue(self, row: int, col: int) -> int:
for i in range(len(self.newRec) - 1, -1, -1):
if self.newRec[i][0] <= row <= self.newRec[i][2] and self.newRec[i][1] <= col <= self.newRec[i][3]:
return self.newRec[i][4]
return self.rec[row][col]
# Your SubrectangleQueries object will be instantiated and called as such:
# obj = SubrectangleQueries(rectangle)
# obj.updateSubrectangle(row1,col1,row2,col2,newValue)
# param_2 = obj.getValue(row,col)
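# Design note: updateSubrectangle is O(1) because it only records the update,
# while getValue is O(k) in the number of recorded updates, scanning newest to
# oldest so the most recent covering rectangle wins. This trades read cost for
# cheap writes; eagerly rewriting every cell would instead make each update
# O(rows * cols) and reads O(1).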
| 3.3125 | 3 |
tests/nnapi/specs/Ex/transpose_conv_ex_float_1.mod.py | bogus-sudo/ONE-1 | 255 | 12757886 | <gh_stars>100-1000
# model
model = Model()
i0 = Input("op_shape", "TENSOR_INT32", "{4}")
weights = Parameter("ker", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}" )
pad = Int32Scalar("pad_same", 1)
s_x = Int32Scalar("stride_x", 1)
s_y = Int32Scalar("stride_y", 1)
i2 = Output("op", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
model = model.Operation("TRANSPOSE_CONV_EX", i0, weights, i1, pad, s_x, s_y).To(i2)
# Example 1. Input in operand 0,
input0 = {i0: # output shape
[1, 4, 4, 1],
i1: # input 0
[1.0, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0]}
output0 = {i2: # output 0
[29.0, 62.0, 83.0, 75.0,
99.0, 192.0, 237.0, 198.0,
207.0, 372.0, 417.0, 330.0,
263.0, 446.0, 485.0, 365.0]}
# Instantiate an example
Example((input0, output0))
| 2.203125 | 2 |
django_photos/photos/forms.py | StGerman/assigments | 0 | 12757887 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Photo
class PhotoForm(forms.ModelForm):
    # CharField is the correct field type here; TextInput is a widget and is
    # silently ignored when assigned directly as a ModelForm attribute
    name = forms.CharField()
image = forms.ImageField()
class Meta:
model = Photo
fields = ["name", "image"]
class RegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ["username", "email", "<PASSWORD>", "<PASSWORD>"]
| 2.15625 | 2 |
test/service/task/test_task.py | HansBug/pji | 0 | 12757888 | import codecs
import os
import tempfile
import pytest
from pji.control.model import Identification, ResourceLimit
from .base import TASK_TEMPLATE_SUCCESS_1, TASK_TEMPLATE_SUCCESS_2
from ..section.section.base import COMPLEX_TEXT
# noinspection DuplicatedCode
@pytest.mark.unittest
class TestServiceTaskTask:
def test_task_simple(self):
tt = TASK_TEMPLATE_SUCCESS_1
with tempfile.TemporaryDirectory() as scriptdir:
with codecs.open(os.path.join(scriptdir, 'README.md'), 'w') as of:
of.write(COMPLEX_TEXT)
t = tt(
scriptdir=scriptdir,
resources=dict(max_real_time='1.0s'),
environ=dict(K='233'),
)
assert t.name == 'task1_x233x'
assert t.identification == Identification.loads('nobody')
assert t.resources == ResourceLimit.loads(dict(max_real_time='1.0s'))
assert t.environ == {'K': '233', 'NAME': 'x233x', 'PJI_TASK_NAME': 'task1_x233x'}
assert len(t.sections.getters) == 1
assert repr(t) == "<Task name: 'task1_x233x', identification: <Identification user: nobody, " \
"group: nogroup>, resources: <ResourceLimit real time: 1.000s>, " \
"sections: <SectionCollection sections: ('name_233',)>>"
def test_task_invalid(self):
tt = TASK_TEMPLATE_SUCCESS_1
with tempfile.TemporaryDirectory() as scriptdir:
with codecs.open(os.path.join(scriptdir, 'README.md'), 'w') as of:
of.write(COMPLEX_TEXT)
with pytest.raises(ValueError):
tt(
scriptdir=scriptdir,
resources=dict(max_real_time='1.0s'),
environ=dict(K='???'),
)
def test_task_call(self):
tt = TASK_TEMPLATE_SUCCESS_2
with tempfile.TemporaryDirectory() as scriptdir:
with codecs.open(os.path.join(scriptdir, 'README.md'), 'w') as of:
of.write(COMPLEX_TEXT)
t = tt(
scriptdir=scriptdir,
resources=dict(max_real_time='1.0s'),
environ=dict(K='233', ENV='xxx', VF='123'),
)
_success, _results = t()
assert _success
_name_1, (_section_1_success, _section_1_results, _section_1_info) = _results[0]
assert _name_1 == 'name_233'
assert _section_1_success
assert len(_section_1_results) == 4
assert _section_1_results[0].ok
assert _section_1_results[1].ok
assert _section_1_results[2].ok
assert _section_1_results[3].ok
assert _section_1_info == {'static': 'this is v : 233', 'value': 233,
'local': 'I have a dream that one day, down in Alabama, with its '
'vicious racists, \nwith its governor having his lips '
'dripping with the words of "interposition" and "nullification"\n'
' -- one day right there in Alabama little black boys and black '
'girls will be able to join \n hands with little white boys and '
'white girls as sisters and brothers.',
'tag': 'I have a dream that one day, down in Alabama, with its vicious '
'racists, \nwith its governor having his lips dripping with the '
'words of "interposition" and "nullification"\n -- one day right '
'there in Alabama little black boys and black girls will be able to '
'join \n hands with little white boys and white girls as sisters '
'and brothers.',
'base64': 'SSBoYXZlIGEgZHJlYW0gdGhhdCBvbmUgZGF5LCBkb3duIGluIEFsYWJhbWEsIHd'
'pdGggaXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhh'
'dmluZyBoaXMgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50Z'
'XJwb3NpdGlvbiIgYW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaW'
'dodCB0aGVyZSBpbiBBbGFiYW1hIGxpdHRsZSBibGFjayBib3lzIGFuZCBi\nbGF'
'jayBnaXJscyB3aWxsIGJlIGFibGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dG'
'xlIHdoaXRl\nIGJveXMgYW5kIHdoaXRlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIGJ'
'yb3RoZXJzLg==\n'}
_name_2, (_section_2_success, _section_2_results, _section_2_info) = _results[1]
assert _name_2 == 'name_2_123233'
assert _section_2_success
assert len(_section_2_results) == 3
assert _section_2_results[0].ok
assert _section_2_results[1].ok
assert _section_2_results[2].ok
assert _section_2_info == {'static': 'this is vt : 123233',
'tag_1': 'I have a dream that one day, down in Alabama, with its vicious '
'racists, \nwith its governor having his lips dripping with the '
'words of "interposition" and "nullification"\n -- one day right '
'there in Alabama little black boys and black girls will be able '
'to join \n hands with little white boys and white girls as sisters '
'and brothers.',
'tag_2': '<KEY>IGEgZ<KEY>Z<KEY>IG<KEY>sIHdpdGgg'
'aXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhhdmluZyBoaX'
'MgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50ZXJwb3NpdGlvbiIg'
'YW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaWdodCB0aGVyZSBpbiBBbG'
'<KEY>ZSBibGFjayBib3lzIG<KEY>IGFi'
'bGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dGxlIHdoaXRl\nIGJveXMgYW5kIHdoaX'
'RlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIG<KEY>g==\n',
'tag_3t': 'sys\n',
'tag_4t': 'SSBoYXZlIGEgZHJlYW0gdGhhdCBvbmUgZGF5LCBkb3duIGluIEFsYWJhbWEsIHdpdGg'
'gaXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhhdmluZyBo'
'aXMgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50ZXJwb3NpdGlvb'
'iIgYW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaW<KEY>i'
'BBbGFiYW1hIG<KEY>IGFuZCBi\nbGFjayBnaXJscyB3aWxsIGJ'
'lIGFibGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dGxlIHdoaXRl\nIGJveXMgYW5k'
'IHdoaXRlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIGJyb3RoZXJzLg==\n',
'tag_5t': 'U1NCb1lYWmxJR0VnWkhKbFlXMGdkR2hoZENCdmJtVWdaR0Y1TENCa2IzZHVJR2x1SUV'
'Gc1lXSmhi\nV0VzSUhkcGRHZ2dhWFJ6SUhacApZMmx2ZFhNZ2NtRmphWE4wY3l3Z0Nu'
'ZHBkR2dnYVhSeklHZHZk\nbVZ5Ym05eUlHaGhkbWx1WnlCb2FYTWdiR2x3Y3lCa2Ntb'
'HdjR2x1Clp5QjNhWFJvSUhSb1pTQjNi\nM0prY3lCdlppQWlhVzUwWlhKd2IzTnBkR2'
'x2YmlJZ1lXNWtJQ0p1ZFd4c2FXWnBZMkYwYVc5dUln\nb2cKTFMwZ2IyNWxJR1JoZVN'
'CeWFXZG9kQ0IwYUdWeVpTQnBiaUJCYkdGaVlXMWhJR3hwZEhSc1pT\nQmliR0ZqYXlC'
'aWIzbHpJR0Z1WkNCaQpiR0ZqYXlCbmFYSnNjeUIzYVd4c0lHSmxJR0ZpYkdVZ2RH\nO'
'GdhbTlwYmlBS0lHaGhibVJ6SUhkcGRHZ2diR2wwZEd4bElIZG9hWFJsCklHSnZlWE1n'
'WVc1a0lI\nZG9hWFJsSUdkcGNteHpJR0Z6SUhOcGMzUmxjbk1nWVc1a0lHSnliM1JvW'
'lhKekxnPT0K\n'}
| 2.25 | 2 |
tests/test_models/__init__.py | mecomontes/AirBnB-clone-one | 1 | 12757889 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 13:28:17 2020
@author: <NAME>
<NAME>
"""
| 1.210938 | 1 |
app/db/mongodb_utils.py | BrunoComitre/todo-fastapi-mongodb | 0 | 12757890 | <gh_stars>0
import logging
from fastapi import FastAPI
from motor.motor_asyncio import AsyncIOMotorClient
from ..core.config import MONGODB_URL, MAX_CONNECTIONS_COUNT, MIN_CONNECTIONS_COUNT
from .mongodb import db
app = FastAPI()
@app.on_event("startup")
async def connect_to_mongo():
logging.info("Connecting...")
db.client = AsyncIOMotorClient(
str(MONGODB_URL),
maxPoolSize=MAX_CONNECTIONS_COUNT,
minPoolSize=MIN_CONNECTIONS_COUNT,
)
logging.info("Connected")
@app.on_event("shutdown")
async def disconnect_to_mongo():
logging.info("Disconnecting...")
db.client.close()
logging.info("Disconnected")
## TO TEST ##
# async def register_db(app: FastAPI):
# @app.on_event('startup')
# def connect_to_mongo():
# db.client = AsyncIOMotorClient(str(MONGODB_URL),
# maxPoolSize=MAX_CONNECTIONS_COUNT,
# minPoolSize=MIN_CONNECTIONS_COUNT)
#         logging.info("Connected")
# @app.on_event('shutdown')
# def disconnect_to_mongo():
# logging.info("Disconnecting...")
# db.client.close()
# logging.info("Disconnected")
| 2.375 | 2 |
tests/ssh/test_ssh_cross_versions.py | jordimassaguerpla/salt-toaster | 25 | 12757891 | <reponame>jordimassaguerpla/salt-toaster
import re
import pytest
import itertools
from saltcontainers.factories import ContainerFactory, MasterFactory
pytestmark = pytest.mark.xfail
MASTERS = [
'registry.mgr.suse.de/toaster-sles15-products-next',
'registry.mgr.suse.de/toaster-sles12sp3-products-next',
'registry.mgr.suse.de/toaster-sles11sp4-products-old-testing',
]
CLIENTS = [
'registry.mgr.suse.de/toaster-sles11sp4-products-old-testing',
'registry.mgr.suse.de/toaster-sles12sp3-products-next',
'registry.mgr.suse.de/toaster-sles15-products-next',
]
@pytest.fixture()
def client(request):
obj = ContainerFactory(
config__image=request.param,
config__salt_config=None,
ssh_config={'user': 'root', 'password': '<PASSWORD>'})
request.addfinalizer(obj.remove)
return obj
@pytest.fixture()
def master(request, salt_root, client):
obj = MasterFactory(
container__config__salt_config__tmpdir=salt_root,
container__config__salt_config__conf_type='master',
container__config__image=request.param,
container__config__salt_config__extra_configs={
"file_roots": {
"file_roots": {"base": ["/etc/salt/sls"]}
},
"thin_extra_mods": {
"thin_extra_mods": "msgpack"
},
},
container__config__salt_config__sls=['tests/sls/echo.sls',],
container__config__salt_config__roster=[client]
)
request.addfinalizer(obj['container'].remove)
return obj
def pytest_generate_tests(metafunc):
matrix = itertools.product(MASTERS, CLIENTS)
def _ids(it):
return re.match('registry.mgr.suse.de/toaster-(.+)', it).group(1).replace('-', '_')
metafunc.parametrize(
'master,client',
matrix,
ids=_ids,
indirect=['master', 'client'])
def test_ping(master, client):
assert master.salt_ssh(client, "test.ping") is True
def test_state_apply_log_file_created(master, client):
res = master.salt_ssh(client, "state.apply echo")
assert res['cmd_|-test_|-echo "test1"_|-run']['changes']['stdout'] == 'test1'
| 1.789063 | 2 |
binding.gyp | fbzhong/node-cityhash | 7 | 12757892 | {
"targets": [
{
"target_name": "cityhash",
"include_dirs": ["cityhash/"],
"sources": [
"binding.cc",
"cityhash/city.cc"
]
}
]
}
| 1.054688 | 1 |
src/readercake/module.py | Jaasim2008/cakereader | 0 | 12757893 | <gh_stars>0
import colorama
from colorama import Fore
import os
class ReaderCake:
def __init__(self, filename, encoding, filedir):
self.filename = filename
self.encoding = encoding
self.filedir = filedir
    def read_all(self):
        with open(self.filename, encoding=self.encoding) as file:
            return file.read()
    def write_data(self, word):
        with open(self.filename, 'w', encoding=self.encoding) as file:
            file.write(word)
        print(f'{Fore.RED}Pass')
    def append_data(self, word):
        with open(self.filename, 'a', encoding=self.encoding) as file:
            file.write(f' {word}')
        print(f'{Fore.RED}Pass')
    def read_line(self, line):
        # return the line at 1-based index `line`; the original passed `line`
        # to readline(), which treats it as a size hint in characters
        with open(self.filename, 'r', encoding=self.encoding) as file:
            for number, text in enumerate(file, start=1):
                if number == line:
                    return text
        return None
    def read_lines(self, line):
        # return the first `line` lines; readlines() also takes a size hint,
        # not a line count, so slice explicitly
        try:
            with open(self.filename, 'r', encoding=self.encoding) as file:
                return file.readlines()[:line]
        except Exception:
            # clear the console on failure, on both Windows and POSIX
            os.system('cls' if os.name == 'nt' else 'clear')
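# A minimal usage sketch (the file name and directory are assumptions for
# illustration only):
# reader = ReaderCake('notes.txt', encoding='utf-8', filedir='.')
# reader.write_data('hello')
# reader.append_data('world')
# print(reader.read_all())    # -> 'hello world'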
| 3.15625 | 3 |
test/datasets/VANiLLA/results/old/utils/utils.py | librairy/explainable-qa | 1 | 12757894 | import pandas as pd
import json
from pprint import pprint
def JSONLineToDict(JSONRoute):
    '''
    Helper that opens a file containing one JSON object per line
    and converts it into a list of dictionaries
    '''
    with open(JSONRoute) as f:
        jsonList = list(f)
    # parsing each line directly is equivalent to the original's redundant
    # dumps/loads round-trip
    return [json.loads(jsonLine) for jsonLine in jsonList]
def findValueIndex(dictList, key, value):
    '''
    Helper that, given a list of dictionaries and a key, returns the index
    of the dictionary whose key holds the given value (or -1 if absent)
    '''
for i, dict in enumerate(dictList):
if dict[key] == value:
return i
return -1
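# Worked example: with dictList = [{'q': 'a'}, {'q': 'b'}],
# findValueIndex(dictList, 'q', 'b') returns 1; a value that is not present
# returns -1.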
def extractLatestQuestionCSV(csvRoute):
df = pd.read_csv(csvRoute, sep=";")
return df.iloc[-1,0]
dictList = JSONLineToDict("Vanilla_Dataset_Test.json")
#pprint(dictList)
#print(len(dictList))
#dictList[:] = [value for counter, value in enumerate(dictList) if counter > 10635]
#print(len(dictList))
question = extractLatestQuestionCSV("VANiLLA.csv")
#print(question)
print("Index:", findValueIndex(dictList, 'question', question)) | 3.40625 | 3 |
benchmark_pytorch.py | muszka95/M2U-Net | 20 | 12757895 | <gh_stars>10-100
from pathlib import Path
import torch
from torch.utils.data import DataLoader
import torchvision.transforms.functional as VF
import torch.backends.cudnn as cudnn
import time
import numpy as np
from PIL import Image
from argparse import ArgumentParser
from dataset import get_file_lists, RetinaDataset
# Networks
from m2unet import m2unet
from driu import DRIU
from erfnet import Net as ERFNet
from unet import UNet
def run(model, dataloader, batch_size, threshold, device, save_prob=False,
        save_binary_mask=False, pred_path=None, file_names=None):
    """Run inference over ``dataloader``, timing each batch and optionally
    saving probability vessel maps and/or thresholded binary masks to
    ``pred_path``.
    """
model.eval().to(device)
sigm = torch.nn.Sigmoid()
with torch.no_grad():
batch_times = []
prob_images_so_far = 0
bin_images_so_far = 0
# inference loop
for i,inputs in enumerate(dataloader):
# start timer
start_time = time.perf_counter()
# to device
inputs = inputs.to(device)
# forward pass
outputs = model(inputs)
# prob and threshold
pred_prob = sigm(outputs)
preds = torch.gt(pred_prob,threshold).float()
# end timer
end_time = time.perf_counter()
inf_time = (end_time - start_time)/batch_size
print('Batch {}/{} inference time per image: {:.5f}s'.format(i+1,len(dataloader),inf_time))
batch_times.append(inf_time)
# save binary mask
if save_binary_mask:
for j in range(inputs.size()[0]):
preds_img = VF.to_pil_image(preds.cpu().data[j])
pred_name = '{}_vessel_binary.gif'.format(file_names[bin_images_so_far])
preds_img.save(pred_path.joinpath(pred_name))
bin_images_so_far += 1
# save vessel probabilities
if save_prob:
for j in range(inputs.size()[0]):
preds_img = VF.to_pil_image(pred_prob.cpu().data[j])
pred_name = '{}_vessel_prob.gif'.format(file_names[prob_images_so_far])
preds_img.save(pred_path.joinpath(pred_name))
prob_images_so_far += 1
# ignore first batch for warm up
batch_avg = np.mean(batch_times[1:])
print()
print('Mean inference time per image: {:.5f}s'.format(batch_avg))
def main():
parser = ArgumentParser()
arg = parser.add_argument
arg('--model',default='M2UNet',type=str,choices=['M2UNet','DRIU','ERFNet','UNet'])
arg('--state_dict',default='M2UNetDRIVE.pth',type=str,help='pretrained model weights file, stored in models')
arg('--dataset',default='DRIVE',choices=['DRIVE','CHASE_DB1','HRF'],type=str,help='determines the dataset directory and the amount of cropping that is performed to ensure that the loaded images are multiples of 32.')
arg('--threshold',default=0.5215,type=float,help='threshold to convert probability vessel map to binary map')
arg('--devicename',default='cpu',type=str,help='device type, default: "cpu"')
arg('--batch_size',default=1,type=int,help='inference batch size, default: 1')
arg('--save_prob',default=False,help='save probability vessel maps to disk')
arg('--save_binary_mask',default=False,help='save binary mask to disk')
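    # Example invocation (illustrative; the weights file and dataset layout
    # must exist under models/ and data/ as configured below):
    #   python benchmark_pytorch.py --model M2UNet --state_dict M2UNetDRIVE.pth \
    #       --dataset DRIVE --devicename cpu --batch_size 1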
# Paths
model_path = Path('models')
data_path = Path('data')
log_path = Path('logs')
# parse arguments
args = parser.parse_args()
state_dict_path = model_path.joinpath(args.state_dict)
dataset_path = data_path.joinpath(args.dataset)
image_file_path = dataset_path.joinpath('test/images')
prediction_output_path = dataset_path.joinpath('predictions')
threshold = args.threshold
devicename = args.devicename
batch_size = args.batch_size
dataset = args.dataset
save_prob = args.save_prob
save_binary_mask = args.save_binary_mask
# default device type is 'cuda:0'
pin_memory = True
cudnn.benchmark = True
device = torch.device(devicename)
if devicename == 'cpu':
# if run on cpu, disable cudnn benchmark and do not pin memory
cudnn.benchmark = False
pin_memory = False
# only run on one core
torch.set_num_threads(1)
if args.model == 'M2UNet':
model = m2unet()
    if args.model == 'DRIU':
        # the same DRIU architecture is used for both DRIVE and CHASE_DB1
        model = DRIU()
if args.model == 'ERFNet':
model = ERFNet(1)
if args.model == 'UNet':
model = UNet(in_channels=3, n_classes=1, depth=5, wf=6, padding=True,batch_norm=False, up_mode='upconv')
state_dict = torch.load(str(state_dict_path),map_location=devicename)
model.load_state_dict(state_dict,strict=True)
model.eval()
# list of all files include path
file_paths = get_file_lists(image_file_path)
# list of file names only
file_names = list(map(lambda x: x.stem,file_paths))
# dataloader
dataloader = DataLoader(dataset = RetinaDataset(file_paths,dataset)
,batch_size = batch_size
,shuffle=False
,num_workers=1
,pin_memory=pin_memory
)
run(model,dataloader,batch_size,threshold,device,save_prob,save_binary_mask,prediction_output_path,file_names)
if __name__ == '__main__':
main() | 1.898438 | 2 |
src/datashare/azext_datashare/vendored_sdks/datashare/aio/operations_async/__init__.py | Mannan2812/azure-cli-extensions | 207 | 12757896 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._account_operations_async import AccountOperations
from ._consumer_invitation_operations_async import ConsumerInvitationOperations
from ._data_set_operations_async import DataSetOperations
from ._data_set_mapping_operations_async import DataSetMappingOperations
from ._invitation_operations_async import InvitationOperations
from ._operation_operations_async import OperationOperations
from ._share_operations_async import ShareOperations
from ._provider_share_subscription_operations_async import ProviderShareSubscriptionOperations
from ._share_subscription_operations_async import ShareSubscriptionOperations
from ._consumer_source_data_set_operations_async import ConsumerSourceDataSetOperations
from ._synchronization_setting_operations_async import SynchronizationSettingOperations
from ._trigger_operations_async import TriggerOperations
__all__ = [
'AccountOperations',
'ConsumerInvitationOperations',
'DataSetOperations',
'DataSetMappingOperations',
'InvitationOperations',
'OperationOperations',
'ShareOperations',
'ProviderShareSubscriptionOperations',
'ShareSubscriptionOperations',
'ConsumerSourceDataSetOperations',
'SynchronizationSettingOperations',
'TriggerOperations',
]
| 1.234375 | 1 |
learn-to-code-with-python/08-Control-Flow/the-elif-statement.py | MaciejZurek/python_practicing | 0 | 12757897 | # def positive_or_negative(value):
# if value > 0:
# return "Positive!"
# elif value < 0:
# return "Negative!"
# else:
# return "It's zero!"
# number = int(input("Wprowadz liczbe: "))
# print(positive_or_negative(number))
def calculator(operation, a, b):
if operation == "add":
return a + b
elif operation == "subtract":
return a - b
elif operation == "multiple":
return a * b
elif operation == "divide":
return a / b
else:
print("There is no such an operation!")
operacja = str(input("Co chcesz zrobic? "))
num1 = int(input("Pierwszy skladnik: "))
num2 = int(input("Drugi skladnik: "))
print(calculator(operacja, num1, num2))
| 4.25 | 4 |
dyno/debug/__init__.py | mequanta/z-dyno | 0 | 12757898 | #from . handlers import DebugHandler
from .qdbclientserver import DynoQdbClientServer
| 1.070313 | 1 |
checker.py | ViggAlm/PasswordKit | 1 | 12757899 | <reponame>ViggAlm/PasswordKit
# Credits to NeuralNine's video for helping me out here ( https://www.youtube.com/watch?v=iJ01q-sRJAw&t )
import log
import main
import string
import time
def check():
log.general("This is the password strength checker.")
log.question("Please enter your password:")
password = input("")
upper_case = any([1 if c in string.ascii_uppercase else 0 for c in password])
lower_case = any([1 if c in string.ascii_lowercase else 0 for c in password])
special = any([1 if c in string.punctuation else 0 for c in password])
digits = any([1 if c in string.digits else 0 for c in password])
characters = [upper_case, lower_case, special, digits]
length = len(password)
score = 0
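    # Scoring rubric (7 points total): up to 4 points for length
    # (>8, >12, >16, >20 characters) and up to 3 points for using more
    # than one character class (upper, lower, digits, punctuation).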
with open("common.txt", "r") as f:
common = f.read().splitlines()
if password in common:
log.error("Password found in top 10 000 most common passwords, change immediately. Score: 0 / 7")
exit()
if length > 8:
score += 1
if length > 12:
score += 1
if length > 16:
score += 1
if length > 20:
score += 1
log.general(f"Password length is {str(length)}, added {str(score)} points to your total.")
if sum(characters) > 1:
score += 1
if sum(characters) > 2:
score += 1
if sum(characters) > 3:
score += 1
log.general(f"Password has {str(sum(characters))} different types of characters, added {str(sum(characters) - 1)} points to your total.")
if score < 4:
log.error(f"Password is weak, password change recommended. Score: {str(score)} / 7")
elif score <= 5:
log.general(f"Password is okay, 2 factor authentication recommended and possible password change recommended. Score: {str(score)} / 7")
elif score >= 6:
log.result(f"Password is good, 2 factor authentication recommended. Score: {str(score)} / 7")
log.question("Press 'C' to check another password, press 'M' to open the menu or press any other key to exit:")
next_action = input("").lower()
if next_action == "c":
log.result("Chose check another password.")
time.sleep(1)
main.clear()
check()
elif next_action == "m":
log.result("Chose open the menu.")
time.sleep(1)
main.clear()
main.menu()
else:
log.general("Thank you for using this program!")
time.sleep(1)
| 3.609375 | 4 |
sparrow_cloud/apps/permission_command/views.py | LaEmma/sparrow_cloud | 15 | 12757900 | <filename>sparrow_cloud/apps/permission_command/views.py<gh_stars>10-100
# from django.shortcuts import render
# from .models import CacheTest
# import uuid
# from rest_framework.response import Response
# from rest_framework.decorators import api_view
# @api_view(('GET',))
# def cachetest_detail(request, *args, **kwargs):
# cache_test_obj = CacheTest.objects.create(id=uuid.uuid4().hex, name="CacheTest001")
# cache_test_obj_g = CacheTest.objects.get(id=cache_test_obj.id)
# return Response(cache_test_obj_g.name) | 1.898438 | 2 |
bot/bot.py | M3nin0/payday-bot | 2 | 12757901 | import logging
import telegram
import datetime
from time import sleep
from toolbox import ToolBox
from threading import Thread
from telegram.ext import Updater
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import CommandHandler
class Bot(object):
def __init__(self):
self.updater = Updater('TOKEN')
self.update_id = None
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.dp = self.updater.dispatcher
        # Register the command handlers
self.dp.add_handler(CommandHandler("start", self._init_config))
        # Filter incoming messages by document type
self.dp.add_handler(MessageHandler(Filters.document, self._receive_file))
        # Start the bot
self.updater.start_polling()
self.updater.idle()
def _init_config(self, bot, update):
try:
self.update_id = bot.get_updates()[0].update_id
except BaseException as e:
self.update_id = None
update.message.reply_text('Olá! Vou auxiliar você a realizar as configurações, \
para que eu possa acessar suas faturas e te lembrar delas =D')
update.message.reply_text('Primeiro insira o arquivo .json com as credênciais de sua API do Google')
def _receive_file(self, bot, update):
table = -1
try:
            # Receive the .json credentials file and connect to the user's spreadsheet
file_id = update.message.document.file_id
_json_key = bot.get_file(file_id)
_json_key.download('bot/keys/' + str(file_id) + '.json')
table = ToolBox.connect_api(str(file_id) + '.json', 'faturas')
if table == -1:
update.message.reply_text('Erro ao tentar se conectar com a planilha de gastos, insira o arquivo novamente')
else:
update.message.reply_text('Pronto! Agora já tenho acesso a suas faturas, logo menos estarei verificando e enviando notificações sobre suas faturas para você')
                # Spawn a thread so the bot stays usable while verification runs
thread = Thread(target = self.send_notify, args = (bot, update, table))
thread.start()
except BaseException as e:
print(e)
def send_notify(self, bot, update, table):
        # Check the invoices and send a notification when needed
while True:
update.message.reply_text('Estou fazendo a verificação de suas faturas agora')
itens_table = table.get_all_values()[1:]
            # Today's date
hoje = datetime.date.today()
for fatura in itens_table:
if fatura[-1] != 'Fechado':
vencimento = fatura[2].split('-')[::-1]
data_vencimento = datetime.date(int(vencimento[0]),
int(vencimento[1]),
int(vencimento[2]))
atraso = (hoje - data_vencimento).days
if atraso > int(fatura[-2]):
update.message.reply_text('A seguinte fatura precisa ser paga: \n ' +
'- Data de emissão: ' + fatura[1] +
'\n - Data de vencimento: ' + fatura[2] +
'\n - Nome da empresa: ' + fatura[3] +
'\n - Valor da conta: ' + fatura[4] +
'\n - Status da fatura: ' + fatura[6])
            # Wait one day before checking again
sleep(86400)
if __name__ == '__main__':
Bot()
| 2.34375 | 2 |
src/bitmessageqt/messagecompose.py | BeholdersEye/PyBitmessage | 1,583 | 12757902 | """
Message editor with a wheel zoom functionality
"""
# pylint: disable=bad-continuation
from PyQt4 import QtCore, QtGui
class MessageCompose(QtGui.QTextEdit):
"""Editor class with wheel zoom functionality"""
def __init__(self, parent=0):
super(MessageCompose, self).__init__(parent)
self.setAcceptRichText(False)
self.defaultFontPointSize = self.currentFont().pointSize()
def wheelEvent(self, event):
"""Mouse wheel scroll event handler"""
if (
QtGui.QApplication.queryKeyboardModifiers() & QtCore.Qt.ControlModifier
) == QtCore.Qt.ControlModifier and event.orientation() == QtCore.Qt.Vertical:
if event.delta() > 0:
self.zoomIn(1)
else:
self.zoomOut(1)
zoom = self.currentFont().pointSize() * 100 / self.defaultFontPointSize
QtGui.QApplication.activeWindow().statusBar().showMessage(
QtGui.QApplication.translate("MainWindow", "Zoom level %1%").arg(
str(zoom)
)
)
else:
# in QTextEdit, super does not zoom, only scroll
super(MessageCompose, self).wheelEvent(event)
def reset(self):
"""Clear the edit content"""
self.setText('')
| 2.578125 | 3 |
py/testdir_multi_jvm/test_many_cols_enum_multi.py | gigliovale/h2o | 882 | 12757903 | <reponame>gigliovale/h2o<filename>py/testdir_multi_jvm/test_many_cols_enum_multi.py
import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
import h2o_exec as h2e
def write_syn_dataset(csvPathname, rowCount, colCount, header, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
# header names need to be unique
if header and i==0:
r = "a" + str(j)
else:
r = "a"
rowData.append(r)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(3, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_many_cols_enum(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
# (100, 11000, 0, 'cA', 180),
# (100, 10000, 1, 'cB', 180),
# (100, 8000, 1, 'cD', 180),
# (100, 7000, 0, 'cE', 180),
# (100, 6000, 1, 'cF', 180),
(100, 1000, 0, 'cH', 120),
(100, 1000, 1, 'cI', 120),
(100, 2000, 1, 'cI', 120),
(100, 3000, 1, 'cI', 120),
(100, 4000, 1, 'cI', 120),
(100, 5000, 0, 'cG', 180),
(100, 9000, 0, 'cC', 180),
(100, 10000, 1, 'cB', 180),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
cnum = 0
# it's interesting to force the first enum row to be used as header or not
# with many cols, we tend to hit limits about stuff fitting in a chunk (header or data)
for (rowCount, colCount, header, hex_key, timeoutSecs) in tryList:
cnum += 1
csvFilename = 'syn_' + str(SEED) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, header, SEED)
parseResult = h2i.import_parse(path=csvPathname, schema='put', header=header,
hex_key=hex_key, timeoutSecs=timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
if not h2o.browse_disable:
h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
time.sleep(5)
# try new offset/view
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], offset=100, view=100)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], offset=99, view=89)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], offset=-1, view=53)
if __name__ == '__main__':
h2o.unit_main()
| 2.40625 | 2 |
software/python/ControlCargaProgramable/Code with issue and results/SAS_v3.py | Setec-Lab/solar_cell_simulator | 0 | 12757904 | <filename>software/python/ControlCargaProgramable/Code with issue and results/SAS_v3.py
import pyvisa
import numpy as np
import matplotlib.pyplot as plt
import math
import controller
import time
sizeArray = 1024
serial = 2
parallel = 3
InputIsc = 0.5196*parallel
InputImp = 0.5029*parallel
InputVmp = 2.409*serial
InputVoc = 2.690*serial
InputVoc_Tcomp = 0
InputVmp_Tcomp = 0
InputPcos = 1
InputT = 0
def scalevars(voc_tcomp, vmp_tcomp, pcos, voc, vmp, imp, isc, t):
sVoc = voc - (t * voc_tcomp)
sVmp = vmp - (t * vmp_tcomp)
sImp = pcos * imp
sIsc = pcos * isc
return sVoc, sVmp, sImp, sIsc
sVoc, sVmp, sImp, sIsc = scalevars(InputVoc_Tcomp, InputVmp_Tcomp, InputPcos, InputVoc, InputVmp, InputImp, InputIsc,
InputT)
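# The table generator below builds a 1024-point I-V curve from the four panel
# parameters using an exponential approximation of the cell curve (the exact
# provenance of the equations is an assumption; the math matches the code):
#   Rs   = (Voc - Vmp) / Imp
#   a    = (Vmp * (1 + Rs*Isc/Voc) + Rs*(Imp - Isc)) / Voc
#   N    = log(2 - 2**a) / log(Imp / Isc)
#   V(I) = (Voc * log2(2 - (I/Isc)**N) - Rs*(I - Isc)) / (1 + Rs*Isc/Voc)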
def generate_table(Voc, Vmp, Imp, Isc):
Rs = (Voc - Vmp) / Imp
a = (Vmp * (1 + (Rs * Isc) / Voc) + Rs * (Imp - Isc)) / Voc
N = math.log10(2 - 2 ** a) / math.log10(Imp / Isc)
i = np.zeros(sizeArray)
v = np.zeros(sizeArray)
r = np.zeros(sizeArray)
for x in range(0, sizeArray):
I = (x * Isc) / (sizeArray - 1)
V = Voc * math.log10(2 - ((I / Isc) ** N))
V = V / math.log10(2)
V = V - Rs * (I - Isc)
V = V / (1 + ((Rs * Isc) / Voc))
P = V * I
v[x] = V
i[x] = I
if I == 0:
r[x] = 10000
else:
r[x] = V / I
return v, i, r
def VoltageInterpolation(tableVoltage, tableCurrent, inputVoltage):
for i in range(0, sizeArray-1):
if inputVoltage > tableVoltage[i]:
iOut = tableCurrent[i] + (tableCurrent[i+1]-tableCurrent[i])*(inputVoltage-tableVoltage[i])/(tableVoltage[i+1]-tableVoltage[i])
break
return iOut
def CurrentInverseInterpolation(tableVoltage, tableCurrent, inputCurrent):
    # tableCurrent ascends from 0 to Isc, so bracket on the current column;
    # the original compared the input current against tableVoltage, mixing units
    for i in range(0, sizeArray-1):
        if inputCurrent < tableCurrent[i]:
            vOut = tableVoltage[i]+(tableVoltage[i+1]-tableVoltage[i])*((inputCurrent - tableCurrent[i])/(tableCurrent[i+1]-tableCurrent[i]))
            break
    return vOut
def ResistanceInterpolation(tableResistance, tableCurrent, inputResistance):
for i in range(0, sizeArray-1):
if inputResistance>tableResistance[i]:
iOut = tableCurrent[i] + (tableCurrent[i+1]-tableCurrent[i])*(inputResistance-tableResistance[i])/(tableResistance[i+1]-tableResistance[i])
break
return iOut
def CalculateResistance(Vsens, Isens):
if Isens != 0 :
return Vsens/Isens
else :
print("error, I = 0")
return 0
def writeInNotepad(time, counter, vc, ic, rc, vm, im, rm):
file.write("\n")
file.write(str(time))
file.write(",")
file.write(str(counter))
file.write(",")
file.write(str(vc))
file.write(",")
file.write(str(ic))
file.write(",")
file.write(str(vc/ic))
file.write(",")
file.write(str(vm))
file.write(",")
file.write(str(im))
file.write(",")
file.write(str(vm/im))
print("File updated")
#define arrays
v, i, r = generate_table(sVoc, sVmp, sImp, sIsc)
#connect to programmable load
print("connecting to programmable generator and load")
rm = pyvisa.ResourceManager()
fuente = rm.open_resource(rm.list_resources()[1])
carga = rm.open_resource(rm.list_resources()[0])
Fuente = controller.Fuente(fuente, "Fuentesita")
Carga = controller.Carga(carga, "carguita")
print(fuente.query("*IDN?"))
print(carga.query("*IDN?"))
print("Connected")
time.sleep(2)
#round sIsc and sVoc
sIsc = round(sIsc, 2)
sVoc = round(sVoc, 2)
# Turn on the power output and set it to CV
print("Max values: {}V {}A".format(sVoc, sIsc))
print(Fuente.aplicar_voltaje_corriente(1, sVoc, sIsc))
print(Fuente.encender_canal(1))
time.sleep(2)
#Turn on the electronic load and set it to 0.5 ohms (constant-resistance mode)
Carga.fijar_funcion("RES")
Carga.encender_carga()
Carga.fijar_resistencia(0.5)
voltage = 0
current = 0
resistance = 0
power = 0
outputV = 0
outputI = 0
loadResistance = 0.5
loadVoltage = 0
loadCurrent = 0
boost = 0
counter = 0
#open a file
file = open("SAS_v3test.txt", "w")
file.write("This notepad takes measurements from the generator and then the load")
file.write("\nTime,Counter,Vc,Ic,Rc,Vm,Im,Rm")
#Setting up timer and counter
inicio = time.time()
tiempo = time.time()
while counter <= 10000:
initialLoopTime = time.time()
    #Measure V and I output by the generator
voltage, current, power = Fuente.medir_todo(1)
resistance = round(CalculateResistance(voltage, current), 2)
print("Measured values: {}V {}A {}W {}R".format(voltage, current, power, resistance))
if (power != 0):
if voltage>=sVmp :
#we are in CV mode
print("CV mode")
outputI = sIsc
interI = round(ResistanceInterpolation(r, i, resistance), 2)
outputV = round(CurrentInverseInterpolation(v, i, interI),2)
else :
#we are in CC mode
print("CC mode")
outputV = sVoc
outputI = round(ResistanceInterpolation(r, i, resistance), 2)
#updating V and I output values
print("Values to output: {}V. {}A".format(outputV, outputI))
Fuente.aplicar_voltaje_corriente(1, outputV, outputI)
        #set boost to 0 because we do not need to boost the signal to sVoc and sIsc
boost = 0
elif (power == 0 and boost != 2):
#if power is 0 we re-output Isc and Voc 2 times before we assume that the load was removed
boost += 1
print("Power = 0! Rebooting {}".format(boost))
print(Fuente.aplicar_voltaje_corriente(1, sVoc, sIsc))
time.sleep(2)
else :
print("Load was removed from generator")
writeInNotepad("There was an issue, test was stopped prematurely")
break
#uptdate notepad
writeInNotepad(time.time()-inicio, counter, outputV, outputI, 0, voltage, current, 0)
print("time elapsed in loop: {}s".format(time.time() - initialLoopTime))
print()
print()
#up resistance
loadResistance += 0.1
print(Carga.fijar_resistencia(loadResistance))
#update counter and wait to not overload generator
time.sleep(1)
counter += 1
elapsado = time.time() - inicio
print("Time elapsed: {}".format(elapsado))
print(Fuente.apagar_canal(1))
print(Carga.apagar_carga())
file.close()
| 2.265625 | 2 |
app/revisioner/tests/e2e/e2e_datastore_generic.py | metamapper-io/metamapper | 3 | 12757905 | """
This tests when a Datastore experiences some typical changes to the underlying definition.
Test Cases:
- Table is renamed.
- Table is created.
- Table is dropped.
- Column is dropped.
- [PENDING] Column attributes are updated.
"""
from app.revisioner.tests.e2e import inspected
from app.revisioner.tests.test_e2e import mutate_inspected
preload_fixtures = ['datastore']
inspected_tables = mutate_inspected(inspected.tables_and_views, [
# (1) We renamed the table `employee.departments` to `employee.depts`.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16392
),
"metadata": {
"field": "table_name",
"new_value": "depts",
},
},
# (2) We dropped the `app`.`productlines` table at some point.
{
"type": "dropped",
"filters": (
lambda row: row['table_object_id'] == 16456
),
},
    # (3) We dropped the `msrp` column from the `app`.`products` table.
{
"type": "dropped",
"filters": (
lambda row: row['table_object_id'] == 16488
),
"column_filters": (
lambda col: col['column_object_id'] == "16488/9"
),
},
# (4) The column `app`.`customers`.`postalcode` has a `default_value` change.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16442
),
"column_filters": (
lambda col: col['column_name'] == "postalcode"
),
"metadata": {
"field": "columns.default_value",
"new_value": "default_sequence()",
},
},
# (5) The column `app`.`orders`.`status` has a data type change.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16465
),
"column_filters": (
lambda col: col['column_name'] == "status"
),
"metadata": {
"field": "columns.data_type",
"new_value": "integer",
},
},
# (6) Comment was added to a resource.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16442
),
"column_filters": (
lambda col: col['column_name'] == "postalcode"
),
"metadata": {
"field": "columns.column_description",
"new_value": "5-digit mailing code",
},
},
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16465
),
"column_filters": (
lambda col: col['column_name'] == "status"
),
"metadata": {
"field": "columns.max_length",
"new_value": 50,
},
},
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16465
),
"column_filters": (
lambda col: col['column_name'] == "status"
),
"metadata": {
"field": "columns.numeric_scale",
"new_value": 0,
},
},
    # (7) The column `app`.`orders`.`amount` is changed.
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16478
),
"column_filters": (
lambda col: col['column_name'] == "amount"
),
"metadata": {
"field": "columns.is_nullable",
"new_value": True,
},
},
{
"type": "modified",
"filters": (
lambda row: row['table_object_id'] == 16478
),
"column_filters": (
lambda col: col['column_name'] == "amount"
),
"metadata": {
"field": "columns.column_name",
"new_value": "dollar_amount",
},
},
])
# (8) We create a brand new table called `app.categories`.
inspected_tables += [
{
"schema_object_id": 16441,
"table_schema": "app",
"table_object_id": 99999,
"table_name": "categories",
"table_type": "base table",
"properties": {},
"columns": [
{
"column_object_id": "99999/1",
"column_name": "category_id",
"column_description": None,
"ordinal_position": 1,
"data_type": "integer",
"max_length": 32,
"numeric_scale": 0,
"is_nullable": False,
"is_primary": True,
"default_value": ""
},
{
"column_object_id": "99999/1",
"column_name": "name",
"column_description": None,
"ordinal_position": 2,
"data_type": "varchar",
"max_length": 256,
"numeric_scale": None,
"is_nullable": False,
"is_primary": False,
"default_value": ""
},
]
}
]
test_cases = [
{
"model": "Table",
"description": "Expect `employees.departments` table to be renamed.",
"filters": {
"schema__name": "employees",
"object_ref": "16392",
},
"assertions": [
{
"evaluation": lambda datastore, table: table.name,
"pass_value": "depts",
},
# It has a new object identifier due to the name change.
{
"evaluation": lambda datastore, table: table.object_id,
"pass_value": "2<PASSWORD>",
},
# It retains associated metadata.
{
"evaluation": lambda datastore, table: table.tags,
"pass_value": ["one", "two"],
},
]
},
{
"model": "Table",
"description": "Expect `app.departments` table NOT be be renamed.",
"filters": {
"schema__name": "app",
"object_ref": "16522",
},
"assertions": [
{
"evaluation": lambda datastore, table: table.name,
"pass_value": "departments",
},
{
"evaluation": lambda datastore, table: table.object_id,
"pass_value": "<PASSWORD>",
},
]
},
{
"model": "Table",
"description": "Expect `app.productlines` table to be deleted.",
"filters": {
"schema__name": "app",
"name": "productlines",
},
"assertions": [
{
"evaluation": lambda datastore, table: table,
"pass_value": None,
}
],
},
{
"model": "Column",
"description": "Expect `app.productlines` columns to be deleted.",
"filters": {
"table__schema__name": "app",
"table__name": "productlines",
},
"assertions": [
{
"evaluation": lambda datastore, column: column,
"pass_value": None,
}
],
},
{
"model": "Column",
"description": "Expect `app.products.msrp` column to be deleted.",
"filters": {
"pk": 44,
},
"assertions": [
{
"evaluation": lambda datastore, column: column,
"pass_value": None,
}
],
},
{
"model": "Table",
"description": "Expect `app.categories` table to be created.",
"filters": {
"schema__name": "app",
"name": "categories",
"object_ref": "99999",
},
"assertions": [
{
"evaluation": lambda datastore, table: table.name,
"pass_value": "categories",
},
{
"evaluation": lambda datastore, table: table.columns.count(),
"pass_value": 2,
}
]
},
{
"model": "Column",
"description": "The column `app`.`customers`.`postalcode` has a default_value change.",
"filters": {
"table__schema__name": "app",
"table__object_ref": "16442",
"name": "postalcode",
},
"assertions": [
{
"evaluation": lambda datastore, column: column.ordinal_position,
"pass_value": 10,
},
{
"evaluation": lambda datastore, column: column.default_value,
"pass_value": "default_sequence()",
},
{
"evaluation": lambda datastore, column: column.db_comment,
"pass_value": "5-digit mailing code",
}
]
},
{
"model": "Column",
"description": "The column `app`.`orders`.`status` has a data type change.",
"filters": {
"table__schema__name": "app",
"table__object_ref": "16465",
"name": "status",
},
"assertions": [
{
"evaluation": lambda datastore, column: column.data_type,
"pass_value": "integer",
},
{
"evaluation": lambda datastore, column: column.max_length,
"pass_value": 50,
},
{
"evaluation": lambda datastore, column: column.numeric_scale,
"pass_value": 0,
},
]
},
{
"model": "Column",
"description": "The column `app`.`orders`.`amount` has changed.",
"filters": {
"table__schema__name": "app",
"table__object_ref": "16478",
"ordinal_position": 4,
},
"assertions": [
{
"evaluation": lambda datastore, column: column.full_data_type,
"pass_value": "numeric(10, 2)",
},
{
"evaluation": lambda datastore, column: column.name,
"pass_value": "dollar_amount",
},
{
"evaluation": lambda datastore, column: column.is_nullable,
"pass_value": True,
},
]
},
]
| 2.21875 | 2 |
main/eventlog/template.py | RoastVeg/cports | 46 | 12757906 | pkgname = "eventlog"
pkgver = "0.2.13"
pkgrel = 0
_commit = "a5c19163ba131f79452c6dfe4e31c2b4ce4be741"
build_style = "gnu_configure"
hostmakedepends = ["pkgconf", "automake", "libtool"]
pkgdesc = "API to format and send structured log messages"
maintainer = "q66 <<EMAIL>>"
license = "BSD-3-Clause"
url = "https://github.com/balabit/eventlog"
source = f"{url}/archive/{_commit}.tar.gz"
sha256 = "ddd8c19cf70adced542eeb067df275cb2c0d37a5efe1ba9123102eb9b4967c7b"
def pre_configure(self):
self.do("autoreconf", "-if")
def post_install(self):
self.install_license("COPYING")
@subpackage("eventlog-devel")
def _devel(self):
return self.default_devel()
| 1.625 | 2 |
nucypher/examples/finnegans_wake_demo/finnegans-wake-concise-demo.py | kanzeparov/NuCypher | 0 | 12757907 | <gh_stars>0
import datetime
import os
import shutil
import sys
import json
import struct
from time import sleep
import maya
from twisted.logger import globalLogPublisher
from nucypher.characters.lawful import Alice, Bob, Ursula
from nucypher.data_sources import DataSource as Enrico
from nucypher.network.middleware import RestMiddleware
from nucypher.utilities.logging import simpleObserver
from umbral.keys import UmbralPublicKey
from werkzeug.utils import secure_filename
from flask import Flask
from flask import request, render_template, redirect, url_for
UPLOAD_FOLDER = './static/uploads'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
import re
import ipfsapi
######################
# Boring setup stuff #
######################
# Execute the download script (download_finnegans_wake.sh) to retrieve the book
BOOK_PATH = os.path.join('.', 'finnegans-wake.txt')
# Twisted Logger
globalLogPublisher.addObserver(simpleObserver)
# Temporary file storage
TEMP_FILES_DIR = "{}/examples-runtime-cruft".format(os.path.dirname(os.path.abspath(__file__)))
TEMP_DEMO_DIR = "{}/finnegans-wake-demo".format(TEMP_FILES_DIR)
TEMP_CERTIFICATE_DIR = "{}/certs".format(TEMP_DEMO_DIR)
# Remove previous demo files and create new ones
shutil.rmtree(TEMP_FILES_DIR, ignore_errors=True)
os.mkdir(TEMP_FILES_DIR)
os.mkdir(TEMP_DEMO_DIR)
os.mkdir(TEMP_CERTIFICATE_DIR)
#######################################
# Finnegan's Wake on NuCypher Testnet #
# (will fail with bad connection) #####
#######################################
TESTNET_LOAD_BALANCER = "eu-federated-balancer-40be4480ec380cd7.elb.eu-central-1.amazonaws.com"
##############################################
# Ursula, the Untrusted Re-Encryption Proxy #
##############################################
ursula = Ursula.from_seed_and_stake_info(host=TESTNET_LOAD_BALANCER,
certificates_directory=TEMP_CERTIFICATE_DIR,
federated_only=True,
minimum_stake=0)
IPFS_CONN = ipfsapi.connect('127.0.0.1', 5001)
@app.route("/show")
def show():
    # NOTE: as written this route depends on BOB, label, policy and Alice's
    # saved verifying key existing at module scope; they are only created
    # inside mainpage(), so this endpoint only works after that refactor.
    BOB.join_policy(label, alices_pubkey_bytes_saved_for_posterity)
# Now that Bob has joined the Policy, let's show how Enrico the Encryptor
# can share data with the members of this Policy and then how Bob retrieves it.
with open(BOOK_PATH, 'rb') as file:
finnegans_wake = file.readlines()
for counter, plaintext in enumerate(finnegans_wake):
#########################
# Enrico, the Encryptor #
#########################
        enrico = Enrico(policy_pubkey_enc=policy.public_key)
# In this case, the plaintext is a
# single passage from <NAME>'s Finnegan's Wake.
# The matter of whether encryption makes the passage more or less readable
# is left to the reader to determine.
        single_passage_ciphertext, _signature = enrico.encapsulate_single_message(plaintext)
        data_source_public_key = bytes(enrico.stamp)
        del enrico
###############
# Back to Bob #
###############
enrico_as_understood_by_bob = Enrico.from_public_keys(
policy_public_key=policy.public_key,
datasource_public_key=data_source_public_key,
label=label
)
# Now Bob can retrieve the original message.
alice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(alices_pubkey_bytes_saved_for_posterity)
delivered_cleartexts = BOB.retrieve(message_kit=single_passage_ciphertext,
data_source=enrico_as_understood_by_bob,
alice_verifying_key=alice_pubkey_restored_from_ancient_scroll)
# We show that indeed this is the passage originally encrypted by Enrico.
assert plaintext == delivered_cleartexts[0]
#print("Retrieved: {}".format(delivered_cleartexts[0]))
@app.route("/share1")
def sharepage1():
return render_template('share1.html')
@app.route("/upload", methods=['POST'])
def uploadfile():
file = request.files['file']
basedir = os.path.abspath(os.path.dirname(__file__))
if file:
filename = secure_filename(file.filename)
file.save(os.path.join(basedir, app.config['UPLOAD_FOLDER'], filename))
        document = IPFS_CONN.add(basedir + '/' + app.config['UPLOAD_FOLDER'] + '/' + filename)
print (document)
json_file = basedir + "/alice.json"
with open(json_file, mode='r') as feedsjson:
feeds = json.load(feedsjson)
with open(json_file, mode='w') as feedsjson:
json.dump([], feedsjson)
with open(json_file, mode='w') as feedsjson:
feeds["Docs"].append(document)
json.dump(feeds, feedsjson)
feedsjson.close()
with open(json_file, mode='r') as feedsjson:
docs = json.load(feedsjson)
login = "Alice"
length = len(docs["Docs"])
return render_template("main.html", login=login, docs=docs, length=length)
@app.route("/share", methods=['POST'])
def sharepage():
hesh = request.form['Hash']
name = request.form['Name']
return render_template('share.html', hesh=hesh, name=name)
@app.route("/sharedoc", methods=['POST'])
def sharedoc():
hesh = request.form['Hash']
name = request.form['Name']
basedir = os.path.abspath(os.path.dirname(__file__))
share_file = basedir + "/share.json"
document = {'Hash': hesh, 'Name': name}
with open(share_file, mode='r') as feedsjson:
feeds = json.load(feedsjson)
with open(share_file, mode='w') as feedsjson:
json.dump([], feedsjson)
with open(share_file, mode='w') as feedsjson:
feeds["Docs"].append(document)
json.dump(feeds, feedsjson)
feedsjson.close()
json_file = basedir + "/alice.json"
with open(json_file, mode='r') as feedsjson:
docs = json.load(feedsjson)
login = "Alice"
    # show every shared doc, matching how uploadfile computes this value
    length = len(docs["Docs"])
return render_template('main.html', login=login, docs=docs, length=length)
@app.route("/addnewdoc")
def addnewdoc():
return render_template('addnewdoc.html')
@app.route("/main", methods=['POST'])
def mainpage():
login = request.form['login']
basedir = os.path.abspath(os.path.dirname(__file__))
json_file = basedir + "/alice.json"
share_file = basedir + "/share.json"
if login == "Alice":
with open(json_file, mode='r') as feedsjson:
docs = json.load(feedsjson)
else:
#with open(share_file, mode='r') as feedsjson:
# docs = json.load(feedsjson)
# Here are our Policy details.
json_string = """"""
policy_end_datetime = maya.now() + datetime.timedelta(days=5)
m, n = 2, 3
label = b"secret/files/and/stuff"
######################################
# Alice, the Authority of the Policy #
######################################
ALICE = Alice(network_middleware=RestMiddleware(),
known_nodes=[ursula],
learn_on_same_thread=True,
federated_only=True,
known_certificates_dir=TEMP_CERTIFICATE_DIR)
BOB = Bob(known_nodes=[ursula],
network_middleware=RestMiddleware(),
federated_only=True,
start_learning_now=True,
learn_on_same_thread=True,
known_certificates_dir=TEMP_CERTIFICATE_DIR)
ALICE.start_learning_loop(now=True)
policy = ALICE.grant(BOB,
label,
m=m, n=n,
expiration=policy_end_datetime)
# Alice puts her public key somewhere for Bob to find later...
alices_pubkey_bytes_saved_for_posterity = bytes(ALICE.stamp)
# ...and then disappears from the internet.
del ALICE
BOB.join_policy(label, alices_pubkey_bytes_saved_for_posterity)
# Now that Bob has joined the Policy, let's show how Enrico the Encryptor
# can share data with the members of this Policy and then how Bob retrieves it.
with open(share_file, 'rb') as file:
finnegans_wake = file.readlines()
for counter, plaintext in enumerate(finnegans_wake):
#########################
# Enrico, the Encryptor #
#########################
            enrico = Enrico(policy_pubkey_enc=policy.public_key)
            # Here each plaintext line comes from share.json rather than the
            # book; Enrico encrypts it under the policy public key.
            single_passage_ciphertext, _signature = enrico.encapsulate_single_message(plaintext)
            data_source_public_key = bytes(enrico.stamp)
            del enrico
###############
# Back to Bob #
###############
enrico_as_understood_by_bob = Enrico.from_public_keys(
policy_public_key=policy.public_key,
datasource_public_key=data_source_public_key,
label=label
)
# Now Bob can retrieve the original message.
alice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(alices_pubkey_bytes_saved_for_posterity)
delivered_cleartexts = BOB.retrieve(message_kit=single_passage_ciphertext,
data_source=enrico_as_understood_by_bob,
alice_verifying_key=alice_pubkey_restored_from_ancient_scroll)
# We show that indeed this is the passage originally encrypted by Enrico.
assert plaintext == delivered_cleartexts[0]
json_string = json_string + format(delivered_cleartexts[0])
        # Strip whitespace and control characters left in the JSON string after
        # re-encryption (or after the json file was updated)
json_string1 = re.sub(r'\s+', ' ', json_string)
json_string1 = re.sub(r'$|\t|\n|\r', '', json_string1)
print(json_string1.replace('b\'', '').replace('\'', '"'))
jso = json_string1.replace('b\'', '').replace('}\'', '}')
#Convert string to json & render a template
docs = json.loads(jso)
length = len(docs["Docs"])
return render_template('main.html', login=login, docs=docs, length=length)
@app.route("/")
def startpage():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0")
| 1.8125 | 2 |
lightcurve/utils.py | codacy-badger/lightcurve | 7 | 12757908 | <gh_stars>1-10
"""General purpose utility functions"""
import os
__all__ = ['expand_refname',
'enlarge',
'is_uniq']
#-------------------------------------------------------------------------------
def expand_refname(refname):
'''Expand header reference file name to full path if $ is present.
Parameters
----------
refname, str
reference file name
Returns
-------
reffile, str
expanded full path to reference file
'''
if '$' in refname:
refpath, reffile = refname.split('$')
try:
reffile = os.path.join(os.environ[refpath], reffile)
except KeyError:
pass
else:
refpath = './'
reffile = refname
return reffile
#-------------------------------------------------------------------------------
def enlarge(a, x=2, y=None):
"""Enlarges 2D image array a using simple pixel repetition in both dimensions.
Enlarges by factor x horizontally and factor y vertically.
If y is left as None, uses factor x for both dimensions."""
assert a.ndim == 2
    if y is None:
        y = x
    for factor in (x, y):
        assert isinstance(factor, int)
assert factor > 0
return a.repeat(y, axis=0).repeat(x, axis=1)
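# Worked example: enlarge(np.array([[1, 2], [3, 4]]), 2) repeats each pixel
# twice along both axes:
#   [[1, 1, 2, 2],
#    [1, 1, 2, 2],
#    [3, 3, 4, 4],
#    [3, 3, 4, 4]]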
#-------------------------------------------------------------------------------
def is_uniq(values):
""" Check if input items are unique
Parameters
----------
values : set
set of all values
Returns
-------
True/False, MULTI/unique value
"""
if len(values) == 0:
return True, ''
elif len(values) == 1:
return True, list(values)[0]
else:
return False, 'MULTI'
#-------------------------------------------------------------------------------
| 3 | 3 |
scripts/gen_sql.py | 2kranki/genapp | 5 | 12757909 | #!/usr/bin/env python3
# vi:nu:et:sts=4 ts=4 sw=4
""" Generate SQL Applications for all the Test01 Input Data
Test01 Input Data has test data for each SQL Server type supported
by genapp so that it can be properly tested. This program scans
./misc/ for all the test01 application definitions and generates
them. Included in the generation process is the generation of
Jenkins support for building, testing and deploying to Docker Hub
each of the applications.
To simplify things, this script must be self-contained using only
the standard Python Library.
TODO:
- Finish jenkins/build generation
- Finish jenkins/deploy generation
- Finish jenkins/push generation
- Finish jenkins/test generation
- Finish jenkinsfile generation
- Finish application generation
"""
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import subprocess
import sys
sys.path.insert(0, './scripts')
import util # pylint: disable=wrong-import-position
################################################################################
# Object Classes and Functions
################################################################################
class Main(util.MainBase):
""" Main Command Execution Class
"""
def __init__(self):
super().__init__()
self.test_suffixes = ['01ma', '01ms', '01my', '01pg', '01sq']
self.genapp_name = 'genapp'
def arg_parse_add(self):
""" Add additional arguments.
"""
self.arg_prs.add_argument('-b', '--build', action='store_false', dest='flg_build',
default=True, help='Do not build genapp before using it'
)
self.arg_prs.add_argument('--appdir', action='store', dest='app_dir',
default='/tmp', help='Set Application Base Directory'
)
self.arg_prs.add_argument('--appname', action='store', dest='app_name',
default='app01', help='Set Application Base Name'
)
self.arg_prs.add_argument('--bindir', action='store', dest='bin_dir',
default='/tmp/bin', help='Set Binary Directory'
)
self.arg_prs.add_argument('--srcdir', action='store', dest='src_dir',
default='./cmd', help='Set genapp Source Directory'
)
self.arg_prs.add_argument('--mdldir', action='store', dest='mdl_dir',
default='./models', help='Set genapp Model Directory'
)
self.arg_prs.add_argument('--mscdir', action='store', dest='msc_dir',
default='./misc', help='Set genapp Misc Directory'
)
self.arg_prs.add_argument('--tstdir', action='store', dest='tst_dir',
default='./misc', help='Set genapp Test Directory'
)
def arg_parse_exec(self):
""" Execute the argument parsing.
Warning - Main should override this method if additional cli
arguments are needed or argparse needs some form of modification
before execution.
"""
self.arg_parse_setup()
self.arg_parse_add()
self.arg_parse_parse()
self.args.app_path = os.path.join(self.args.bin_dir, self.genapp_name)
def build(self):
""" Build the Golang program, genapp.
"""
try:
src_path = os.path.join(self.args.src_dir, self.genapp_name, '*.go')
if self.args.flg_debug:
print("\tapp_path:", self.args.app_path)
print("\tsrc_path:", src_path)
cmd = 'go build -o "{0}" -v -race {1}'.format(self.args.app_path, src_path)
if not self.args.flg_exec:
print("\tWould have executed:", cmd)
self.result_code = 0
else:
if not os.path.exists(self.args.bin_dir):
if self.args.flg_debug:
print("\tCreating dir:", self.args.bin_dir)
os.makedirs(self.args.bin_dir, 0o777)
print("\tExecuting:", cmd)
self.result_code = subprocess.call(cmd, shell=True)
except: # pylint: disable=bare-except
self.result_code = 8
def exec_pgm(self): # pylint: disable=no-self-use
""" Program Execution
Warning - Main should override this method and make certain that
it returns an exit code in self.result_code.
"""
if len(self.args.args) > 0:
print("ERROR - too many command arguments!")
self.arg_prs.print_help()
self.result_code = 0
return
if self.args.flg_debug:
print('\tsrc_dir:', self.args.src_dir)
# Set up base objects, files and directories.
if not os.path.exists(self.args.app_dir):
print("\tCreating Directory:", self.args.app_dir)
if self.args.flg_exec:
os.makedirs(self.args.app_dir)
else:
print("\tWould have executed: mkdir -p", self.args.app_dir)
# Perform the specified actions.
try:
# Build genapp if needed.
if self.args.flg_build:
print("\tBuilding genapp...")
self.build()
# Generate the application subdirectories.
for suffix in self.test_suffixes:
print("\tCreating app for app{0}...".format(suffix))
self.genapp("test{0}.exec.json.txt".format(suffix))
if self.result_code != 0:
break
finally:
pass
print()
def genapp(self, file_name):
""" Generate a test application.
:arg szExecFileName: Exec JSON file name which is expected
to be in the szMiscDir.
:arg szOutPath: path to write the output to.
"""
exec_path = os.path.join(self.args.msc_dir, file_name)
app_path = os.path.join(self.args.bin_dir, self.genapp_name)
cmd = '"{0}" --mdldir {1} -x {2}'.format(app_path, self.args.mdl_dir, exec_path)
try:
self.result_code = 0
if self.args.flg_exec:
print("\tExecuting:", cmd)
os.system(cmd)
else:
print("\tWould have executed:", cmd)
except: # pylint: disable=bare-except
self.result_code = 4
################################################################################
# Command-line interface
################################################################################
if __name__ == '__main__':
Main().run()
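# Typical invocation using the script's default paths (a sketch, not
# captured from a real run):
#
#     ./scripts/gen_sql.py --bindir /tmp/bin --mscdir ./misc
#
# builds genapp once and then runs, for each suffix in test_suffixes, a
# command of the form:
#
#     "/tmp/bin/genapp" --mdldir ./models -x ./misc/test01ma.exec.json.txt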
| 1.453125 | 1 |
nbviewer/providers/local/handlers.py | SylvainCorlay/nbviewer | 0 | 12757910 | <gh_stars>0
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import errno
import io
import os
from datetime import datetime
from tornado import (
gen,
web,
iostream,
)
from tornado.log import app_log
from ...utils import url_path_join
from ..base import (
cached,
RenderingHandler,
)
class LocalFileHandler(RenderingHandler):
"""Renderer for /localfile
Serving notebooks from the local filesystem
"""
# cache key is full uri to avoid mixing download vs view paths
_cache_key_attr = 'uri'
@property
def localfile_path(self):
return os.path.abspath(self.settings.get('localfile_path', ''))
@gen.coroutine
def download(self, abspath):
"""Download the file at the given absolute path.
Parameters
==========
abspath: str
Absolute path to the file
"""
filename = os.path.basename(abspath)
st = os.stat(abspath)
self.set_header('Content-Length', st.st_size)
self.set_header('Content-Disposition',
'attachment; filename={};'.format(filename))
content = web.StaticFileHandler.get_content(abspath)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
@cached
@gen.coroutine
def get(self, path):
"""Get a directory listing, rendered notebook, or raw file
at the given path based on the type and URL query parameters.
If the path points to an accessible directory, render its contents.
If the path points to an accessible notebook file, render it.
If the path points to an accessible file and the URL contains a
'download' query parameter, respond with the file as a download.
Parameters
==========
path: str
Local filesystem path
"""
abspath = os.path.abspath(os.path.join(
self.localfile_path,
path
))
if not abspath.startswith(self.localfile_path):
app_log.warn("directory traversal attempt: '%s'" %
self.localfile_path)
raise web.HTTPError(404)
if not os.path.exists(abspath):
raise web.HTTPError(404)
if os.path.isdir(abspath):
html = self.show_dir(abspath, path)
raise gen.Return(self.cache_and_finish(html))
is_download = self.get_query_arguments('download')
if is_download:
            yield self.download(abspath)
return
try:
with io.open(abspath, encoding='utf-8') as f:
nbdata = f.read()
except IOError as ex:
if ex.errno == errno.EACCES:
# py2/3: can't read the file, so don't give away it exists
raise web.HTTPError(404)
raise ex
yield self.finish_notebook(nbdata,
download_url='?download',
msg="file from localfile: %s" % path,
public=False,
format=self.format,
request=self.request)
def show_dir(self, abspath, path):
"""Render the directory view template for a given filesystem path.
Parameters
==========
abspath: string
Absolute path on disk to show
path: string
URL path equating to the path on disk
Returns
=======
str
Rendered HTML
"""
base_url = '/localfile'
breadcrumbs = [{
'url': url_path_join(self.base_url, base_url),
'name': 'home'
}]
breadcrumbs.extend(self.breadcrumbs(path, base_url))
entries = []
dirs = []
ipynbs = []
try:
contents = os.listdir(abspath)
except IOError as ex:
if ex.errno == errno.EACCES:
# py2/3: can't access the dir, so don't give away its presence
raise web.HTTPError(404)
for f in contents:
absf = os.path.join(abspath, f)
entry = {}
entry['name'] = f
# skip hidden or "hidden" files
if f.startswith('.') or f.startswith('_'):
continue
elif os.path.isdir(absf):
if not os.access(absf, os.X_OK | os.R_OK):
# skip directories we cannot visit
continue
st = os.stat(absf)
dt = datetime.utcfromtimestamp(st.st_mtime)
entry['modtime'] = dt.strftime('%Y-%m-%d %H:%M:%S')
entry['url'] = url_path_join(base_url, path, f)
entry['class'] = 'fa fa-folder-open'
dirs.append(entry)
elif f.endswith('.ipynb'):
if not os.access(absf, os.R_OK):
# skip files we cannot read
continue
st = os.stat(absf)
dt = datetime.utcfromtimestamp(st.st_mtime)
entry['modtime'] = dt.strftime('%Y-%m-%d %H:%M:%S')
entry['url'] = url_path_join(base_url, path, f)
entry['class'] = 'fa fa-book'
ipynbs.append(entry)
dirs.sort(key=lambda e: e['name'])
ipynbs.sort(key=lambda e: e['name'])
entries.extend(dirs)
entries.extend(ipynbs)
html = self.render_template('dirview.html',
entries=entries,
breadcrumbs=breadcrumbs)
return html
| 2.140625 | 2 |
apps/banks/models.py | KarpovDenis74/banking_analytics | 0 | 12757911 | from statistics import mode
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import UniqueConstraint
from django.db.models.deletion import CASCADE
User = get_user_model()
class Bank(models.Model):
    long_name = models.CharField(
        verbose_name="Official full name",
        max_length=1000,
    )
    short_name = models.CharField(
        verbose_name="Official short name",
        max_length=500,
    )
    bic = models.CharField(
        max_length=9,
        verbose_name="BIC code of the credit institution",
    )
    reg_date = models.DateField(
        verbose_name="Date the credit institution was registered with the Central Bank",
    )
    name = models.CharField(
        verbose_name="Name of the credit institution",
        max_length=256,
    )
    ogrn = models.CharField(
        verbose_name="Primary state registration number (OGRN)",
        max_length=256,
    )
    reg_number = models.CharField(
        verbose_name="Registration number of the credit institution at the Central Bank",
        max_length=256,
    )
    internal_number = models.CharField(
        verbose_name="Internal number of the credit institution at the Central Bank",
        max_length=256,
    )
    cregnr = models.CharField(
        verbose_name="Additional registration number at the Central Bank",
        max_length=256,
        blank=True
    )
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class Region(models.Model):
    name = models.CharField(
        verbose_name='Region name',
        max_length=1000,
    )
    code = models.PositiveSmallIntegerField(
        verbose_name='Region code',
        unique=True
    )
def __str__(self):
return f'{self.code} - {self.name}'
class Meta:
ordering = ['name']
class BalanceAccount(models.Model):
    # e.g., 202 or 20202
    indCode = models.CharField(
        verbose_name='Account number',
        max_length=30,
        unique=True
    )
    name = models.CharField(
        verbose_name='Balance account name',
        max_length=1000,
    )
    # Order of the balance account (1 or 2)
    indType = models.CharField(
        verbose_name='Account code',
        max_length=30,
    )
    # e.g., Section "A"
    indChapter = models.CharField(
        verbose_name='Section code',
        max_length=30,
    )
def __str__(self):
return f'{self.name}'
class Meta:
ordering = ['indCode']
class BanksBalance(models.Model):
    date = models.DateField(
        verbose_name="Balance data as of this date",
    )
    bank = models.ForeignKey(Bank,
                             verbose_name="Bank",
                             on_delete=CASCADE,
                             blank=False,
                             )
    # e.g., 202 or 20202
    indCode = models.ForeignKey(BalanceAccount,
                                verbose_name="Account number",
                                on_delete=CASCADE,
                                blank=False,
                                )
    rub_balance = models.IntegerField(
        verbose_name='Balance in rubles as of the date',
    )
    cur_balance = models.IntegerField(
        verbose_name='Balance in foreign currency as of the date',
    )
    itog_balance = models.IntegerField(
        verbose_name='Total balance in rubles and foreign currency as of the date',
    )
    ora = models.IntegerField(
        verbose_name='Debit turnover in rubles',
    )
    ova = models.IntegerField(
        verbose_name='Debit turnover in foreign currency',
    )
    oitga = models.IntegerField(
        verbose_name='Total debit turnover in rubles and foreign currency',
    )
    orp = models.IntegerField(
        verbose_name='Credit turnover in rubles',
    )
    ovp = models.IntegerField(
        verbose_name='Credit turnover in foreign currency',
    )
    oitgp = models.IntegerField(
        verbose_name='Total credit turnover in rubles and foreign currency',
    )
    def __str__(self):
        return f'{self.date} : {self.indCode} {self.itog_balance}'
    class Meta:
        constraints = [
            UniqueConstraint(fields=['date', 'indCode', 'bank'],
                             name='unique_date_indCode_bank'),
        ]
        verbose_name = 'Balances as of a date and turnovers for the previous period'
        verbose_name_plural = 'Balances as of a date and turnovers for the previous period'
        ordering = ['-date']
| 2.234375 | 2 |
telepybot/modules/echo.py | anttilip/telepybot | 1 | 12757912 | """Echoes everything you say.
Usage:
/echo
/echo Hi!
Type 'cancel' to stop echoing.
"""
def handle_update(bot, update, update_queue, **kwargs):
"""Echo messages that user sends.
This is the main function that modulehander calls.
Args:
bot (telegram.Bot): Telegram bot itself
update (telegram.Update): Update that will be processed
update_queue (Queue): Queue containing all incoming and unhandled updates
kwargs: All unused keyword arguments. See more from python-telegram-bot
"""
try:
# e.g. message is "/echo I'm talking to a bot!"
text = update.message.text.split(' ', 1)[1]
except IndexError:
# e.g message is just "/echo"
text = "What do you want me to echo?"
bot.sendMessage(chat_id=update.message.chat_id, text=text)
# If module is more conversational, it can utilize the update_queue
while True:
update = update_queue.get()
if update.message.text == "":
text = "Couldn't echo that"
bot.sendMessage(chat_id=update.message.chat_id, text=text)
elif update.message.text.lower() == "cancel":
text = "Ok, I'll stop echoing..."
bot.sendMessage(chat_id=update.message.chat_id, text=text)
break
elif update.message.text.startswith('/'):
# User accesses another bot
update_queue.put(update)
break
else:
bot.sendMessage(
chat_id=update.message.chat_id, text=update.message.text)
| 3.140625 | 3 |
test_install.py | flothesof/sfepy | 510 | 12757913 | <filename>test_install.py
#!/usr/bin/env python
"""
Simple script for testing various SfePy functionality, examples not
covered by tests, and running the tests.
The script just runs the commands specified in its main() using the
`subprocess` module, captures the output and compares one or more key
words to the expected ones.
The output of failed commands is saved to 'test_install.log' file.
"""
from __future__ import print_function
from __future__ import absolute_import
import time
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shlex
import subprocess
import logging
import re
DEBUG_FMT = '*' * 55 + '\n%s\n' + '*' * 55
def _get_logger(filename='test_install.log'):
"""
Convenience function to set-up output and logging.
"""
logger = logging.getLogger('test_install.py')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
logger = _get_logger()
def check_output(cmd):
"""
Run the specified command and capture its outputs.
Returns
-------
out : tuple
The (stdout, stderr) output tuple.
"""
logger.info(cmd)
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = [ii.decode() for ii in p.communicate()]
return out
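# Illustrative call (assumes a POSIX environment with `echo` on the PATH):
#
#     out, err = check_output('echo hello')
#     # out == 'hello\n', err == ''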
def report(out, name, line, item, value, eps=None, return_item=False,
match_numbers=False):
"""
Check that `item` at `line` of the output string `out` is equal
to `value`. If not, print the output.
"""
try:
if match_numbers:
status = out.split('\n')[line]
else:
status = out.split('\n')[line].split()
except IndexError:
logger.error(' not enough output from command!')
ok = False
else:
try:
if match_numbers:
                pat = r'([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?[jJ]?)'
matches = re.findall(pat, status)
status_item = matches[item]
else:
status_item = status[item]
logger.info(' comparing: %s %s', status_item, value)
if eps is None:
ok = (status_item == value)
else:
try:
ok = abs(float(status_item) - float(value)) < eps
except:
ok = False
except IndexError:
ok = False
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, status[item]
else:
return ok
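# For instance, report(out, '...', -3, 5, '1.173819e-16', eps=1e-15), as
# called from main() below, takes the third-to-last line of `out`, splits
# it on whitespace, and checks that the token at index 5 matches the
# expected value to within `eps`.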
def report2(out, name, items, return_item=False):
"""
Check that `items` are in the output string `out`.
If not, print the output.
"""
ok = True
for s in items:
logger.info(' checking: %s', s)
if s not in out:
ok = False
break
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, s
else:
return ok
def report_tests(out, return_item=False):
"""
Check that all tests in the output string `out` passed.
If not, print the output.
"""
    search = re.compile(r'([0-9]+) test file\(s\) executed in ([0-9.]+) s, ([0-9]+) failure\(s\) of ([0-9]+) test\(s\)').search
try:
stats = search(out).groups()
except AttributeError:
stats = '0', '0', '-1', '0'
ok = False
ok = stats[2] == '0'
logger.info(' %s test file(s) executed in %s s, %s failure(s) of %s test(s)'
% (stats[0], stats[1], stats[2], stats[3]))
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, stats[2]
else:
return ok
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.parse_args()
fd = open('test_install.log', 'w')
fd.close()
if sys.version_info[0] < 3:
cmd = 'python2'
else:
cmd = 'python3'
eok = 0
t0 = time.time()
out, err = check_output('%s ./script/blockgen.py' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./script/cylindergen.py' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./script/convert_mesh.py meshes/3d/cylinder.vtk out.mesh' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./script/tile_periodic_mesh.py -r 2,2 meshes/elements/2_4_2.mesh out-per.mesh' % cmd)
eok += report(out, '...', -2, 1, 'done.')
out, err = check_output('%s ./script/extract_surface.py meshes/various_formats/octahedron.node -' % cmd)
eok += report(out, '...', -2, 0, '1185')
out, err = check_output('%s ./simple.py examples/diffusion/poisson.py' % cmd)
eok += report(out, '...', -3, 5, '1.173819e-16', eps=1e-15)
out, err = check_output("""%s ./simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" examples/diffusion/poisson.py""" %cmd)
eok += report(out, '...', -3, 5, '2.308051e-16', eps=1e-15)
out, err = check_output('%s ./simple.py examples/diffusion/poisson_iga.py' % cmd)
eok += report(out, '...', -3, 5, '3.373487e-15', eps=1e-14)
out, err = check_output('%s ./simple.py examples/navier_stokes/stokes.py' % cmd)
eok += report(out, '...', -3, 5, '1.210678e-13', eps=1e-11)
out, err = check_output('%s ./simple.py examples/diffusion/poisson_parametric_study.py' % cmd)
eok += report(out, '...', -3, 5, '1.606408e-14', eps=1e-13)
out, err = check_output('%s ./simple.py examples/linear_elasticity/its2D_3.py' % cmd)
eok += report(out, '...', -24, 5, '3.964886e-12', eps=1e-11)
eok += report(out, '...', -4, 4, '2.58660e+01', eps=1e-5)
out, err = check_output('%s ./simple.py examples/linear_elasticity/linear_elastic.py --format h5' % cmd)
eok += report(out, '...', -3, 5, '4.638192e-18', eps=1e-15)
out, err = check_output('%s ./extractor.py -d cylinder.h5' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./postproc.py -n --no-offscreen -o cylinder.png cylinder.h5' % cmd)
eok += report(out, '...', -3, 2, 'cylinder.png...')
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py' % cmd)
eok += report(out, '...', -9, 0, '2.08545116e+08', match_numbers=True)
eok += report(out, '...', -8, 1, '1.16309223e+11', match_numbers=True)
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py --phase-velocity' % cmd)
eok += report(out, '...', -2, 0, '4189.41229592', match_numbers=True)
eok += report(out, '...', -2, 1, '2620.55608256', match_numbers=True)
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py -d' % cmd)
eok += report(out, '...', -6, 1, '[0,')
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps_rigid.py' % cmd)
eok += report(out, '...', -9, 0, '4.58709531e+07', match_numbers=True)
eok += report(out, '...', -8, 1, '1.13929200e+11', match_numbers=True)
out, err = check_output('%s ./simple.py examples/quantum/hydrogen.py' % cmd)
eok += report(out, '...', -2, -2, '-0.01913506', eps=1e-4)
out, err = check_output('%s ./homogen.py examples/homogenization/perfusion_micro.py' % cmd)
eok += report2(out, '...', ['computing EpA', 'computing PA_3',
'computing GA', 'computing EmA',
'computing KA'])
out, err = check_output('%s examples/homogenization/rs_correctors.py -n' % cmd)
eok += report(out, '...', -2, -1, '1.644e-01', match_numbers=True)
out, err = check_output('%s examples/large_deformation/compare_elastic_materials.py -n' % cmd)
eok += report(out, '...', -3, 5, '1.068759e-14', eps=1e-13)
out, err = check_output('%s examples/linear_elasticity/linear_elastic_interactive.py' % cmd)
eok += report(out, '...', -16, 0, '1.62128841139e-14', eps=1e-13)
out, err = check_output('%s examples/linear_elasticity/modal_analysis.py' % cmd)
eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)
out, err = check_output('%s examples/multi_physics/thermal_electric.py' % cmd)
eok += report(out, '...', -4, 5, '2.612933e-14', eps=1e-13)
out, err = check_output('%s examples/diffusion/laplace_refine_interactive.py output' % cmd)
eok += report(out, '...', -3, 5, '2.675866e-15', eps=1e-13)
out, err = check_output('%s examples/diffusion/laplace_iga_interactive.py -o output-tests' % cmd)
eok += report(out, '...', -3, 5, '1.028134e-13', eps=1e-12)
out, err = check_output('%s examples/dg/imperative_burgers_1D.py -o output-tests' % cmd)
eok += report(out, '...', -3, 3, 'moment_1D_limiter')
out, err = check_output('mpiexec -n 2 %s examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)
out, err = check_output('mpiexec -n 2 %s examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-8)
t1 = time.time()
out, err = check_output('%s ./run_tests.py' % cmd)
tok, failed = report_tests(out, return_item=True)
tok = {True : 'ok', False : 'fail'}[tok]
t2 = time.time()
fd = open('test_install_times.log', 'a+')
fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
% (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
fd.close()
if __name__ == '__main__':
main()
| 2.890625 | 3 |
kanmail/server/mail/util.py | vilhelmprytz/Kanmail | 0 | 12757914 | <gh_stars>0
import email.header
import quopri
import re
from base64 import b64decode
from binascii import Error as BinasciiError
from markdown import markdown
from mdx_linkify.mdx_linkify import LinkifyExtension
from kanmail.log import logger
from .contacts import add_contacts
def markdownify(text, linkify=True):
extensions = [
'markdown.extensions.extra',
'markdown.extensions.nl2br', # turn newlines into breaks
'markdown.extensions.sane_lists',
]
if linkify:
extensions.append(LinkifyExtension())
return markdown(text, extensions)
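# Rough usage sketch; the exact HTML depends on the installed Markdown and
# mdx_linkify versions:
#
#     markdownify('Hello\nhttps://example.com')
#     # -> '<p>Hello<br />\n<a href="https://example.com">https://example.com</a></p>'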
def format_address(address):
bits = []
if address.mailbox:
bits.append(decode_string(address.mailbox))
if address.host:
bits.append(decode_string(address.host))
return '@'.join(bits)
def make_contact_tuple(address):
name = decode_header(address.name) if address.name else None
email = format_address(address)
return (name, email)
def make_contact_tuples(addresses):
if not addresses:
return []
return [make_contact_tuple(address) for address in addresses]
def make_email_headers(account, folder, uid, data, parts, save_contacts=True):
# Parse references header into list of reference message IDs
headers = extract_headers(
data[b'BODY[HEADER.FIELDS (REFERENCES CONTENT-TRANSFER-ENCODING)]'],
)
references = headers.get('References')
if references:
references = references.split()
        # This is a fix for some badly built email clients that join message-ID
        # references with commas rather than the standard space *rolls eyes*.
if len(references) == 1 and '>,<' in references[0]:
references = references[0].split(',')
body_meta = None
if '1' in parts:
body_meta = parts['1']
elif '1.1' in parts:
body_meta = parts['1.1']
if not body_meta:
content_transfer_encoding = headers.get('Content-Transfer-Encoding')
if content_transfer_encoding:
body_meta = {
'encoding': content_transfer_encoding,
}
encoding = body_meta.get('encoding') if body_meta else None
# Attempt to extract an excerpt
excerpt = extract_excerpt(
data[b'BODY[1]<0>'],
body_meta,
)
# Make the summary dict!
envelope = data[b'ENVELOPE']
subject = decode_header(envelope.subject)
date = None
if envelope.date:
date = envelope.date.isoformat()
from_ = make_contact_tuples(envelope.from_)
to = make_contact_tuples(envelope.to)
send = make_contact_tuples(envelope.sender)
cc = make_contact_tuples(envelope.cc)
bcc = make_contact_tuples(envelope.bcc)
reply_to = make_contact_tuples(envelope.reply_to)
if save_contacts:
all_contacts = set((*from_, *to, *send, *cc, *bcc, *reply_to))
add_contacts(all_contacts)
return {
'uid': uid,
'seq': data[b'SEQ'],
'flags': data[b'FLAGS'],
'size': data[b'RFC822.SIZE'],
'excerpt': excerpt,
'content_encoding': encoding,
'parts': parts,
# Internal meta
'account_name': account.name,
'server_folder_name': folder.name,
'folder_name': folder.alias_name,
# Envelope data
'date': date,
'subject': subject,
# Address data
'from': from_,
'to': to,
'send': send,
'cc': cc,
'bcc': bcc,
'reply_to': reply_to,
# Threading
'in_reply_to': envelope.in_reply_to,
'message_id': envelope.message_id,
'references': references,
}
def decode_header(subject):
if subject is None:
return ''
bits = []
if isinstance(subject, bytes):
subject = decode_string(subject)
for output, encoding in email.header.decode_header(subject):
if encoding:
output = output.decode(encoding)
elif isinstance(output, bytes):
output = decode_string(output)
bits.append(output)
return ''.join(bits)
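# Example with an RFC 2047 encoded word (value illustrative):
#
#     decode_header('=?utf-8?b?S8Okc2U=?=')
#     # -> 'Käse'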
def decode_string(string, string_meta=None, as_str=True):
encoding = None
charset = None
if string_meta:
# Encoding *must* be provided
encoding = string_meta['encoding'].lower()
if 'charset' in string_meta:
charset = string_meta['charset'].lower()
# Remove any quoted printable stuff
if encoding == 'quoted-printable':
string = quopri.decodestring(string)
if encoding == 'base64':
try:
string = b64decode(string)
# Handle incomplete payloads (we only fetch the first 1024 bytes of a
# message). Split into lines and attempt to decode.
except BinasciiError:
string_bits = string.split()
valid_bits = []
for bit in string_bits:
try:
valid_bits.append(b64decode(bit))
except Exception:
pass
if not valid_bits:
raise
string = b'\n'.join(valid_bits)
if charset:
string = string.decode(charset, 'replace')
if as_str and isinstance(string, bytes):
string = string.decode('utf-8', 'replace')
return string
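# Minimal sketches of the two transfer encodings handled above (inputs
# illustrative):
#
#     decode_string(b'hello=20world', {'encoding': 'quoted-printable'})
#     # -> 'hello world'
#     decode_string(b'aGVsbG8=', {'encoding': 'base64'})
#     # -> 'hello'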
def _extract_excerpt(raw_body, raw_body_meta):
# Decode the body first
raw_body = decode_string(raw_body, raw_body_meta)
# Remove any style tags *and* content
raw_body = re.sub(r'<style.*>.*(?:</style>)?', '', raw_body, flags=re.DOTALL)
# Remove any other tags
raw_body = re.sub(r'<.*?>', '', raw_body)
# Remove any tag starts (ie <thing ... with no end due to cutoff)
raw_body = re.sub(r'<[^>]*', '', raw_body)
lines = []
for line in raw_body.splitlines():
line = line.strip()
if not line:
continue
if line[0] in ('#', '-'):
continue
if re.match(r'^Content-[A-Za-z\-]+:', line):
continue
if line in lines: # remove duplicates (ie text+html versions)
continue
lines.append(line)
if not lines:
return
body = '\n'.join(lines)
return body
def extract_excerpt(raw_body, raw_body_meta):
try:
return _extract_excerpt(
raw_body,
raw_body_meta,
)
except Exception as e:
logger.warning((
'Could not extract excerpt: '
f'{e} (data={raw_body}, meta={raw_body_meta})'
))
def extract_headers(raw_message):
message = decode_string(raw_message)
parser = email.parser.HeaderParser()
return {
key: decode_header(header)
for key, header in parser.parsestr(message).items()
}
def _parse_bodystructure_list(items):
'''
Given a list of items ('KEY', 'value', 'OTHER_KEY', 'other_value'), returns
a dict representation. Handles nested lists.
'''
data = {}
for i in range(0, len(items), 2):
key = items[i]
if not isinstance(key, (str, bytes)):
continue
value = items[i + 1]
if isinstance(value, tuple):
value = _parse_bodystructure_list(value)
data[key.upper()] = value
return data
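# A small sketch of the flattening; keys are upper-cased and nested tuples
# become nested dicts (values illustrative):
#
#     _parse_bodystructure_list((b'charset', b'utf-8', b'name', b'a.txt'))
#     # -> {b'CHARSET': b'utf-8', b'NAME': b'a.txt'}
#     _parse_bodystructure_list((b'attachment', (b'filename', b'a.txt')))
#     # -> {b'ATTACHMENT': {b'FILENAME': b'a.txt'}}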
def _parse_bodystructure(bodystructure, item_number=None):
items = {}
type_or_bodies = bodystructure[0]
if isinstance(type_or_bodies, list):
for i, body in enumerate(type_or_bodies, 1):
if item_number:
nested_item_number = f'{item_number}.{i}'
else:
nested_item_number = f'{i}'
items.update(_parse_bodystructure(
body,
item_number=nested_item_number,
))
else:
subtype = decode_string(bodystructure[1])
encoding = decode_string(bodystructure[5])
size = bodystructure[6]
content_id = bodystructure[3]
if content_id:
content_id = decode_string(content_id)
content_id = content_id.strip('<>')
data = {
'type': decode_string(type_or_bodies),
'subtype': subtype,
'encoding': encoding,
'content_id': content_id,
'size': size,
}
extra_data = {}
if bodystructure[2]:
extra_data.update(_parse_bodystructure_list(bodystructure[2]))
for bit in bodystructure[7:]:
if isinstance(bit, tuple) and len(bit) > 1:
extra_data.update(_parse_bodystructure_list(bit))
if b'CHARSET' in extra_data:
data['charset'] = decode_string(extra_data[b'CHARSET'])
if b'NAME' in extra_data:
data['name'] = decode_string(extra_data[b'NAME'])
any_attachment_data = extra_data.get(b'ATTACHMENT') or extra_data.get(b'INLINE')
if any_attachment_data:
if b'FILENAME' in any_attachment_data:
data['name'] = decode_string(any_attachment_data[b'FILENAME'])
item_number = item_number or 1
items[item_number] = data
return items
def parse_bodystructure(bodystructure):
try:
items = _parse_bodystructure(bodystructure)
except Exception as e:
logger.warning(f'Could not parse bodystructure: {e} (struct={bodystructure})')
raise
# Attach shortcuts -> part IDs
items['attachments'] = []
for number, part in list(items.items()):
if number == 'attachments':
continue
if part['type'].upper() == 'TEXT':
subtype = part['subtype'].upper()
if 'html' not in items and subtype == 'HTML':
items['html'] = number
continue
if 'plain' not in items and subtype == 'PLAIN':
items['plain'] = number
continue
items['attachments'].append(number)
return items
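# The result maps IMAP part numbers to metadata dicts and adds the shortcut
# keys, roughly (shape illustrative):
#
#     {'1': {'type': 'TEXT', 'subtype': 'PLAIN', ...},
#      '2': {'type': 'IMAGE', 'subtype': 'PNG', 'name': 'cat.png', ...},
#      'plain': '1',
#      'attachments': ['2']}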
| 2.140625 | 2 |
hyperbo/basics/linalg_test.py | google-research/hyperbo | 3 | 12757915 | # coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for linalg.py."""
import copy
from absl.testing import absltest
from hyperbo.basics import linalg
import jax
from jax import random
import jax.numpy as jnp
import jax.scipy.linalg as jspla
import numpy as np
grad = jax.grad
def test_grad(fun, params, index, eps=1e-4, cached_cholesky=False):
key = random.PRNGKey(0)
key, subkey = random.split(key)
vec = random.normal(subkey, params[index].shape)
if index == 0:
vec = 0.5 * jnp.dot(vec.T, vec)
unitvec = vec / jnp.sqrt(jnp.vdot(vec, vec))
else:
unitvec = vec / jnp.sqrt(jnp.vdot(vec, vec))
params_copy = copy.deepcopy(params)
params_copy[index] += eps / 2. * unitvec
if cached_cholesky:
params_copy[2] = jspla.cholesky(params_copy[0], lower=True)
f1 = fun(*params_copy)
params_copy = copy.deepcopy(params)
params_copy[index] -= eps / 2. * unitvec
if cached_cholesky:
params_copy[2] = jspla.cholesky(params_copy[0], lower=True)
f2 = fun(*params_copy)
exact_grad_prod = jnp.vdot(grad(fun, index)(*params), unitvec)
return {'Numerical': (f1 - f2) / eps, 'Exact': exact_grad_prod}
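# test_grad compares a central finite-difference estimate,
# (f(p + eps/2 * u) - f(p - eps/2 * u)) / eps, against the exact
# directional derivative jnp.vdot(grad(fun, index)(*params), u) along a
# random unit direction u. For index 0 the random perturbation is
# symmetrized first so the SPD matrix argument stays symmetric.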
class LinalgTest(absltest.TestCase):
def test_inverse_spdmatrix_vector_product(self):
np.random.seed(1)
dim = 10
noise = 1e-3
num_replicas = 10
def fun(spd_matrix, x):
return jnp.dot(x, linalg.inverse_spdmatrix_vector_product(spd_matrix, x))
def test_grad_at_index(index):
for _ in range(num_replicas):
matrix = np.random.randn(dim, dim)
spd_matrix = matrix.T.dot(matrix) + noise * np.eye(matrix.shape[0])
x = np.random.randn(dim)
params = [spd_matrix, x]
grads = test_grad(fun, params, index)
numerical_grad = grads['Numerical']
exact_grad = grads['Exact']
self.assertTrue(jnp.allclose(numerical_grad, exact_grad, rtol=1))
test_grad_at_index(0)
test_grad_at_index(1)
def test_inverse_spdmatrix_vector_product_cached_cholesky(self):
"""Tests if the gradient works when the Cholesky factor is given."""
np.random.seed(1)
dim = 10
noise = 1e-3
num_replicas = 10
def fun(spd_matrix, x, cached_cholesky):
return jnp.dot(
x,
linalg.inverse_spdmatrix_vector_product(
spd_matrix, x, cached_cholesky=cached_cholesky))
def test_grad_at_index(index):
for _ in range(num_replicas):
matrix = np.random.randn(dim, dim)
spd_matrix = matrix.T.dot(matrix) + noise * np.eye(matrix.shape[0])
chol_factor = jspla.cholesky(spd_matrix, lower=True)
x = np.random.randn(dim)
params = [spd_matrix, x, chol_factor]
grads = test_grad(fun, params, index, cached_cholesky=True)
numerical_grad = grads['Numerical']
exact_grad = grads['Exact']
print(numerical_grad, exact_grad)
self.assertTrue(jnp.allclose(numerical_grad, exact_grad, rtol=1))
test_grad_at_index(0)
test_grad_at_index(1)
if __name__ == '__main__':
absltest.main()
| 1.648438 | 2 |
examples/examples.py | YashaPushak/PCS | 1 | 12757916 | import PCS
pcs = PCS.PCS('params-lkh.pcs')
with open('params-lkh-copy.pcs','w') as f_out:
f_out.write(pcs.printDocument())
pcs2 = PCS.PCS('params-cplex.pcs')
with open('params-cplex-copy.pcs','w') as f_out:
f_out.write(pcs2.printDocument())
print('\n' + '*'*50)
print("ASCENT_CANDIDATES is not a child, so it should always be active, regardless of the configuration passed in. We are passing in an empty configuration and seeing if it is active.")
print("pcs.isActive('ASCENT_CANDIDATES',{})? " + str(pcs.isActive('ASCENT_CANDIDATES',{})))
print('\n' + '*'*50)
print("KICKS is a child, it should only be active if KICK_WALK=NO.")
print("pcs.isActive('KICKS',{'KICK_WALK':'NO'})? " + str(pcs.isActive('KICKS',{'KICK_WALK':'NO'})))
print('\n' + '*'*50)
print("Here we can see it works as well..")
print("pcs.isActive('KICKS',{'KICK_WALK':'YES'})? " + str(pcs.isActive('KICKS',{'KICK_WALK':'YES'})))
print('\n' + '*'*50)
print("Note that MAYBE is not one of the allowed values for it's parent. But we don't check for that.")
try:
exception = False
print("pcs.isActive('KICKS',{'KICK_WALK':'MAYBE'})? " + str(pcs.isActive('KICKS',{'KICK_WALK':'MAYBE'})))
except Exception:
print("An exception was raised")
exception = True
if(not exception):
print("No exception was raised")
print('\n' + '*'*50)
print("However, if there is no information specified about it's parents then an exception to be raised because we cannot determine whether or not it is active.")
try:
exception = False
print("pcs.isActive('KICKS',{})? " + str(pcs.isActive('KICKS',{})))
except Exception:
print("An exception was raised")
exception = True
if(not exception):
print("No exception was raised")
print('\n' + '*'*50)
print("You can also get the default configuration as a dict. For example:")
print(pcs.getDefault())
| 2.625 | 3 |
google/analytics/admin_v1alpha/types/resources.py | LaudateCorpus1/python-analytics-admin | 0 | 12757917 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.analytics.admin.v1alpha",
manifest={
"IndustryCategory",
"ServiceLevel",
"ActorType",
"ActionType",
"ChangeHistoryResourceType",
"GoogleSignalsState",
"GoogleSignalsConsent",
"LinkProposalInitiatingProduct",
"LinkProposalState",
"Account",
"Property",
"AndroidAppDataStream",
"IosAppDataStream",
"WebDataStream",
"DataStream",
"UserLink",
"AuditUserLink",
"FirebaseLink",
"GlobalSiteTag",
"GoogleAdsLink",
"DataSharingSettings",
"AccountSummary",
"PropertySummary",
"MeasurementProtocolSecret",
"ChangeHistoryEvent",
"ChangeHistoryChange",
"DisplayVideo360AdvertiserLink",
"DisplayVideo360AdvertiserLinkProposal",
"LinkProposalStatusDetails",
"ConversionEvent",
"GoogleSignalsSettings",
"CustomDimension",
"CustomMetric",
"DataRetentionSettings",
},
)
class IndustryCategory(proto.Enum):
r"""The category selected for this property, used for industry
benchmarking.
"""
INDUSTRY_CATEGORY_UNSPECIFIED = 0
AUTOMOTIVE = 1
BUSINESS_AND_INDUSTRIAL_MARKETS = 2
FINANCE = 3
HEALTHCARE = 4
TECHNOLOGY = 5
TRAVEL = 6
OTHER = 7
ARTS_AND_ENTERTAINMENT = 8
BEAUTY_AND_FITNESS = 9
BOOKS_AND_LITERATURE = 10
FOOD_AND_DRINK = 11
GAMES = 12
HOBBIES_AND_LEISURE = 13
HOME_AND_GARDEN = 14
INTERNET_AND_TELECOM = 15
LAW_AND_GOVERNMENT = 16
NEWS = 17
ONLINE_COMMUNITIES = 18
PEOPLE_AND_SOCIETY = 19
PETS_AND_ANIMALS = 20
REAL_ESTATE = 21
REFERENCE = 22
SCIENCE = 23
SPORTS = 24
JOBS_AND_EDUCATION = 25
SHOPPING = 26
class ServiceLevel(proto.Enum):
r"""Various levels of service for Google Analytics."""
SERVICE_LEVEL_UNSPECIFIED = 0
GOOGLE_ANALYTICS_STANDARD = 1
GOOGLE_ANALYTICS_360 = 2
class ActorType(proto.Enum):
r"""Different kinds of actors that can make changes to Google
Analytics resources.
"""
ACTOR_TYPE_UNSPECIFIED = 0
USER = 1
SYSTEM = 2
SUPPORT = 3
class ActionType(proto.Enum):
r"""Types of actions that may change a resource."""
ACTION_TYPE_UNSPECIFIED = 0
CREATED = 1
UPDATED = 2
DELETED = 3
class ChangeHistoryResourceType(proto.Enum):
r"""Types of resources whose changes may be returned from change
history.
"""
CHANGE_HISTORY_RESOURCE_TYPE_UNSPECIFIED = 0
ACCOUNT = 1
PROPERTY = 2
WEB_DATA_STREAM = 3
ANDROID_APP_DATA_STREAM = 4
IOS_APP_DATA_STREAM = 5
FIREBASE_LINK = 6
GOOGLE_ADS_LINK = 7
GOOGLE_SIGNALS_SETTINGS = 8
CONVERSION_EVENT = 9
MEASUREMENT_PROTOCOL_SECRET = 10
CUSTOM_DIMENSION = 11
CUSTOM_METRIC = 12
DATA_RETENTION_SETTINGS = 13
DISPLAY_VIDEO_360_ADVERTISER_LINK = 14
DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL = 15
class GoogleSignalsState(proto.Enum):
r"""Status of the Google Signals settings (i.e., whether this
feature has been enabled for the property).
"""
GOOGLE_SIGNALS_STATE_UNSPECIFIED = 0
GOOGLE_SIGNALS_ENABLED = 1
GOOGLE_SIGNALS_DISABLED = 2
class GoogleSignalsConsent(proto.Enum):
r"""Consent field of the Google Signals settings (i.e., whether
the user has consented to the Google Signals terms of service.)
"""
GOOGLE_SIGNALS_CONSENT_UNSPECIFIED = 0
GOOGLE_SIGNALS_CONSENT_CONSENTED = 2
GOOGLE_SIGNALS_CONSENT_NOT_CONSENTED = 1
class LinkProposalInitiatingProduct(proto.Enum):
r"""An indication of which product the user initiated a link
proposal from.
"""
LINK_PROPOSAL_INITIATING_PRODUCT_UNSPECIFIED = 0
GOOGLE_ANALYTICS = 1
LINKED_PRODUCT = 2
class LinkProposalState(proto.Enum):
r"""The state of a link proposal resource."""
LINK_PROPOSAL_STATE_UNSPECIFIED = 0
AWAITING_REVIEW_FROM_GOOGLE_ANALYTICS = 1
AWAITING_REVIEW_FROM_LINKED_PRODUCT = 2
WITHDRAWN = 3
DECLINED = 4
EXPIRED = 5
OBSOLETE = 6
class Account(proto.Message):
r"""A resource message representing a Google Analytics account.
Attributes:
name (str):
Output only. Resource name of this account.
Format: accounts/{account}
Example: "accounts/100".
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this account was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when account payload fields
were last updated.
display_name (str):
Required. Human-readable display name for
this account.
region_code (str):
Country of business. Must be a Unicode CLDR
region code.
deleted (bool):
Output only. Indicates whether this Account
is soft-deleted or not. Deleted accounts are
excluded from List results unless specifically
requested.
"""
name = proto.Field(proto.STRING, number=1,)
create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
display_name = proto.Field(proto.STRING, number=4,)
region_code = proto.Field(proto.STRING, number=5,)
deleted = proto.Field(proto.BOOL, number=6,)
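# A short sketch of constructing one of these proto-plus messages (field
# values are illustrative):
#
#     account = Account(
#         name='accounts/100',
#         display_name='Example Org',
#         region_code='US',
#     )
#     account.deleted   # -> False (proto3 default for an unset bool field)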
class Property(proto.Message):
r"""A resource message representing a Google Analytics GA4
property.
Attributes:
name (str):
Output only. Resource name of this property. Format:
properties/{property_id} Example: "properties/1000".
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the entity was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when entity payload fields
were last updated.
parent (str):
Immutable. Resource name of this property's
logical parent.
Note: The Property-Moving UI can be used to
change the parent. Format: accounts/{account}
Example: "accounts/100".
display_name (str):
Required. Human-readable display name for
this property.
The max allowed display name length is 100
UTF-16 code units.
industry_category (google.analytics.admin_v1alpha.types.IndustryCategory):
Industry associated with this property Example: AUTOMOTIVE,
FOOD_AND_DRINK
time_zone (str):
Required. Reporting Time Zone, used as the day boundary for
reports, regardless of where the data originates. If the
time zone honors DST, Analytics will automatically adjust
for the changes.
NOTE: Changing the time zone only affects data going
forward, and is not applied retroactively.
Format: https://www.iana.org/time-zones Example:
"America/Los_Angeles".
currency_code (str):
The currency type used in reports involving monetary values.
Format: https://en.wikipedia.org/wiki/ISO_4217 Examples:
"USD", "EUR", "JPY".
service_level (google.analytics.admin_v1alpha.types.ServiceLevel):
Output only. The Google Analytics service
level that applies to this property.
delete_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. If set, the time at which this
property was trashed. If not set, then this
property is not currently in the trash can.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. If set, the time at which this
trashed property will be permanently deleted. If
not set, then this property is not currently in
the trash can and is not slated to be deleted.
account (str):
Immutable. The resource name of the parent account Format:
accounts/{account_id} Example: "accounts/123".
"""
name = proto.Field(proto.STRING, number=1,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
parent = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=5,)
industry_category = proto.Field(proto.ENUM, number=6, enum="IndustryCategory",)
time_zone = proto.Field(proto.STRING, number=7,)
currency_code = proto.Field(proto.STRING, number=8,)
service_level = proto.Field(proto.ENUM, number=10, enum="ServiceLevel",)
delete_time = proto.Field(
proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
)
expire_time = proto.Field(
proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp,
)
account = proto.Field(proto.STRING, number=13,)
class AndroidAppDataStream(proto.Message):
r"""A resource message representing a Google Analytics Android
app stream.
Attributes:
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/androidAppDataStreams/{stream_id}
Example: "properties/1000/androidAppDataStreams/2000".
firebase_app_id (str):
Output only. ID of the corresponding Android
app in Firebase, if any. This ID can change if
the Android app is deleted and recreated.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
package_name (str):
Immutable. The package name for the app being
measured. Example: "com.example.myandroidapp".
display_name (str):
Human-readable display name for the Data
Stream.
The max allowed display name length is 255
UTF-16 code units.
"""
name = proto.Field(proto.STRING, number=1,)
firebase_app_id = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
package_name = proto.Field(proto.STRING, number=5,)
display_name = proto.Field(proto.STRING, number=6,)
class IosAppDataStream(proto.Message):
r"""A resource message representing a Google Analytics IOS app
stream.
Attributes:
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/iosAppDataStreams/{stream_id}
Example: "properties/1000/iosAppDataStreams/2000".
firebase_app_id (str):
Output only. ID of the corresponding iOS app
in Firebase, if any. This ID can change if the
iOS app is deleted and recreated.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
bundle_id (str):
Required. Immutable. The Apple App Store
Bundle ID for the app Example:
"com.example.myiosapp".
display_name (str):
Human-readable display name for the Data
Stream.
The max allowed display name length is 255
UTF-16 code units.
"""
name = proto.Field(proto.STRING, number=1,)
firebase_app_id = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
bundle_id = proto.Field(proto.STRING, number=5,)
display_name = proto.Field(proto.STRING, number=6,)
class WebDataStream(proto.Message):
r"""A resource message representing a Google Analytics web
stream.
Attributes:
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/webDataStreams/{stream_id} Example:
"properties/1000/webDataStreams/2000".
measurement_id (str):
Output only. Analytics "Measurement ID",
without the "G-" prefix. Example: "G-1A2BCD345E"
would just be "1A2BCD345E".
firebase_app_id (str):
Output only. ID of the corresponding web app
in Firebase, if any. This ID can change if the
web app is deleted and recreated.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
default_uri (str):
Immutable. Domain name of the web app being
measured, or empty. Example:
"http://www.google.com",
"https://www.google.com".
display_name (str):
Required. Human-readable display name for the
Data Stream.
The max allowed display name length is 255
UTF-16 code units.
"""
name = proto.Field(proto.STRING, number=1,)
measurement_id = proto.Field(proto.STRING, number=2,)
firebase_app_id = proto.Field(proto.STRING, number=3,)
create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
default_uri = proto.Field(proto.STRING, number=6,)
display_name = proto.Field(proto.STRING, number=7,)
class DataStream(proto.Message):
r"""A resource message representing a data stream.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
web_stream_data (google.analytics.admin_v1alpha.types.DataStream.WebStreamData):
Data specific to web streams. Must be populated if type is
WEB_DATA_STREAM.
This field is a member of `oneof`_ ``stream_data``.
android_app_stream_data (google.analytics.admin_v1alpha.types.DataStream.AndroidAppStreamData):
Data specific to Android app streams. Must be populated if
type is ANDROID_APP_DATA_STREAM.
This field is a member of `oneof`_ ``stream_data``.
ios_app_stream_data (google.analytics.admin_v1alpha.types.DataStream.IosAppStreamData):
Data specific to iOS app streams. Must be populated if type
is IOS_APP_DATA_STREAM.
This field is a member of `oneof`_ ``stream_data``.
name (str):
Output only. Resource name of this Data Stream. Format:
properties/{property_id}/dataStreams/{stream_id} Example:
"properties/1000/dataStreams/2000".
type_ (google.analytics.admin_v1alpha.types.DataStream.DataStreamType):
Required. Immutable. The type of this
DataStream resource.
display_name (str):
Human-readable display name for the Data
Stream.
Required for web data streams.
The max allowed display name length is 255
UTF-16 code units.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this stream was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when stream payload fields
were last updated.
"""
class DataStreamType(proto.Enum):
r"""The type of the data stream."""
DATA_STREAM_TYPE_UNSPECIFIED = 0
WEB_DATA_STREAM = 1
ANDROID_APP_DATA_STREAM = 2
IOS_APP_DATA_STREAM = 3
class WebStreamData(proto.Message):
r"""Data specific to web streams.
Attributes:
measurement_id (str):
Output only. Analytics "Measurement ID",
without the "G-" prefix. Example: "G-1A2BCD345E"
would just be "1A2BCD345E".
firebase_app_id (str):
Output only. ID of the corresponding web app
in Firebase, if any. This ID can change if the
web app is deleted and recreated.
default_uri (str):
Immutable. Domain name of the web app being
measured, or empty. Example:
"http://www.google.com",
"https://www.google.com".
"""
measurement_id = proto.Field(proto.STRING, number=1,)
firebase_app_id = proto.Field(proto.STRING, number=2,)
default_uri = proto.Field(proto.STRING, number=3,)
class AndroidAppStreamData(proto.Message):
r"""Data specific to Android app streams.
Attributes:
firebase_app_id (str):
Output only. ID of the corresponding Android
app in Firebase, if any. This ID can change if
the Android app is deleted and recreated.
package_name (str):
Immutable. The package name for the app being
measured. Example: "com.example.myandroidapp".
"""
firebase_app_id = proto.Field(proto.STRING, number=1,)
package_name = proto.Field(proto.STRING, number=2,)
class IosAppStreamData(proto.Message):
r"""Data specific to iOS app streams.
Attributes:
firebase_app_id (str):
Output only. ID of the corresponding iOS app
in Firebase, if any. This ID can change if the
iOS app is deleted and recreated.
bundle_id (str):
Required. Immutable. The Apple App Store
Bundle ID for the app Example:
"com.example.myiosapp".
"""
firebase_app_id = proto.Field(proto.STRING, number=1,)
bundle_id = proto.Field(proto.STRING, number=2,)
web_stream_data = proto.Field(
proto.MESSAGE, number=6, oneof="stream_data", message=WebStreamData,
)
android_app_stream_data = proto.Field(
proto.MESSAGE, number=7, oneof="stream_data", message=AndroidAppStreamData,
)
ios_app_stream_data = proto.Field(
proto.MESSAGE, number=8, oneof="stream_data", message=IosAppStreamData,
)
name = proto.Field(proto.STRING, number=1,)
type_ = proto.Field(proto.ENUM, number=2, enum=DataStreamType,)
display_name = proto.Field(proto.STRING, number=3,)
create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
class UserLink(proto.Message):
r"""A resource message representing a user's permissions on an
Account or Property resource.
Attributes:
name (str):
Output only. Example format:
properties/1234/userLinks/5678
email_address (str):
Immutable. Email address of the user to link
direct_roles (Sequence[str]):
Roles directly assigned to this user for this account or
property.
Valid values: predefinedRoles/viewer predefinedRoles/analyst
predefinedRoles/editor predefinedRoles/admin
predefinedRoles/no-cost-data predefinedRoles/no-revenue-data
Excludes roles that are inherited from a higher-level
entity, group, or organization admin role.
A UserLink that is updated to have an empty list of
direct_roles will be deleted.
"""
name = proto.Field(proto.STRING, number=1,)
email_address = proto.Field(proto.STRING, number=2,)
direct_roles = proto.RepeatedField(proto.STRING, number=3,)
class AuditUserLink(proto.Message):
r"""Read-only resource used to summarize a principal's effective
roles.
Attributes:
name (str):
Example format:
properties/1234/userLinks/5678
email_address (str):
Email address of the linked user
direct_roles (Sequence[str]):
Roles directly assigned to this user for this
entity.
Format: predefinedRoles/viewer
Excludes roles that are inherited from an
account (if this is for a property), group, or
organization admin role.
effective_roles (Sequence[str]):
Union of all permissions a user has at this
account or property (includes direct
permissions, group-inherited permissions, etc.).
Format: predefinedRoles/viewer
"""
name = proto.Field(proto.STRING, number=1,)
email_address = proto.Field(proto.STRING, number=2,)
direct_roles = proto.RepeatedField(proto.STRING, number=3,)
effective_roles = proto.RepeatedField(proto.STRING, number=4,)
class FirebaseLink(proto.Message):
r"""A link between a GA4 property and a Firebase project.
Attributes:
name (str):
Output only. Example format:
properties/1234/firebaseLinks/5678
project (str):
Immutable. Firebase project resource name. When creating a
FirebaseLink, you may provide this resource name using
either a project number or project ID. Once this resource
has been created, returned FirebaseLinks will always have a
project_name that contains a project number.
Format: 'projects/{project number}' Example: 'projects/1234'
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this FirebaseLink was
originally created.
"""
name = proto.Field(proto.STRING, number=1,)
project = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
class GlobalSiteTag(proto.Message):
r"""Read-only resource with the tag for sending data from a
website to a WebDataStream.
Attributes:
name (str):
Output only. Resource name for this
GlobalSiteTag resource. Format:
properties/{propertyId}/globalSiteTag
snippet (str):
Immutable. JavaScript code snippet to be
pasted as the first item into the head tag of
every webpage to measure.
"""
name = proto.Field(proto.STRING, number=1,)
snippet = proto.Field(proto.STRING, number=2,)
class GoogleAdsLink(proto.Message):
r"""A link between a GA4 property and a Google Ads account.
Attributes:
name (str):
Output only. Format:
properties/{propertyId}/googleAdsLinks/{googleAdsLinkId}
Note: googleAdsLinkId is not the Google Ads
customer ID.
customer_id (str):
Immutable. Google Ads customer ID.
can_manage_clients (bool):
Output only. If true, this link is for a
Google Ads manager account.
ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
Enable personalized advertising features with
this integration. Automatically publish my
Google Analytics audience lists and Google
Analytics remarketing events/parameters to the
linked Google Ads account. If this field is not
set on create/update, it will be defaulted to
true.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this link was
originally created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this link was last
updated.
creator_email_address (str):
Output only. Email address of the user that
created the link. An empty string will be
returned if the email address can't be
retrieved.
"""
name = proto.Field(proto.STRING, number=1,)
customer_id = proto.Field(proto.STRING, number=3,)
can_manage_clients = proto.Field(proto.BOOL, number=4,)
ads_personalization_enabled = proto.Field(
proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue,
)
create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
creator_email_address = proto.Field(proto.STRING, number=9,)
class DataSharingSettings(proto.Message):
r"""A resource message representing data sharing settings of a
Google Analytics account.
Attributes:
name (str):
Output only. Resource name.
Format: accounts/{account}/dataSharingSettings
Example: "accounts/1000/dataSharingSettings".
sharing_with_google_support_enabled (bool):
Allows Google support to access the data in
order to help troubleshoot issues.
sharing_with_google_assigned_sales_enabled (bool):
Allows Google sales teams that are assigned
to the customer to access the data in order to
suggest configuration changes to improve
results. Sales team restrictions still apply
when enabled.
sharing_with_google_any_sales_enabled (bool):
Allows any of Google sales to access the data
in order to suggest configuration changes to
improve results.
sharing_with_google_products_enabled (bool):
Allows Google to use the data to improve
other Google products or services.
sharing_with_others_enabled (bool):
Allows Google to share the data anonymously
in aggregate form with others.
"""
name = proto.Field(proto.STRING, number=1,)
sharing_with_google_support_enabled = proto.Field(proto.BOOL, number=2,)
sharing_with_google_assigned_sales_enabled = proto.Field(proto.BOOL, number=3,)
sharing_with_google_any_sales_enabled = proto.Field(proto.BOOL, number=4,)
sharing_with_google_products_enabled = proto.Field(proto.BOOL, number=5,)
sharing_with_others_enabled = proto.Field(proto.BOOL, number=6,)
class AccountSummary(proto.Message):
r"""A virtual resource representing an overview of an account and
all its child GA4 properties.
Attributes:
name (str):
Resource name for this account summary. Format:
accountSummaries/{account_id} Example:
"accountSummaries/1000".
account (str):
Resource name of account referred to by this account summary
Format: accounts/{account_id} Example: "accounts/1000".
display_name (str):
Display name for the account referred to in
this account summary.
property_summaries (Sequence[google.analytics.admin_v1alpha.types.PropertySummary]):
List of summaries for child accounts of this
account.
"""
name = proto.Field(proto.STRING, number=1,)
account = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=3,)
property_summaries = proto.RepeatedField(
proto.MESSAGE, number=4, message="PropertySummary",
)
class PropertySummary(proto.Message):
r"""A virtual resource representing metadata for a GA4 property.
Attributes:
property (str):
Resource name of property referred to by this property
summary Format: properties/{property_id} Example:
"properties/1000".
display_name (str):
Display name for the property referred to in
this property summary.
"""
property = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
class MeasurementProtocolSecret(proto.Message):
r"""A secret value used for sending hits to Measurement Protocol.
Attributes:
name (str):
Output only. Resource name of this secret.
This secret may be a child of any type of
stream. Format:
properties/{property}/webDataStreams/{webDataStream}/measurementProtocolSecrets/{measurementProtocolSecret}
display_name (str):
Required. Human-readable display name for
this secret.
secret_value (str):
Output only. The measurement protocol secret value. Pass
this value to the api_secret field of the Measurement
Protocol API when sending hits to this secret's parent
property.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
secret_value = proto.Field(proto.STRING, number=3,)
class ChangeHistoryEvent(proto.Message):
r"""A set of changes within a Google Analytics account or its
child properties that resulted from the same cause. Common
causes would be updates made in the Google Analytics UI, changes
from customer support, or automatic Google Analytics system
changes.
Attributes:
id (str):
ID of this change history event. This ID is
unique across Google Analytics.
change_time (google.protobuf.timestamp_pb2.Timestamp):
Time when change was made.
actor_type (google.analytics.admin_v1alpha.types.ActorType):
The type of actor that made this change.
user_actor_email (str):
Email address of the Google account that made
the change. This will be a valid email address
if the actor field is set to USER, and empty
otherwise. Google accounts that have been
deleted will cause an error.
changes_filtered (bool):
If true, then the list of changes returned
was filtered, and does not represent all changes
that occurred in this event.
changes (Sequence[google.analytics.admin_v1alpha.types.ChangeHistoryChange]):
A list of changes made in this change history
event that fit the filters specified in
SearchChangeHistoryEventsRequest.
"""
id = proto.Field(proto.STRING, number=1,)
change_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
actor_type = proto.Field(proto.ENUM, number=3, enum="ActorType",)
user_actor_email = proto.Field(proto.STRING, number=4,)
changes_filtered = proto.Field(proto.BOOL, number=5,)
changes = proto.RepeatedField(
proto.MESSAGE, number=6, message="ChangeHistoryChange",
)
class ChangeHistoryChange(proto.Message):
r"""A description of a change to a single Google Analytics
resource.
Attributes:
resource (str):
Resource name of the resource whose changes
are described by this entry.
action (google.analytics.admin_v1alpha.types.ActionType):
The type of action that changed this
resource.
resource_before_change (google.analytics.admin_v1alpha.types.ChangeHistoryChange.ChangeHistoryResource):
Resource contents from before the change was
made. If this resource was created in this
change, this field will be missing.
resource_after_change (google.analytics.admin_v1alpha.types.ChangeHistoryChange.ChangeHistoryResource):
Resource contents from after the change was
made. If this resource was deleted in this
change, this field will be missing.
"""
class ChangeHistoryResource(proto.Message):
r"""A snapshot of a resource as before or after the result of a
change in change history.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
account (google.analytics.admin_v1alpha.types.Account):
A snapshot of an Account resource in change
history.
This field is a member of `oneof`_ ``resource``.
property (google.analytics.admin_v1alpha.types.Property):
A snapshot of a Property resource in change
history.
This field is a member of `oneof`_ ``resource``.
web_data_stream (google.analytics.admin_v1alpha.types.WebDataStream):
A snapshot of a WebDataStream resource in
change history.
This field is a member of `oneof`_ ``resource``.
android_app_data_stream (google.analytics.admin_v1alpha.types.AndroidAppDataStream):
A snapshot of an AndroidAppDataStream
resource in change history.
This field is a member of `oneof`_ ``resource``.
ios_app_data_stream (google.analytics.admin_v1alpha.types.IosAppDataStream):
A snapshot of an IosAppDataStream resource in
change history.
This field is a member of `oneof`_ ``resource``.
firebase_link (google.analytics.admin_v1alpha.types.FirebaseLink):
A snapshot of a FirebaseLink resource in
change history.
This field is a member of `oneof`_ ``resource``.
google_ads_link (google.analytics.admin_v1alpha.types.GoogleAdsLink):
A snapshot of a GoogleAdsLink resource in
change history.
This field is a member of `oneof`_ ``resource``.
google_signals_settings (google.analytics.admin_v1alpha.types.GoogleSignalsSettings):
A snapshot of a GoogleSignalsSettings
resource in change history.
This field is a member of `oneof`_ ``resource``.
display_video_360_advertiser_link (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink):
A snapshot of a DisplayVideo360AdvertiserLink
resource in change history.
This field is a member of `oneof`_ ``resource``.
display_video_360_advertiser_link_proposal (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal):
A snapshot of a
DisplayVideo360AdvertiserLinkProposal resource
in change history.
This field is a member of `oneof`_ ``resource``.
conversion_event (google.analytics.admin_v1alpha.types.ConversionEvent):
A snapshot of a ConversionEvent resource in
change history.
This field is a member of `oneof`_ ``resource``.
measurement_protocol_secret (google.analytics.admin_v1alpha.types.MeasurementProtocolSecret):
A snapshot of a MeasurementProtocolSecret
resource in change history.
This field is a member of `oneof`_ ``resource``.
custom_dimension (google.analytics.admin_v1alpha.types.CustomDimension):
A snapshot of a CustomDimension resource in
change history.
This field is a member of `oneof`_ ``resource``.
custom_metric (google.analytics.admin_v1alpha.types.CustomMetric):
A snapshot of a CustomMetric resource in
change history.
This field is a member of `oneof`_ ``resource``.
data_retention_settings (google.analytics.admin_v1alpha.types.DataRetentionSettings):
A snapshot of a data retention settings
resource in change history.
This field is a member of `oneof`_ ``resource``.
"""
account = proto.Field(
proto.MESSAGE, number=1, oneof="resource", message="Account",
)
property = proto.Field(
proto.MESSAGE, number=2, oneof="resource", message="Property",
)
web_data_stream = proto.Field(
proto.MESSAGE, number=3, oneof="resource", message="WebDataStream",
)
android_app_data_stream = proto.Field(
proto.MESSAGE, number=4, oneof="resource", message="AndroidAppDataStream",
)
ios_app_data_stream = proto.Field(
proto.MESSAGE, number=5, oneof="resource", message="IosAppDataStream",
)
firebase_link = proto.Field(
proto.MESSAGE, number=6, oneof="resource", message="FirebaseLink",
)
google_ads_link = proto.Field(
proto.MESSAGE, number=7, oneof="resource", message="GoogleAdsLink",
)
google_signals_settings = proto.Field(
proto.MESSAGE, number=8, oneof="resource", message="GoogleSignalsSettings",
)
display_video_360_advertiser_link = proto.Field(
proto.MESSAGE,
number=9,
oneof="resource",
message="DisplayVideo360AdvertiserLink",
)
display_video_360_advertiser_link_proposal = proto.Field(
proto.MESSAGE,
number=10,
oneof="resource",
message="DisplayVideo360AdvertiserLinkProposal",
)
conversion_event = proto.Field(
proto.MESSAGE, number=11, oneof="resource", message="ConversionEvent",
)
measurement_protocol_secret = proto.Field(
proto.MESSAGE,
number=12,
oneof="resource",
message="MeasurementProtocolSecret",
)
custom_dimension = proto.Field(
proto.MESSAGE, number=13, oneof="resource", message="CustomDimension",
)
custom_metric = proto.Field(
proto.MESSAGE, number=14, oneof="resource", message="CustomMetric",
)
data_retention_settings = proto.Field(
proto.MESSAGE, number=15, oneof="resource", message="DataRetentionSettings",
)
resource = proto.Field(proto.STRING, number=1,)
action = proto.Field(proto.ENUM, number=2, enum="ActionType",)
resource_before_change = proto.Field(
proto.MESSAGE, number=3, message=ChangeHistoryResource,
)
resource_after_change = proto.Field(
proto.MESSAGE, number=4, message=ChangeHistoryResource,
)
class DisplayVideo360AdvertiserLink(proto.Message):
r"""A link between a GA4 property and a Display & Video 360
advertiser.
Attributes:
name (str):
Output only. The resource name for this
DisplayVideo360AdvertiserLink resource. Format:
properties/{propertyId}/displayVideo360AdvertiserLinks/{linkId}
Note: linkId is not the Display & Video 360
Advertiser ID
advertiser_id (str):
Immutable. The Display & Video 360
Advertiser's advertiser ID.
advertiser_display_name (str):
Output only. The display name of the Display
& Video 360 Advertiser.
ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
Enables personalized advertising features
with this integration. If this field is not set
on create/update, it will be defaulted to true.
campaign_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of campaign
data from Display & Video 360 into the GA4
property. After link creation, this can only be
updated from the Display & Video 360 product.
If this field is not set on create, it will be
defaulted to true.
cost_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of cost data from Display &
Video 360 into the GA4 property. This can only be enabled if
campaign_data_sharing_enabled is enabled. After link
creation, this can only be updated from the Display & Video
360 product. If this field is not set on create, it will be
defaulted to true.
"""
name = proto.Field(proto.STRING, number=1,)
advertiser_id = proto.Field(proto.STRING, number=2,)
advertiser_display_name = proto.Field(proto.STRING, number=3,)
ads_personalization_enabled = proto.Field(
proto.MESSAGE, number=4, message=wrappers_pb2.BoolValue,
)
campaign_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue,
)
cost_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue,
)
class DisplayVideo360AdvertiserLinkProposal(proto.Message):
r"""A proposal for a link between a GA4 property and a Display &
Video 360 advertiser.
A proposal is converted to a DisplayVideo360AdvertiserLink once
approved. Google Analytics admins approve inbound proposals
while Display & Video 360 admins approve outbound proposals.
Attributes:
name (str):
Output only. The resource name for this
DisplayVideo360AdvertiserLinkProposal resource.
Format:
properties/{propertyId}/displayVideo360AdvertiserLinkProposals/{proposalId}
Note: proposalId is not the Display & Video 360
Advertiser ID
advertiser_id (str):
Immutable. The Display & Video 360
Advertiser's advertiser ID.
link_proposal_status_details (google.analytics.admin_v1alpha.types.LinkProposalStatusDetails):
Output only. The status information for this
link proposal.
advertiser_display_name (str):
Output only. The display name of the Display
            & Video 360 Advertiser. Only populated for proposals
that originated from Display & Video 360.
validation_email (str):
Input only. On a proposal being sent to
Display & Video 360, this field must be set to
the email address of an admin on the target
advertiser. This is used to verify that the
Google Analytics admin is aware of at least one
admin on the Display & Video 360 Advertiser.
This does not restrict approval of the proposal
to a single user. Any admin on the Display &
Video 360 Advertiser may approve the proposal.
ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables personalized advertising
features with this integration. If this field is
not set on create, it will be defaulted to true.
campaign_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of campaign
data from Display & Video 360. If this field is
not set on create, it will be defaulted to true.
cost_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
Immutable. Enables the import of cost data from Display &
Video 360. This can only be enabled if
campaign_data_sharing_enabled is enabled. If this field is
not set on create, it will be defaulted to true.
"""
name = proto.Field(proto.STRING, number=1,)
advertiser_id = proto.Field(proto.STRING, number=2,)
link_proposal_status_details = proto.Field(
proto.MESSAGE, number=3, message="LinkProposalStatusDetails",
)
advertiser_display_name = proto.Field(proto.STRING, number=4,)
validation_email = proto.Field(proto.STRING, number=5,)
ads_personalization_enabled = proto.Field(
proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue,
)
campaign_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=7, message=wrappers_pb2.BoolValue,
)
cost_data_sharing_enabled = proto.Field(
proto.MESSAGE, number=8, message=wrappers_pb2.BoolValue,
)
class LinkProposalStatusDetails(proto.Message):
r"""Status information for a link proposal.
Attributes:
link_proposal_initiating_product (google.analytics.admin_v1alpha.types.LinkProposalInitiatingProduct):
Output only. The source of this proposal.
requestor_email (str):
Output only. The email address of the user
that proposed this linkage.
link_proposal_state (google.analytics.admin_v1alpha.types.LinkProposalState):
Output only. The state of this proposal.
"""
link_proposal_initiating_product = proto.Field(
proto.ENUM, number=1, enum="LinkProposalInitiatingProduct",
)
requestor_email = proto.Field(proto.STRING, number=2,)
link_proposal_state = proto.Field(proto.ENUM, number=3, enum="LinkProposalState",)
class ConversionEvent(proto.Message):
r"""A conversion event in a Google Analytics property.
Attributes:
name (str):
Output only. Resource name of this conversion event. Format:
properties/{property}/conversionEvents/{conversion_event}
event_name (str):
Immutable. The event name for this conversion
event. Examples: 'click', 'purchase'
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when this conversion event
was created in the property.
deletable (bool):
Output only. If set, this event can currently
be deleted via DeleteConversionEvent.
custom (bool):
Output only. If set to true, this conversion
event refers to a custom event. If set to
false, this conversion event refers to a default
event in GA. Default events typically have
special meaning in GA. Default events are
usually created for you by the GA system, but in
some cases can be created by property admins.
Custom events count towards the maximum number
of custom conversion events that may be created
per property.
"""
name = proto.Field(proto.STRING, number=1,)
event_name = proto.Field(proto.STRING, number=2,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
deletable = proto.Field(proto.BOOL, number=4,)
custom = proto.Field(proto.BOOL, number=5,)
class GoogleSignalsSettings(proto.Message):
r"""Settings values for Google Signals. This is a singleton
resource.
Attributes:
name (str):
Output only. Resource name of this setting. Format:
properties/{property_id}/googleSignalsSettings Example:
"properties/1000/googleSignalsSettings".
state (google.analytics.admin_v1alpha.types.GoogleSignalsState):
Status of this setting.
consent (google.analytics.admin_v1alpha.types.GoogleSignalsConsent):
Output only. Terms of Service acceptance.
"""
name = proto.Field(proto.STRING, number=1,)
state = proto.Field(proto.ENUM, number=3, enum="GoogleSignalsState",)
consent = proto.Field(proto.ENUM, number=4, enum="GoogleSignalsConsent",)
class CustomDimension(proto.Message):
r"""A definition for a CustomDimension.
Attributes:
name (str):
Output only. Resource name for this
CustomDimension resource. Format:
properties/{property}/customDimensions/{customDimension}
parameter_name (str):
Required. Immutable. Tagging parameter name
for this custom dimension.
If this is a user-scoped dimension, then this is
the user property name. If this is an event-
scoped dimension, then this is the event
parameter name.
May only contain alphanumeric and underscore
characters, starting with a letter. Max length
of 24 characters for user-scoped dimensions, 40
characters for event-scoped dimensions.
display_name (str):
Required. Display name for this custom
dimension as shown in the Analytics UI. Max
length of 82 characters, alphanumeric plus space
and underscore starting with a letter. Legacy
system-generated display names may contain
square brackets, but updates to this field will
never permit square brackets.
description (str):
Optional. Description for this custom
dimension. Max length of 150 characters.
scope (google.analytics.admin_v1alpha.types.CustomDimension.DimensionScope):
Required. Immutable. The scope of this
dimension.
disallow_ads_personalization (bool):
Optional. If set to true, sets this dimension
as NPA and excludes it from ads personalization.
This is currently only supported by user-scoped
custom dimensions.
"""
class DimensionScope(proto.Enum):
r"""Valid values for the scope of this dimension."""
DIMENSION_SCOPE_UNSPECIFIED = 0
EVENT = 1
USER = 2
name = proto.Field(proto.STRING, number=1,)
parameter_name = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=3,)
description = proto.Field(proto.STRING, number=4,)
scope = proto.Field(proto.ENUM, number=5, enum=DimensionScope,)
disallow_ads_personalization = proto.Field(proto.BOOL, number=6,)
class CustomMetric(proto.Message):
r"""A definition for a custom metric.
Attributes:
name (str):
Output only. Resource name for this
CustomMetric resource. Format:
properties/{property}/customMetrics/{customMetric}
parameter_name (str):
Required. Immutable. Tagging name for this
custom metric.
If this is an event-scoped metric, then this is
the event parameter name.
May only contain alphanumeric and underscore
            characters, starting with a letter. Max length of
40 characters for event-scoped metrics.
display_name (str):
Required. Display name for this custom metric
as shown in the Analytics UI. Max length of 82
characters, alphanumeric plus space and
underscore starting with a letter. Legacy
system-generated display names may contain
square brackets, but updates to this field will
never permit square brackets.
description (str):
Optional. Description for this custom
dimension. Max length of 150 characters.
measurement_unit (google.analytics.admin_v1alpha.types.CustomMetric.MeasurementUnit):
Required. The type for the custom metric's
value.
scope (google.analytics.admin_v1alpha.types.CustomMetric.MetricScope):
Required. Immutable. The scope of this custom
metric.
"""
class MeasurementUnit(proto.Enum):
r"""Possible types of representing the custom metric's value.
Currency representation may change in the future, requiring a
breaking API change.
"""
MEASUREMENT_UNIT_UNSPECIFIED = 0
STANDARD = 1
CURRENCY = 2
FEET = 3
METERS = 4
KILOMETERS = 5
MILES = 6
MILLISECONDS = 7
SECONDS = 8
MINUTES = 9
HOURS = 10
class MetricScope(proto.Enum):
r"""The scope of this metric."""
METRIC_SCOPE_UNSPECIFIED = 0
EVENT = 1
name = proto.Field(proto.STRING, number=1,)
parameter_name = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=3,)
description = proto.Field(proto.STRING, number=4,)
measurement_unit = proto.Field(proto.ENUM, number=5, enum=MeasurementUnit,)
scope = proto.Field(proto.ENUM, number=6, enum=MetricScope,)
class DataRetentionSettings(proto.Message):
r"""Settings values for data retention. This is a singleton
resource.
Attributes:
name (str):
Output only. Resource name for this
DataRetentionSetting resource. Format:
properties/{property}/dataRetentionSettings
event_data_retention (google.analytics.admin_v1alpha.types.DataRetentionSettings.RetentionDuration):
The length of time that event-level data is
retained.
reset_user_data_on_new_activity (bool):
If true, reset the retention period for the
user identifier with every event from that user.
"""
class RetentionDuration(proto.Enum):
r"""Valid values for the data retention duration."""
RETENTION_DURATION_UNSPECIFIED = 0
TWO_MONTHS = 1
FOURTEEN_MONTHS = 3
TWENTY_SIX_MONTHS = 4
THIRTY_EIGHT_MONTHS = 5
FIFTY_MONTHS = 6
name = proto.Field(proto.STRING, number=1,)
event_data_retention = proto.Field(proto.ENUM, number=2, enum=RetentionDuration,)
reset_user_data_on_new_activity = proto.Field(proto.BOOL, number=3,)
__all__ = tuple(sorted(__protobuf__.manifest))
| 1.226563 | 1 |
core/vault_exporter.py | vinicelms/vault-token-exporter | 0 | 12757918 | from prometheus_client.core import GaugeMetricFamily
import prometheus_client as prom
import time
from vault_integration import Vault
class CustomVaultExporter:
def __init__(self):
pass
def collect(self):
vault = Vault()
tokens_info = vault.get_key_data_from_vault()
for token_info in tokens_info:
gauge = GaugeMetricFamily(
name="vault_token_expire_time",
documentation="Collect time remaining to expire Vault service token",
labels=['display_name', 'time_format']
)
gauge.add_metric(
labels=[token_info.name, 'minute'],
value=token_info.expiration_time
)
yield gauge
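# Editorial note: collect() above is invoked by the Prometheus client
# registry on every scrape of the HTTP endpoint, so the token data is
# re-read from Vault each time /metrics is requested.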
if __name__ == "__main__":
custom_exporter = CustomVaultExporter()
prom.REGISTRY.register(custom_exporter)
prom.start_http_server(9121)
while True:
time.sleep(30) | 2.34375 | 2 |
tools/evaluators/manual/convertManualToSspd.py | lee4138/6d-pose-estimation-with-ml-in-ar | 7 | 12757919 | <filename>tools/evaluators/manual/convertManualToSspd.py
import glob, os, sys
import json
def createLabelContent():
    print('creating label files from annotation-results.6dan.json')
with open('annotation-results.6dan.json', 'r') as json_file:
data = json.load(json_file)
for lbl in data:
p = lbl['sspd']
f = open(os.path.join('./manual',lbl['image'].split('.')[0] + '.txt'), "w+")
f.write("0 %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f" % (p[1],p[2], p[15],p[16],p[17],p[18],p[11],p[12],p[13],p[14],p[7],p[8],p[9],p[10],p[3],p[4],p[5],p[6],p[19],p[20]))
f.close()
def createTestAndTrainFiles(counter):
print('creating test files')
f_test = open(os.path.join('./', 'test.txt'), "w+")
for i in range(counter):
img_type = ".jpg"
f_test.write('LINEMOD/kuka_real/JPEGImages/' + format(i, '06') + img_type + "\n")
f_test.close()
if __name__ == "__main__":
createTestAndTrainFiles(len(os.listdir('./labels')))
| 2.203125 | 2 |
plastic/clustering.py | plastic-phy/plastic | 1 | 12757920 | """
For more information on the contents of this module:
- help(plastic.GenotypeMatrix)
- help(clustering.cluster_mutations)
--------
Module that exposes the clustering algorithm presented at
https://github.com/AlgoLab/celluloid
Simple example workflow:
from plastic import clustering
to_cluster = clustering.GenotypeMatrix.from_files('to_cluster.txt', mutations_file = 'mutations.txt')
# Reduce the size of the input down to 50 to speed up some complex computation
# (for instance SASC tree inference)
clustered = clustering.cluster_mutations(to_cluster, k = 50)
# Get the clustered mutations as comma separated lists of simple mutations
muts = clustered.mutations()
# Save the matrix and use it for some intensive computation
clustered.to_files('clustered.txt', mutations_file = 'clustered_mutations.txt')
"""
from ._core.genotypematrix import GenotypeMatrix
import numpy as np
from kmodes.kmodes import KModes
from collections import defaultdict
def cluster_mutations(
genotype_matrix,
k,
n_inits=10,
max_iter=100,
verbose=False,
**kwargs):
"""
Clusters the mutations in a genotype matrix by applying kmodes
Parameters:
genotype_matrix(GenotypeMatrix):
A matrix representing the results of single-cell sequencing.
k(int):
The number of clustered mutations in the output matrix.
Note that empty clusters will be discarded after clustering.
n_inits(int):
            The number of initializations in the clustering process.
max_iter(int):
The maximum number of iterations in the clustering process.
verbose (bool)
**kwargs:
Additional arguments passed to KModes process.
Returns:
GenotypeMatrix:
The result of the clustering process. Each column in the matrix
will be the centroid of a non-empty cluster, and will be labeled with
a comma-separated list of the labels of the mutations within the cluster.
Cell labels are left unaltered.
"""
if type(k) != int or k < 1:
raise ValueError(f'the number of clusters must be a positive integer, but {k} is not.')
if type(max_iter) != int or max_iter < 1:
raise ValueError(f'the number of iterations must be a positive integer, but {max_iter} is not.')
if type(n_inits) != int or n_inits < 1:
raise ValueError(f'the number of initializations must be a positive integer, but {n_inits} is not.')
return _celluloid(genotype_matrix, k, n_inits, max_iter,verbose,**kwargs)
def _conflict_dissim(a, b, **_):
v = np.vectorize(lambda ai, bi: ai != 2 and bi != 2 and ai != bi)
return np.sum(v(a, b), axis=1)
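# Illustrative sketch (added for clarity; not part of the original module).
# The conflict dissimilarity treats the value 2 as "missing", counting only
# positions where both entries are observed (0/1) and disagree:
#   >>> a = np.array([0, 1, 2, 1])
#   >>> b = np.array([[1, 1, 2, 2], [0, 0, 2, 1]])
#   >>> _conflict_dissim(a, b)
#   array([1, 1])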
def _celluloid(
genotype_matrix,
k,
n_inits,
max_iter,
verbose,
**kwargs
):
"""
Clusters the mutations in a genotype matrix by applying kmodes
Parameters:
genotype_matrix(GenotypeMatrix):
A matrix representing the results of single-cell sequencing.
k(int):
The number of clustered mutations in the output matrix.
Note that empty clusters will be discarded after clustering.
n_inits(int):
            The number of initializations in the clustering process.
max_iter(int):
The maximum number of iterations in the clustering process.
verbose (bool)
**kwargs:
Additional arguments passed to KModes process.
Returns:
GenotypeMatrix:
The result of the clustering process. Each column in the matrix
will be the centroid of a non-empty cluster, and will be labeled with
a comma-separated list of the labels of the mutations within the cluster.
Cell labels are left unaltered.
"""
mutations_as_points = np.array(genotype_matrix.matrix(), dtype='int').transpose()
mutation_labels = genotype_matrix.mutation_labels
km = KModes(
n_clusters=k,
cat_dissim=_conflict_dissim,
init='huang',
n_init=n_inits,
max_iter=max_iter,
verbose=(1 if verbose else 0),
**kwargs
)
clusters = km.fit_predict(mutations_as_points)
# Each cluster will be labeled with the labels of its components.
clusters_of_mutations = km.labels_
clustered_mutation_labels = defaultdict(list)
for mutation_label, mutation_cluster in zip(mutation_labels, clusters_of_mutations):
clustered_mutation_labels[mutation_cluster].append(mutation_label)
nonempty_clusters = clustered_mutation_labels.keys()
# build the output matrix and the mutation labels as strings
cluster_centroids = km.cluster_centroids_
clustered_mutation_labels_strings = [','.join(clustered_mutation_labels[cluster_id]) for cluster_id in
sorted(nonempty_clusters)]
out_matrix = [cluster_centroids[cluster_id] for cluster_id in sorted(nonempty_clusters)]
# the matrix needs to be transposed back to its original orientation
out_matrix = np.array(out_matrix).transpose()
return GenotypeMatrix(out_matrix, cell_labels=genotype_matrix.cell_labels,
mutation_labels=clustered_mutation_labels_strings)
| 3.171875 | 3 |
Gds/src/fprime_gds/wxgui/tools/PexpectRunnerConsolImpl.py | chrisdonlan/fprime | 5 | 12757921 | # -*- coding: utf-8 -*-
import wx
import PexpectRunnerConsolGUI
###########################################################################
## Class PexpectRunnerImp
###########################################################################
class PexpectRunnerImpl ( PexpectRunnerConsolGUI.PexpectRunnerGUI ):
def __init__( self, parent ):
PexpectRunnerConsolGUI.PexpectRunnerGUI.__init__ ( self, parent)
def __del__( self ):
pass
# TODO Doesn't really work right now. Just going to leave it autoscrolling which is default. Someone can fix this in the future.
def AppendMessage(self, msg_text):
print(self.TextCtrlConsol.GetScrollPos(wx.VERTICAL))
print(self.TextCtrlConsol.GetScrollRange(wx.VERTICAL))
print(self.TextCtrlConsol.GetScrollThumb(wx.VERTICAL))
        # If we are at the bottom of the console, shift the box to show new text
if self.TextCtrlConsol.GetScrollPos(wx.VERTICAL) + self.TextCtrlConsol.GetScrollThumb(wx.VERTICAL) == self.TextCtrlConsol.GetScrollRange(wx.VERTICAL):
print('At bottom...')
self.TextCtrlConsol.write(msg_text + '\n') # Write text with new line to consol
self.TextCtrlConsol.SetScrollPos(wx.VERTICAL, self.TextCtrlConsol.GetScrollRange(wx.VERTICAL)) # Set the scroll to the end
# If we are not at the botton of the consol, just append the text and do nothing else
else:
print('Not at bottom...')
self.TextCtrlConsol.Freeze()
self.TextCtrlConsol.write(msg_text + '\n')
self.TextCtrlConsol.Thaw()
# Override these
def onWindowClose( self, event ):
event.Skip()
def onMouseWheel( self, event ):
event.Skip()
| 2.828125 | 3 |
solarforecastarbiter/reports/template.py | SolarArbiter/solarforecastarbiter-core | 22 | 12757922 | """
Inserts metadata and figures into the report template.
"""
import base64
import json
import logging
from pathlib import Path
import re
import subprocess
import tempfile
from bokeh import __version__ as bokeh_version
from jinja2 import Environment, PackageLoader, select_autoescape, ChoiceLoader
from jinja2.runtime import Undefined
from plotly import __version__ as plotly_version
from solarforecastarbiter import datamodel
from solarforecastarbiter.reports.figures import plotly_figures
logger = logging.getLogger(__name__)
def build_metrics_json(report):
"""Creates a dict from the metrics results in the report.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
Returns
-------
str
The json representing the report metrics. The string will be a string
representing an empty json array if the report does not have a
computed raw_report.
"""
if getattr(report, 'raw_report') is not None:
df = plotly_figures.construct_metrics_dataframe(
list(filter(lambda x: not getattr(x, 'is_summary', False),
report.raw_report.metrics)),
rename=plotly_figures.abbreviate)
return df.to_json(orient="records")
else:
return "[]"
def build_summary_stats_json(report):
"""Creates a dict from the summary statistics in the report.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
Returns
-------
str
The json representing the summary statistics. Will be a string
representing an empty json array if the report does not have a
computed raw_report.
Raises
------
ValueError
If report.raw_report is populated but no
report.raw_report.metrics have `is_summary == True`
indicating that the report was made without
summary statistics.
"""
if getattr(report, 'raw_report') is not None:
df = plotly_figures.construct_metrics_dataframe(
list(filter(lambda x: getattr(x, 'is_summary', False),
report.raw_report.metrics)),
rename=plotly_figures.abbreviate)
if df.empty:
raise ValueError('No summary statistics in report.')
return df.to_json(orient="records")
else:
return "[]"
def build_metadata_json(report):
"""Creates a JSON array of ProcessedForecastObservations parameters
in the report.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
Returns
-------
str
The JSON representing the report forecast-observation metadata.
"""
if getattr(report, 'raw_report') is None:
return "[]"
drop_keys = {
'__blurb__', 'site', 'aggregate',
}
def _process_forecast(fx):
if fx is None:
return None
out = {k: v for k, v in fx.to_dict().items()
if k not in drop_keys}
if isinstance(fx, datamodel.ProbabilisticForecast):
out['constant_values'] = [
cdf.constant_value for cdf in fx.constant_values]
return out
out = []
for pfxobs in report.raw_report.processed_forecasts_observations:
minp = pfxobs.replace(original=None)
thisout = {k: v for k, v in minp.to_dict().items()
if k in (
'name', 'interval_value_type', 'interval_length',
'interval_label', 'normalization_factor',
'uncertainty', 'cost')}
thisout['forecast'] = _process_forecast(pfxobs.original.forecast)
thisout['reference_forecast'] = _process_forecast(
pfxobs.original.reference_forecast)
thisout['observation'] = None
thisout['aggregate'] = None
if hasattr(pfxobs.original, 'observation'):
thisout['observation'] = {
k: v for k, v in pfxobs.original.observation.to_dict().items()
if k not in drop_keys
}
elif hasattr(pfxobs.original, 'aggregate'):
thisout['aggregate'] = {
k: v for k, v in pfxobs.original.aggregate.to_dict().items()
if k not in drop_keys or k == 'observations'
}
obs = []
for aggobs in pfxobs.original.aggregate.observations:
obsd = aggobs.to_dict()
obsd['observation_id'] = obsd.pop('observation')[
'observation_id']
obs.append(obsd)
thisout['aggregate']['observations'] = obs
out.append(thisout)
return json.dumps(out).replace('NaN', 'null')
def _get_render_kwargs(report, dash_url, with_timeseries):
"""Creates a dictionary of key word template arguments for a jinja2
report template.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
        URL of the Solar Forecast Arbiter dashboard to use when building links.
with_timeseries: bool
Whether or not to include timeseries plots. If an error occurs when
trying to generate timeseries plots, the `timeseries_spec`,
`scatter_spec`, and `timeseries_prob_spec` arguments will not be
defined.
Returns
-------
kwargs: dict
Dictionary of template variables to unpack as key word arguments when
rendering.
"""
# macros render columns for every allowed summary statistic, so be
# specific about which columns to include to avoid unnecessary blanks.
# Check that the report is complete, and if the processed forecasts are
# all event forecasts. Checking processed forecast pairs instead of
# report_parameters.object pairs allows us to skip the step of loading
# or shuffling around forecasts when working with a raw api response on
# the dashboard without the aid of solarforecastarbiter.io.api's
# process_report_dict. See issue 694 for context.
if report.status == "complete" and all(
type(x.original.forecast) is datamodel.EventForecast for x in
report.raw_report.processed_forecasts_observations
):
human_statistics = datamodel.ALLOWED_EVENT_SUMMARY_STATISTICS
else:
human_statistics = datamodel.ALLOWED_DETERMINISTIC_SUMMARY_STATISTICS
# macros only render columns/plots for metrics that actually exist,
# so no need to be specific to avoid unnecessary blanks
kwargs = dict(
human_categories=datamodel.ALLOWED_CATEGORIES,
human_metrics=datamodel.ALLOWED_METRICS,
human_statistics=human_statistics,
report=report,
category_blurbs=datamodel.CATEGORY_BLURBS,
dash_url=dash_url,
metrics_json=build_metrics_json(report),
metadata_json=build_metadata_json(report),
templating_messages=[]
)
report_plots = getattr(report.raw_report, 'plots', None)
# get plotting library versions used when plots were generated.
    # if plot generation failed, fall back to the current version
plot_bokeh = getattr(report_plots, 'bokeh_version', None)
kwargs['bokeh_version'] = plot_bokeh if plot_bokeh else bokeh_version
plot_plotly = getattr(report_plots, 'plotly_version', None)
kwargs['plotly_version'] = plot_plotly if plot_plotly else plotly_version
try:
kwargs['summary_stats'] = build_summary_stats_json(report)
except ValueError:
kwargs['templating_messages'].append(
'No data summary statistics were calculated with this report.')
kwargs['summary_stats'] = '[]'
if with_timeseries:
try:
timeseries_specs = plotly_figures.timeseries_plots(report)
except Exception:
logger.exception(
'Failed to make Plotly items for timeseries and scatterplot')
else:
if timeseries_specs[0] is not None:
kwargs['timeseries_spec'] = timeseries_specs[0]
if timeseries_specs[1] is not None:
kwargs['scatter_spec'] = timeseries_specs[1]
if timeseries_specs[2] is not None:
kwargs['timeseries_prob_spec'] = timeseries_specs[2]
kwargs['includes_distribution'] = timeseries_specs[3]
return kwargs
def _pretty_json(value):
if isinstance(value, Undefined): # pragma: no cover
return value
return json.dumps(value, indent=4, separators=(',', ':'))
def _figure_name_filter(value):
"""replace characters that may cause problems for html/javascript ids"""
if isinstance(value, Undefined):
return value
out = (value
.replace('^', '-')
.replace(' ', '-')
.replace('.', 'dot')
.replace('%', 'percent')
.replace('<', 'lt')
.replace('>', 'gt')
.replace('=', 'eq')
.replace('(', 'lp')
.replace(')', 'rp')
.replace('/', 'fsl')
.replace('\\', 'bsl')
)
out = re.sub('[^\\w-]', 'special', out)
return out
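# Example of the filter above (editorial, for reference only):
#   _figure_name_filter('MAE (%) < 5')  ->  'MAE-lppercentrp-lt-5'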
def _unique_flags_filter(proc_fxobs_list, before_resample):
# use a dict to preserve order and guarantee uniqueness of keys
names = {}
for proc_fxobs in proc_fxobs_list:
for val_result in proc_fxobs.validation_results:
if val_result.before_resample == before_resample:
names[val_result.flag] = None
unique_names = list(names.keys())
return unique_names
def get_template_and_kwargs(report, dash_url, with_timeseries, body_only):
"""Returns the jinja2 Template object and a dict of template variables for
the report. If the report failed to compute, the template and kwargs will
be for an error page.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
        URL of the Solar Forecast Arbiter dashboard to use when building links.
with_timeseries: bool
Whether or not to include timeseries plots.
body_only: bool
When True, returns a div for injecting into another template,
otherwise returns a full html document with the required
<html> and <head> tags.
Returns
-------
template: jinja2.environment.Template
kwargs: dict
Dictionary of template variables to use as keyword arguments to
template.render().
"""
env = Environment(
loader=ChoiceLoader([
PackageLoader('solarforecastarbiter.reports', 'templates/html'),
PackageLoader('solarforecastarbiter.reports', 'templates'),
]),
autoescape=select_autoescape(['html', 'xml']),
lstrip_blocks=True,
trim_blocks=True
)
env.filters['pretty_json'] = _pretty_json
env.filters['figure_name_filter'] = _figure_name_filter
env.filters['unique_flags_filter'] = _unique_flags_filter
kwargs = _get_render_kwargs(report, dash_url, with_timeseries)
if report.status == 'complete':
template = env.get_template('body.html')
elif report.status == 'failed':
template = env.get_template('failure.html')
elif report.status == 'pending':
template = env.get_template('pending.html')
else:
raise ValueError(f'Unknown status for report {report.status}')
if body_only:
kwargs['base_template'] = env.get_template('empty_base.html')
else:
kwargs['base_template'] = env.get_template('base.html')
return template, kwargs
def render_html(report, dash_url=datamodel.DASH_URL,
with_timeseries=True, body_only=False):
"""Create full html file.
The Solar Forecast Arbiter dashboard will likely use its own
templates for rendering the full html.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
        URL of the Solar Forecast Arbiter dashboard to use when building links.
with_timeseries: bool
Whether or not to include timeseries plots.
body_only: bool
When True, returns a div for injecting into another template,
otherwise returns a full html document with the required
<html> and <head> tags.
Returns
-------
str
The rendered html report
"""
template, kwargs = get_template_and_kwargs(
report, dash_url, with_timeseries, body_only)
out = template.render(**kwargs)
return out
def _link_filter(value):
"""convert html href markup to tex href markup"""
if isinstance(value, Undefined): # pragma: no cover
return value
match = re.search(
"""<a\\s+(?:[^>]*?\\s+)?href=(["'])(.*?)(["'])>(.*?)<\\/a>""",
value, re.DOTALL)
if match:
new = "\\href{" + match.group(2) + "}{" + match.group(4) + "}"
out = value[:match.start()] + new + value[match.end():]
return out
else:
return value
def _html_to_tex(value):
if isinstance(value, Undefined):
return value
value = (value
.replace('<p>', '')
.replace('</p>', '\n')
.replace('<em>', '\\emph{')
.replace('</em>', '}')
.replace('<code>', '\\verb|')
.replace('</code>', '|')
.replace('<b>', '\\textbf{')
.replace('</b>', '}')
.replace('<ol>', '\\begin{enumerate}')
.replace('</ol>', '\\end{enumerate}')
.replace('<li>', '\\item ')
.replace('</li>', '\n')
.replace('</a>', '')
.replace('<=', '$\\leq$')
.replace("%", "\\%")
.replace('W/m^2', '$W/m^2$')
)
value = re.sub('\\<a.*\\>', '', value)
return value
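# Example of the filter above (editorial, for reference only):
#   _html_to_tex('<p>Mean <em>bias</em> error <= 5%</p>')
#   returns 'Mean \emph{bias} error $\leq$ 5\%' followed by a newline.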
def render_pdf(report, dash_url, max_runs=5):
"""
Create a PDF report using LaTeX.
Parameters
----------
report: :py:class:`solarforecastarbiter.datamodel.Report`
dash_url: str
URL of the Solar Forecast Arbiter dashboard to use when building links.
max_runs: int, default 5
Maximum number of times to run pdflatex
Returns
-------
bytes
The rendered PDF report
Notes
-----
This code was inspired by the latex package available at
https://github.com/mbr/latex/ under the following license:
Copyright (c) 2015, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of latex nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" # NOQA
env = Environment(
loader=ChoiceLoader([
PackageLoader('solarforecastarbiter.reports', 'templates/pdf'),
PackageLoader('solarforecastarbiter.reports', 'templates'),
]),
autoescape=False,
lstrip_blocks=True,
trim_blocks=True,
block_start_string='\\BLOCK{',
block_end_string='}',
variable_start_string='\\VAR{',
variable_end_string='}',
comment_start_string='\\#{',
comment_end_string='}',
line_statement_prefix='%-',
line_comment_prefix='%#'
)
env.filters['html_to_tex'] = _html_to_tex
env.filters['link_filter'] = _link_filter
env.filters['pretty_json'] = _pretty_json
env.filters['unique_flags_filter'] = _unique_flags_filter
kwargs = _get_render_kwargs(report, dash_url, False)
with tempfile.TemporaryDirectory() as _tmpdir:
tmpdir = Path(_tmpdir)
logfile, auxfile = _prepare_latex_support_files(tmpdir, env, kwargs)
_save_figures_to_pdf(tmpdir, report)
_compile_files_into_pdf(tmpdir, logfile, auxfile, max_runs)
return (tmpdir / 'out.pdf').read_bytes()
def _prepare_latex_support_files(tmpdir, env, kwargs):
template = env.get_template('base.tex')
tex = template.render(**kwargs)
texfile = tmpdir / 'out.tex'
texfile.write_text(tex)
auxfile = tmpdir / 'out.aux'
logfile = tmpdir / 'out.log'
return logfile, auxfile
def _save_figures_to_pdf(tmpdir, report):
figdir = tmpdir / 'figs'
figdir.mkdir()
for fig in report.raw_report.plots.figures:
name = (
fig.category + '+' + fig.metric + '+' +
fig.name
).replace('^', '-').replace(' ', '+').replace('_', '+').replace(
'<=', 'lte').replace('%', 'pct').replace('.', '').replace('/', '-')
name += '.pdf'
# handle characters that will cause problems for tex
figpath = figdir / name
figpath.write_bytes(base64.a85decode(fig.pdf))
def _compile_files_into_pdf(tmpdir, logfile, auxfile, max_runs):
args = (
'pdflatex',
'-interaction=batchmode',
'-halt-on-error',
'-no-shell-escape',
'-file-line-error',
'out.tex'
)
runs_left = max_runs
prev_aux = 'nothing to see here'
# run pdflatex until it settles
while runs_left > 0:
try:
subprocess.run(args, check=True, cwd=str(tmpdir.absolute()))
except subprocess.CalledProcessError:
try:
logger.exception(logfile.read_text())
except Exception:
logger.exception('Pdflatex failed and so did reading log')
raise
aux = auxfile.read_text()
if aux == prev_aux:
break
else:
prev_aux = aux
runs_left -= 1
else:
raise RuntimeError(
f'PDF generation unstable after {max_runs} runs')
| 2.71875 | 3 |
classroom/reward_learner.py | norabelrose/whisper | 0 | 12757923 | from abc import ABC, abstractmethod
from numpy.typing import ArrayLike
from typing import SupportsFloat
class RewardLearner(ABC):
"""
Abstract class for reward models.
"""
@abstractmethod
def predict_reward(self, state, action, next_state) -> SupportsFloat:
"""
Predict the reward for the given state, action, and next state.
"""
@abstractmethod
def learning_step(self, state, action, next_state, reward) -> None:
"""
Update the model based on the given state, action, next state, and reward.
""" | 3.390625 | 3 |
SVM-Image-Classification-master/svmclassificatino.py | obrs09/bcnncov19 | 0 | 12757924 | from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import skimage
from sklearn import svm, metrics, datasets
from sklearn.utils import Bunch
from sklearn.model_selection import GridSearchCV, train_test_split
#import opencv
from skimage.io import imread
from skimage.transform import resize
import time
import sys
start = time.time()
def load_image_files(container_path, dimension=(256, 256, 3)):
"""
Load image files with categories as subfolder names
which performs like scikit-learn sample dataset
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
dimension : tuple
size to which image are adjusted to
Returns
-------
Bunch
"""
image_dir = Path(container_path)
folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]
categories = [fo.name for fo in folders]
descr = "A image classification dataset"
images = []
flat_data = []
target = []
for i, direc in enumerate(folders):
for file in direc.iterdir():
img = skimage.io.imread(file)
img_resized = resize(img, dimension, anti_aliasing=True, mode='reflect')
flat_data.append(img_resized.flatten())
images.append(img_resized)
target.append(i)
flat_data = np.array(flat_data)
target = np.array(target)
images = np.array(images)
#print(images)
return Bunch(data=flat_data,
target=target,
target_names=categories,
images=images,
DESCR=descr),folders
image_dataset_train,folders_train = load_image_files("train/")
image_dataset_test,folders_test = load_image_files("test/")
#image_dataset = load_image_files("images/")
X_train = image_dataset_train.data
y_train = image_dataset_train.target
X_test = image_dataset_test.data
y_test = image_dataset_test.target
# image_dataset.data, image_dataset.target, test_size=0.3,random_state=109)
# param_grid = [
# {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
# {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
# ]
#svc = svm.SVC()
clf = svm.SVC()
#clf = GridSearchCV(svc, param_grid)
clf.fit(X_train, y_train)
print(folders_train)
y_pred = clf.predict(X_test)
print(y_pred)
print(y_test)
len_of_y = len(y_pred)
predict_correct_covid = 0
predict_wrong_covid = 0
predict_correct_noncovid = 0
predict_wrong_noncovid = 0
for i in range(len_of_y):
if y_pred[i] == y_test[i] and y_pred[i] == 0:
predict_correct_covid += 1
elif y_pred[i] == y_test[i] and y_pred[i] == 1:
predict_correct_noncovid += 1
elif y_pred[i] != y_test[i] and y_pred[i] == 0:
predict_wrong_covid += 1
elif y_pred[i] != y_test[i] and y_pred[i] == 1:
predict_wrong_noncovid += 1
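# Editorial note: the four tallies above are the entries of a 2x2 confusion
# matrix; metrics.confusion_matrix(y_test, y_pred) (sklearn.metrics is
# already imported above) computes the same counts in a single call.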
print("predict_correct_covid", predict_correct_covid)
print("predict_wrong_covid", predict_wrong_covid)
print("predict_correct_noncovid", predict_correct_noncovid)
print("predict_wrong_noncovid", predict_wrong_noncovid)
print("percen of correct covid", predict_correct_covid/(predict_correct_covid + predict_wrong_covid))
print("percen of correct noncovid", predict_correct_noncovid/(predict_correct_noncovid + predict_wrong_noncovid))
print("precent over all", (predict_correct_covid + predict_correct_noncovid)/len_of_y)
end = time.time()
print("time", end - start) | 2.828125 | 3 |
utils/osu/graphing.py | zivoy/flowaboat | 0 | 12757925 | import io
import math
from textwrap import wrap
from time import strftime, gmtime
import bezier
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
from PIL import Image
from matplotlib import pyplot as plt
from ..utils import Log
def graph_bpm(map_obj):
"""
    graphs the BPM changes over the course of a map
:param map_obj: a MapStats object
:return: image in io stream
"""
Log.log(f"Graphing BPM for {map_obj.title}")
data = [(i.starttime / map_obj.speed_multiplier,
1000 / i.ms_per_beat * 60 / map_obj.speed_multiplier)
for i in map_obj.beatmap.timingpoints if i.change]
chart_points = list()
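    # Build a step-style line: for each tempo change, append a point just
    # before the change at the previous BPM and one at the new BPM, then
    # extend the final BPM out to the last hit object so the line spans the
    # whole map.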
for i, j in enumerate(data):
if i != 0:
last = data[i - 1]
chart_points.append((j[0] - .01, last[1]))
chart_points.append(j)
if len(data) - 1 == i:
chart_points.append((map_obj.beatmap.hitobjects[-1].starttime
/ map_obj.speed_multiplier, j[1]))
points = pd.DataFrame(chart_points)
points.columns = ["Time", "BPM"]
col = (38 / 255, 50 / 255, 59 / 255, .9)
sns.set(rc={'axes.facecolor': col,
'text.color': (236 / 255, 239 / 255, 241 / 255),
'figure.facecolor': col,
'savefig.facecolor': col,
'xtick.color': (176 / 255, 190 / 255, 197 / 255),
'ytick.color': (176 / 255, 190 / 255, 197 / 255),
'grid.color': (69 / 255, 90 / 255, 100 / 255),
'axes.labelcolor': (240 / 255, 98 / 255, 150 / 255),
'xtick.bottom': True,
'xtick.direction': 'in',
'figure.figsize': (6, 4),
'savefig.dpi': 100
})
ax = sns.lineplot(x="Time", y="BPM", data=points, color=(240 / 255, 98 / 255, 150 / 255))
length = int(map_obj.total_length) * 1000
m = length / 50
plt.xlim(-m, length + m)
formatter = matplotlib.ticker.FuncFormatter(lambda ms, x: strftime('%M:%S', gmtime(ms // 1000)))
ax.xaxis.set_major_formatter(formatter)
comp = round(max(1, (map_obj.bpm_max - map_obj.bpm_min) / 20), 2)
top = round(map_obj.bpm_max, 2) + comp
bot = max(round(map_obj.bpm_min, 2) - comp, 0)
dist = top - bot
plt.yticks(np.arange(bot, top, dist / 6 - .0001))
plt.ylim(bot, top)
round_num = 0 if dist > 10 else 2
formatter = matplotlib.ticker.FuncFormatter(lambda dig, y:
f"{max(dig - .004, 0.0):.{round_num}f}")
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
width = 85
map_text = "\n".join(wrap(f"{map_obj.title} by {map_obj.artist}", width=width)) + "\n" + \
"\n".join(wrap(f"Mapset by {map_obj.creator}, "
f"Difficulty: {map_obj.version}", width=width))
plt.title(map_text)
plt.box(False)
image = io.BytesIO()
plt.savefig(image, bbox_inches='tight')
image.seek(0)
plt.clf()
plt.close()
return image
def map_strain_graph(map_strains, progress=1., width=399., height=40., max_chunks=100, low_cut=30.):
"""
    generates a strain graph for the map
    :param map_strains: get_strains object
    :param progress: how much of the map the player completed
    :param width: width of the image
    :param height: height of the image
    :param max_chunks: maximum number of strain chunks sampled from the map
    :param low_cut: padding added at the bottom of the graph
    :return: an image in a BytesIO object
"""
strains, max_strain = map_strains["strains"], map_strains["max_strain"]
strains_chunks = list()
chunk_size = math.ceil(len(strains) / max_chunks)
for i in range(0, len(strains), chunk_size):
strain_part = strains[i:i + chunk_size]
strains_chunks.append(max(strain_part))
x = np.linspace(0, width, num=len(strains_chunks))
y = np.minimum(low_cut,
height * 0.125 + height * .875 - np.array([i / max_strain for i in
strains_chunks]) * height * .875)
x = np.insert(x, 0, 0)
x = np.insert(x, 0, 0)
x = np.append(x, width)
x = np.append(x, width)
y = np.insert(y, 0, low_cut)
y = np.insert(y, 0, low_cut)
y = np.append(y, low_cut)
y = np.append(y, low_cut)
curves = list()
curves.append(bezier.Curve(np.asfortranarray([[0.0, 0.0], [height, low_cut]]), degree=1))
for i in range(1, len(y) - 1):
node = np.asfortranarray([
[avgpt(x, i - 1), x[i], avgpt(x, i)],
[avgpt(y, i - 1), y[i], avgpt(y, i)]])
curves.append(
bezier.Curve(node, degree=2)
)
curves.append(bezier.Curve(np.asfortranarray([[width, width], [low_cut, height]]), degree=1))
curves.append(bezier.Curve(np.asfortranarray([[width, 0.0], [height, height]]), degree=1))
polygon = bezier.CurvedPolygon(*curves)
_, ax = plt.subplots(figsize=(round(width * 1.30), round(height * 1.30)), dpi=1)
polygon.plot(pts_per_edge=200, color=(240 / 255, 98 / 255, 146 / 255, 1), ax=ax)
plt.xlim(0, width)
plt.ylim(height, 0)
plt.axis('off')
plt.box(False)
image = io.BytesIO()
fig1 = plt.gcf()
fig1.savefig(image, bbox_inches='tight', transparent=True, pad_inches=0, dpi=1)
image.seek(0)
plt.clf()
plt.close()
img = Image.open(image)
data = np.array(img)
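    # Darken every pixel past the player's progress point, and rescale the
    # alpha channel of all non-transparent pixels to full opacity.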
for j in data:
for pos, i in enumerate(j):
if pos > len(j) * progress:
j[pos] = i / 1.5
if i[3] != 0:
j[pos][3] = i[3] / 159 * 255
img = Image.fromarray(data)
image.close()
image = io.BytesIO()
img.save(image, "png")
image.seek(0)
return image
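# Usage sketch (illustrative only): "map_strains" is the dict described in
# the docstring of map_strain_graph above, with "strains" and "max_strain":
#
#   image = map_strain_graph(map_strains, progress=0.5)
#   with open("strains.png", "wb") as f:
#       f.write(image.read())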
def avgpt(points, index):
"""
    get the average of the point at the given index and the next one
    :param points: list of points
    :param index: index of the current point
    :return: average of points[index] and points[index + 1]
"""
return (points[index] + points[index + 1]) / 2.0
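# Example (illustrative): avgpt([0.0, 2.0, 4.0], 0) returns 1.0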
| 2.578125 | 3 |
soccer_xg/ml/logreg.py | ML-KULeuven/soccer_xg | 100 | 12757926 | import numpy as np
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.pipeline import Pipeline
from soccer_xg.ml.preprocessing import simple_proc_for_linear_algoritms
def logreg_gridsearch_classifier(
numeric_features,
categoric_features,
learning_rate=0.08,
use_dask=False,
n_iter=100,
scoring='roc_auc',
):
"""
    Simple classification pipeline using randomized search to optimize logreg hyper-parameters
    Parameters
    ----------
    `numeric_features` : The list of numeric features
    `categoric_features` : The list of categoric features
    `learning_rate` : The learning rate; a float or schedule name selects an
        SGD-based model, while None selects a plain LogisticRegression
"""
return _logreg_gridsearch_model(
'classification',
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
)
def logreg_gridsearch_regressor(
numeric_features,
categoric_features,
learning_rate=0.08,
use_dask=False,
n_iter=100,
scoring='roc_auc',
):
"""
    Simple regression pipeline using randomized search to optimize logreg hyper-parameters
    Parameters
    ----------
    `numeric_features` : The list of numeric features
    `categoric_features` : The list of categoric features
    `learning_rate` : The learning rate; a float or schedule name selects an
        SGD-based model, while None selects a plain LogisticRegression
"""
return _logreg_gridsearch_model(
'regression',
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
)
def _logreg_gridsearch_model(
task,
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
):
if learning_rate is None:
param_space = {
'clf__C': np.logspace(-5, 5, 100),
'clf__class_weight': ['balanced', None],
}
model = LogisticRegression(max_iter=10000, fit_intercept=False)
else:
param_space = {
'clf__penalty': ['l1', 'l2'],
'clf__alpha': np.logspace(-5, 5, 100),
'clf__class_weight': ['balanced', None],
}
learning_rate_schedule = (
'constant' if isinstance(learning_rate, float) else learning_rate
)
eta0 = learning_rate if isinstance(learning_rate, float) else 0
model = SGDClassifier(
learning_rate=learning_rate_schedule,
eta0=eta0,
loss='log',
max_iter=10000,
fit_intercept=False,
)
pipe = Pipeline(
[
(
'preprocessing',
simple_proc_for_linear_algoritms(
numeric_features, categoric_features
),
),
('clf', model),
]
)
if use_dask:
from dask_ml.model_selection import RandomizedSearchCV
return RandomizedSearchCV(
pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5
)
else:
from sklearn.model_selection import RandomizedSearchCV
return RandomizedSearchCV(
pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5
)
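# Usage sketch (illustrative only; not part of the original module).
# The feature names below are hypothetical, and X_train / y_train / X_test
# are assumed to be pandas objects defined elsewhere:
#
#   model = logreg_gridsearch_classifier(
#       numeric_features=['distance', 'angle'],
#       categoric_features=['bodypart'],
#       learning_rate=None,   # None -> tune a plain LogisticRegression
#       n_iter=50,
#       scoring='roc_auc',
#   )
#   model.fit(X_train, y_train)
#   probabilities = model.predict_proba(X_test)[:, 1]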
| 2.796875 | 3 |
moltemplate/ttree.py | Mopolino8/moltemplate | 0 | 12757927 | <filename>moltemplate/ttree.py
#!/usr/bin/env python
# Authors: <NAME> (jewett.aij at g mail)
# http://www.moltemplate.org
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
ttree.py is a simple program for recursively generating large redundant
text files (such as data files read by molecular simulation programs)
from small (non-redundant) text files (such as molecule definitions
and force-field parameters).
By default, the large number of unique template variables generated
in the process are automatically substituted with integers
(or other numeric counters, all of which can be overridden),
rendered, and the rendered templates are written to a file.
BasicUI This section of the code contains the user interface for ttree
when run as a stand-alone program, as described above. (This
section of code contains the "if __name__ == '__main__':" code block.)
-- Data Types --
StaticObj Static nodes are data structures used to store ttree class definitions.
(Static nodes are useful for defining molecule types or
namespaces in LAMMPS or other molecular simulation programs.)
The nodes themselves are stored in a tree of nested class definitions.
Static variables (such as "@atom:C") are also associated with
StaticObjs.
InstanceObj Instance nodes are created when a user creates one (or many)
copies of a class, using the "new" command.
These classes in turn may instantiate other classes.
(Example: A user may manually instantiate several copies of a
molecule, such as a protein, however each of those
molecules may contain molecular subunits, such as
amino acids, which are automatically instantiated.)
Instance variables (such as "$atom:CA") are also associated with
InstanceObjs.
"""
import sys
from collections import defaultdict
import operator
import random
#import gc
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
# -- ttree_lex.py --
# TtreeShlex is a backwards-compatible version of python's standard shlex module
# It has the additional member: "self.wordterminators", which overrides
# the "self.wordchars" member. This enables better handling of unicode
# characters by allowing a much larger variety of characters to appear
# in words or tokens parsed by TtreeShlex. Otherwise it is identical to shlex.
try:
from .ttree_lex import TtreeShlex, SplitQuotedString, EscCharStrToChar, \
SafelyEncodeString, RemoveOuterQuotes, MaxLenStr, HasWildcard, \
InputError, ErrorLeader, OSrcLoc, TextBlock, VarRef, VarBinding, \
TemplateLexer
except (SystemError, ValueError):
# not installed as a package
from ttree_lex import *
if sys.version < '2.6':
raise InputError('Error: Using python ' + sys.version + '\n'
' Alas, you must upgrade to a newer version of python (2.7 or later).')
elif sys.version < '2.7':
sys.stderr.write('--------------------------------------------------------\n'
'----------------- WARNING: OLD PYTHON VERSION ----------\n'
' This program is untested on your python version (' +
sys.version + ').\n'
' PLEASE LET ME KNOW IF THIS PROGRAM CRASHES (and upgrade python).\n'
' -Andrew 2016-9-21\n'
'--------------------------------------------------------\n'
'--------------------------------------------------------\n')
from ordereddict import OrderedDict
else:
from collections import OrderedDict
if sys.version > '3':
import io
else:
import cStringIO
# We keep track of the program name and version.
# (This is only used for generating error messages.)
#g_filename = 'ttree.py'
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2016-12-21'
g_version_str = '0.85.0'
class ClassReference(object):
""" Every class defined by the user (stored in an StaticObj data structure)
may contain references to other classes (ie. other StaticObjs).
(Note: All of these StaticObjs are stored in the same tree, the
global static tree.)
Examples:
Whenever an instance of a class is created, this may automatically spawn
the creation of additional classes (which are instantiated because a 'new'
command appeared within the first class's definition). These are stored in
the "StaticObj.instance_commands[i].class_ref" attribute.
Similarly, each class (StaticObj) can optionally inherit some of its
traits (consisting of write() and new commands) from one or more
"class_parents" (also StaticObjs). A list of these parents is stored in the
"StaticObj.class_parents" attribute. In both cases (self.instance_commands
or self.class_parents) we need to store a pointer to the StaticObj(s)
corresponding to the instance-childen or class-parents.
(This stored in self.statobj).
However, for the purposes of debugging and interactivity, it is also
convenient to permanently keep track of the string that the user used to
specify the name/location of that class/StaticObj
(stored in self.statobj_str), in addition to the location
in the file where that string occurs (stored in self.srcloc)."""
__slots__ = ["statobj_str", "srcloc", "statobj"]
def __init__(self,
statobj_str=None,
srcloc=None,
statobj=None):
self.statobj_str = statobj_str
if srcloc is None:
self.srcloc = OSrcLoc('', -1)
else:
self.srcloc = srcloc
self.statobj = statobj
# def __repr__(self):
# return repr((self.statobj_str, self.srcloc))
# "Command"s are tasks to carry out.
# (...either immediately, or later during instantiation)
# Commands are used to write to files, create new instances, delete instances,
# or to run custom commands which modify an instance of a class.
# (For example "instance = new Class.move(1,0,0).rot(45,0,0,1)"
# The ".move(1,0,0)" and ".rot(45,0,0,1)" commands are "stackable" and
# have similar syntax to member functions in C++, JAVA, Python.)
class Command(object):
__slots__ = ["srcloc"]
def __init__(self, srcloc=None):
self.srcloc = srcloc
# COMMENTING OUT: "COUNT" AND "ORDER" ARE NO LONGER NEEDED
#count = 0
# def __init__(self, srcloc=None):
# self.srcloc = srcloc
# # The "order" member is a counter that keeps track of the order
# # in which the Command data types are created (issued by the user).
# Command.count += 1
# self.order = Command.count
# def __lt__(self, x):
# return self.order < x.order
class WriteFileCommand(Command):
""" WriteFileCommand
filename This is the name of the file that will be written to
when the command is executed.
tmpl_list This is the contents of what will be written to the file.
Text strings are often simple strings, however more
generally, they can be strings which include other variables
(ie templates). In general, templates are lists of alternating
TextBlocks and VarRefs, (with additional tags and data to
identify where they occur in in the original user's files).
"""
__slots__ = ["filename", "tmpl_list"]
def __init__(self,
filename=None,
tmpl_list=None,
srcloc=None):
        Command.__init__(self, srcloc)
        self.filename = filename
        if tmpl_list is None:
            self.tmpl_list = []
        else:
            self.tmpl_list = tmpl_list
def __str__(self):
if self.filename:
return 'WriteFileCommand(\"' + self.filename + '\")'
else:
return 'WriteFileCommand(NULL)'
def __copy__(self):
tmpl_list = []
CopyTmplList(self.tmpl_list, tmpl_list) # CHECK:IS_MEMORY_WASTED_HERE?
return WriteFileCommand(self.filename, tmpl_list, self.srcloc)
class InstantiateCommand(Command):
""" InstantiateCommand is a simple tuple-like datatype used to
store pairs of names (strings, stored in self.name),
    and ClassReferences (see above, stored in self.class_ref).
"""
__slots__ = ["name",
"class_ref",
"instobj"]
def __init__(self,
name=None,
class_ref=None,
srcloc=None,
instobj=None):
Command.__init__(self, srcloc)
self.name = name
# if class_ref is None:
# self.class_ref = ClassReference()
# else:
self.class_ref = class_ref
self.instobj = instobj
def __str__(self):
return 'InstantiateCommand(' + self.name + ')'
def __copy__(self):
return InstantiateCommand(self.name,
self.class_ref,
self.srcloc,
self.instobj)
class DeleteCommand(Command):
__slots__ = []
def __init__(self,
srcloc=None):
Command.__init__(self, srcloc)
def __str__(self):
return 'DeleteCommand()'
def __copy__(self):
return DeleteCommand(self.srcloc)
class StackableCommand(Command):
""" StackableCommand is a class for storing commands
that effect the environment of the object being created.
The combined effect of these commands can be thought of as a "stack"
Commands can be pushed on the stack, or popped off
The actual commands themselves are represented by the "contents" member
which is usually a text string.
ttree.py does not attempt to understand the content of these commands.
That job is left up to the __main__ module. (IE. whatever script that
happens to be importing ttree.py. If there is no script, and
ttree.py IS the main module, then it simply ignores these commands.)
"""
__slots__ = ["context_node"]
def __init__(self,
srcloc,
context_node=None):
Command.__init__(self, srcloc)
# if multiple stacks are present, then use "context_node"
self.context_node = context_node
# as a key to identify which stack you want
# the command to modify
class PushCommand(StackableCommand):
__slots__ = ["contents"]
def __init__(self,
contents,
srcloc,
context_node=None):
StackableCommand.__init__(self, srcloc, context_node)
self.contents = contents
def __copy__(self):
return PushCommand(self.contents, self.srcloc, self.context_node)
def __str__(self):
return 'PushCommand(' + str(self.contents) + ')'
class PushRightCommand(PushCommand):
__slots__ = []
def __init__(self,
contents,
srcloc,
context_node=None):
PushCommand.__init__(self, contents, srcloc, context_node)
def __copy__(self):
return PushRightCommand(self.contents, self.srcloc, self.context_node)
def __str__(self):
return 'PushRightCommand(' + str(self.contents) + ')'
class PushLeftCommand(PushCommand):
__slots__ = []
def __init__(self,
contents,
srcloc,
context_node=None):
PushCommand.__init__(self, contents, srcloc, context_node)
def __copy__(self):
return PushLeftCommand(self.contents, self.srcloc, self.context_node)
def __str__(self):
return 'PushLeftCommand(' + str(self.contents) + ')'
class PopCommand(StackableCommand):
__slots__ = ["partner"]
def __init__(self,
partner,
srcloc,
context_node=None):
StackableCommand.__init__(self, srcloc, context_node)
self.partner = partner
def __copy__(self):
return PopCommand(self.partner, self.srcloc, self.context_node)
def __str__(self):
return 'PopCommand(' + str(self.partner.contents) + ')'
class PopRightCommand(PopCommand):
__slots__ = []
def __init__(self,
partner,
srcloc,
context_node=None):
PopCommand.__init__(self, partner, srcloc, context_node)
assert((partner is None) or isinstance(partner, PushRightCommand))
def __copy__(self):
return PopRightCommand(self.partner, self.srcloc, self.context_node)
def __str__(self):
return 'PopRightCommand(' + str(self.partner.contents) + ')'
class PopLeftCommand(PopCommand):
__slots__ = []
def __init__(self,
partner,
srcloc,
context_node=None):
PopCommand.__init__(self, partner, srcloc, context_node)
assert((partner is None) or isinstance(partner, PushLeftCommand))
def __copy__(self):
return PopLeftCommand(self.partner, self.srcloc, self.context_node)
def __str__(self):
return 'PopLeftCommand(' + str(self.partner.contents) + ')'
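# Illustrative pairing (an assumed usage, based on the assertions in the
# Pop*Command constructors): each pop command stores a reference to the
# push command it cancels. Here "srcloc" would be an OSrcLoc object.
#
#   push = PushRightCommand('.move(1,0,0)', srcloc)
#   pop = PopRightCommand(push, srcloc)   # pop.partner is push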
# The ScopeCommand, ScopeBegin, and ScopeEnd commands are useful to designate
# which commands belong to a particular class definition (or class instance).
# (This is useful later on, when a linear list of commands has been created.)
# They are simply markers an do not do anything. These classes can be ignored.
class ScopeCommand(Command):
__slots__ = ["node"]
def __init__(self,
node,
srcloc):
Command.__init__(self, srcloc)
self.node = node
#self.srcloc = srcloc
def __copy__(self):
return ScopeCommand(self.node, self.srcloc)
def __str__(self):
if self.node:
return 'ScopeCommand(' + self.node.name + ')'
else:
return 'ScopeCommand(None)'
class ScopeBegin(ScopeCommand):
__slots__ = []
def __init__(self, node, srcloc):
ScopeCommand.__init__(self, node, srcloc)
def __copy__(self):
return ScopeBegin(self.node, self.srcloc)
def __str__(self):
if self.node:
return 'ScopeBegin(' + NodeToStr(self.node) + ')'
else:
return 'ScopeBegin(None)'
class ScopeEnd(ScopeCommand):
__slots__ = []
def __init__(self, node, srcloc):
ScopeCommand.__init__(self, node, srcloc)
def __copy__(self):
return ScopeEnd(self.node, self.srcloc)
def __str__(self):
if self.node:
return 'ScopeEnd(' + NodeToStr(self.node) + ')'
else:
return 'ScopeEnd(None)'
# COMMENTING OUT: NOT NEEDED AT THE MOMENT
# class VarAssignCommand(Command):
# """ VarAssignCommand
#
# This class is used whenever the user makes an explicit request to assign
# a variable to a value (values are text strings).
#
# var_ref The variable name (technically speaking, I call this
# a variable descriptor string and it includes at least one of
# the following: the name of a leaf node, a category node name,
# and category name)
# the location in the file where variable appears, and (eventually
# after subsequent lookup), references to the leaf_node, cat_node,
# "Category", and "VarBinding" data structures associated with it.
# text_tmpl Text strings are often simple strings, however more
# generally, they can be strings which include other variables
# (ie templates). In general, templates are lists of alternating
# TextBlocks and VarRefs, (with additional tags and data to
# identify where they occur in in the original user's files).
#
# """
# __slots__=["var_ref","text_tmpl"]
#
# def __init__(self,
# #command_name = '=', <-- ?!?
# var_ref = None,
# text_tmpl=None):
# Command.__init__(self, srcloc)
# self.var_ref = var_ref
# self.text_tmpl = text_tmpl
class ModCommand(object):
__slots__ = ["command", "multi_descr_str"]
def __init__(self,
command,
multi_descr_str):
self.command = command
self.multi_descr_str = multi_descr_str
def __str__(self):
return 'ModCommand(' + str(self.command) + ')'
def __copy__(self):
return ModCommand(self.command.__copy__(), self.multi_descr_str)
def CopyTmplList(source_tmpl_list, dest_cpy):
for entry in source_tmpl_list:
if isinstance(entry, TextBlock):
dest_cpy.append(entry) # Then make a shallow copy
# (pointer assignment) to the text
# block (Text blocks do not change
# during instantiation.)
elif isinstance(entry, VarRef):
assert(len(entry.prefix) > 0)
if entry.prefix[0] == '@': # '@' vars refer to static data
dest_cpy.append(entry) # Then make a shallow copy
                                   # (pointer assignment) to the static
# variable. (Static variables do
# not change during instantiation.)
elif entry.prefix[0] == '$': # new '$' vars are created
# during every instantiation.
# var_refs do change when you instantiate them. So
# create a new VarRef object, and copy the attributes.
var_ref = VarRef(entry.prefix,
entry.descr_str,
entry.suffix,
entry.srcloc)
# Note: for instance variables ('$' vars)
# "entry.nptr" should not contain
# any data yet, so we just ignore it.
# I assert this below:
assert((entry.nptr.cat_node is None) and
(entry.nptr.leaf_node is None))
dest_cpy.append(var_ref)
else:
assert(False) # prefix[0] should be either '@' or '$'
else:
assert(False) # type(entry) should be either TextBlock or VarRef
def RecursiveJoin(tokens_expr, delimiter=''):
    """ RecursiveJoin() converts a tree-like list/tuple of tokens, for example:
    ['a ', ('tree', '-', ['like', 'container']), [[' '], 'of'], ' strings']
    to an ordinary string, eg:
    'a tree-like container of strings'
    This behaves similarly to "reduce(lambda a, b: a+b, tokens)",
    except that it works with arbitrarily nested lists/tuples."""
    if isinstance(tokens_expr, basestring):
        return tokens_expr
    else:
        text_lstr = []
        for i in range(0, len(tokens_expr)):
            text_lstr.append(RecursiveJoin(tokens_expr[i], delimiter))
        return delimiter.join(text_lstr)
#----------------------------------------------------------
#----------------------------------------------------------
# The following code is specific to ttree.
#
# (Up until this point, we have only defined
# a few simple general text parsing routines.)
#----------------------------------------------------------
#----------------------------------------------------------
def PtknsToStr(path_tokens):
"""
There are three ways to store paths:
As a single string: '/Protein/Phe/Ca' <- the format entered by the user
As a list of tokens ['Protein', 'Phe', 'Ca'] <- split into tokens
As a list of nodes in a tree (pointers to nodes in a tree hierarchy)
This function converts between the first two formats.
"""
text = ''
if len(path_tokens) > 0:
text = path_tokens[0]
for i in range(1, len(path_tokens)):
text += '/' + path_tokens[i]
else:
text = ''
return text
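# Examples (illustrative):
#   PtknsToStr(['Protein', 'Phe', 'Ca']) returns 'Protein/Phe/Ca'
#   PtknsToStr(['', 'Protein', 'Phe'])   returns '/Protein/Phe'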
def StrToPtkns(path_string):
    """ The inverse of PtknsToStr(), this function splits a string like
    '/usr/local/../bin/awk' into ['', 'usr', 'local', '..', 'bin', 'awk'].
    For illustrative purposes only. Use path_string.split('/') directly instead."""
    return path_string.split('/')
def FindChild(name, node, dbg_loc):
""" FindChild looks over the list of node.children to find a child
which matches the name given in the first argument.
If it is not found, it returns None.
Note: I have not yet specified what kind of nodes FindChild() operates
on. Both StaticObjs and InstanceObjs have self.children and self.parent.
However only StaticObjs have "self.class_parents".
("class_parents" are "parents" in the object-oriented sense.)
If "node" (2nd argument) happens t be an StaticObj, this means it also
We must search over the children of these class_parents as well.
Terminology used here differs from Object Oriented Programming
Children in node.children are not children in the object-oriented
programming sense. However, in OOP, "children" are objects that share all
of the traits of their ancestors (and may have additionl traits as well).
I have implemented OOP style children and parents, but this informtion
is stored in "node.class_parents", instead of "node.parents".
For comparison, instantiated nodes (InstanceObjs) are different. Altough
instantiated classes (InstanceObjs) have access to the attributes of the
class_parents of the StaticObjs that define them, they do not remember the
ownership of that data. (It just gets merged with their own member data,
including their .children.)
Hence we must treat StaticObjs carefully because their are two ways we can
access child data. We should loop over both of them. We do that below:
"""
child = node.children.get(name)
if child:
return child
if isinstance(node, StaticObj):
# The object-oriented inheritance stuff appears here.
# If you don't care about OOP or inheritance,
# then comment out the loop that follows:
# Search recursively over the "children" (ie attributes or members)
# belonging to any OOP ancestors of this node.
for class_parent in node.class_parents:
child = FindChild(name, class_parent, dbg_loc)
if child != None:
return child
for namespace_node in node.namespaces:
child = FindChild(name, namespace_node, dbg_loc)
if child != None:
return child
else:
assert(isinstance(node, InstanceObjBasic))
# Otherwise, a child name match was not found
return None
def FollowPath(path_tokens, starting_node, dbg_loc):
""" FollowPath() returns the "last_node", a node whose position in the
tree is indicated by a list of path_tokens, describing the names
of nodes connecting "starting_node" to "last_node".
If it one of the strings in the list of path_tokens turns out
not to match then names of classes in the tree, then this function
returns the last_node that did match before the error occurred,
as well as an integer which stores the number of tokens in
the path_tokens list which were successfully processed.
In other words, the list of node naes is not a full path, but the
relative path that takes you from one node (not necessarily the root)
to another. Return Value:
Ideally, each node in the list should be a parent or a child of the
previous node. (See comment for PathTokensToStr(), for more details.)
This function returns the number of path_tokens successfully
parsed. Under normal termination, this is len(path_tokens).
If the path can not be followed (because at some point, a child
or parent does not exist), then this function returns a number
smaller than len(path_tokens).
We let the caller handle undefined paths. """
#print(' FollowPath() invoked on: ', path_tokens)
if len(path_tokens) == 0:
return 0, starting_node
node = starting_node
# Is this path a relative path, or a full path?
# If the path-string began with '/', then it's a full path. This means
# that after processing by split('/'), the first token will be ''
# Example: path_tokens='/Prot/Alanine'.split('/')
# --> path_tokens[0] == ''
if path_tokens[0] == '':
# In that case, then take us to the root node:
while node.parent != None:
node = node.parent
#sys.stdout.write('FollowPath(): Retreating to node \"'+node.name+'\"\n')
i0 = 1 # <- We've just processed the first token. Skip over it later.
else:
i0 = 0
i = i0
while i < len(path_tokens):
if path_tokens[i] == '..':
if node.parent is None:
return i, node # <-return the index into the token list
# Caller will know that something went awry
# if the return value is not equal to the
# length of the token list
else:
node = node.parent
i += 1
elif path_tokens[i] == '...':
node_before_ellipsis = node
if i == len(path_tokens) - 1:
return i, node_before_ellipsis
search_target = path_tokens[i + 1]
            # Now search over the "children" of this node
            # for one whose name matches path_tokens[i].
# If not found, then move up to the parent node's children.
# (This is not an exhaustive tree search. Only the nodes which
# are immediate children of this node's parents are searched.)
while node != None:
child = FindChild(search_target, node, dbg_loc)
if child is None:
node = node.parent
else:
node = child
break
if node is None:
# Caller will know that something went awry if the return
# value is not equal to the length of the token list.
return i, node_before_ellipsis
i += 2
# <-Note we ignore empty tokens from now on.
elif path_tokens[i] in ('', '.'):
# (Same convention is used in specifying a
# directory in a filesystem, eg. using /usr/local
# or /usr//local or /usr/./local. These are all equivalent.)
i += 1
else:
# Now search over the "children" of this
            # node for one whose name matches path_tokens[i].
child = FindChild(path_tokens[i], node, dbg_loc)
if child is None:
# In that case, return with the node_list incomplete.
# Let the caller check to see if something went wrong.
return i, node # <-return the index into the token list (i)
# Caller will know that something went awry
# if the return value is not equal to the
# length of the token list
else:
node = child
i += 1
if node.IsDeleted():
#sys.stderr.write('(debug_msg: encountered deleted node: \"'+node.name+'\")\n')
break
return len(path_tokens), node
def PtknsToNode(path_tokens, starting_node, dbg_loc):
""" PtknsToNode() is identical to def FollowPath() except
that it raises syntax-error exceptions if the path is undefined."""
i_last_ptkn, last_node = FollowPath(path_tokens, starting_node, dbg_loc)
if i_last_ptkn < len(path_tokens):
# assert(isinstance(last_node,StaticObj)) <--why did I assert this?
# seems wrong
if (last_node.parent is None) and (path_tokens[i_last_ptkn] == '..'):
# In that case, we tried to back out beyond the root of the tree.
raise InputError('Error(' + g_module_name + '.PtknsToNode()):\n'
' Invalid variable/class name:\n'
' \"' + PtknsToStr(path_tokens) + '\" located near ' + ErrorLeader(
dbg_loc.infile, dbg_loc.lineno) + '\n'
' There are too many \"..\" tokens in the path string.')
elif path_tokens[i_last_ptkn] == '...':
if i_last_ptkn + 1 == len(path_tokens):
raise InputError('Error(' + g_module_name + '.PtknsToNode()):\n'
' Error in ' +
ErrorLeader(dbg_loc.infile,
dbg_loc.lineno) + '\n'
' Expected name following \"...\"\n')
else:
search_target = path_tokens[i_last_ptkn + 1]
# In that case, we were unable to find the node referenced by
# "..."
raise InputError('Error(' + g_module_name + '.PtknsToNode()):\n'
' Class or variable \"' + search_target + '\" not found\n'
' in this context: \"' +
PtknsToStr(path_tokens) + '\"\n'
' located near ' + ErrorLeader(dbg_loc.infile, dbg_loc.lineno))
else:
# Then the reason is: The string in path_tokens[i_last_ptkn]
# was supposed to be a child of last_node but a child
# of that name was not found.
err_msg = 'Error(' + g_module_name + '.PtknsToNode()):\n' +\
' Undefined variable/class name:\n' +\
' \"' + PtknsToStr(path_tokens) + '\",\n' +\
                ' This occurred near or before ' + ErrorLeader(dbg_loc.infile, dbg_loc.lineno) + '\n' +\
' (Specifically \"' + path_tokens[i_last_ptkn] +\
'\" is not a subordinate of \"' + MaxLenStr(last_node.name, '/') + '\".)\n' +\
' This may be due to a typo located here or earlier.\n' +\
' It may also occur if you deleted the object earlier. (Referring to a\n' +\
' deleted object is only forgiven when using [0-9] or [0:10] notation.)\n' +\
' If this object refers to an array you must use brackets []\n' +\
' to explicitly specify the element(s) you want from that array.\n' +\
' (To select multiple elements, you can use [*] or [0-9] or [0:10].)\n'
if (path_tokens[i_last_ptkn] in NodeToPtkns(last_node)):
err_msg += '\nIn this case:\n' +\
' It seems like you may have omitted a } character somewhere before:\n' +\
' ' + ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
raise InputError(err_msg)
assert(False) # One of the two conditions above should be true.
return last_node
def StrToNode(obj_name, starting_node, dbg_loc):
path_tokens = obj_name.split('/')
return PtknsToNode(path_tokens, starting_node, dbg_loc)
def NodeListToPtkns(node_list, dbg_loc=None):
# The path must contain at least the starting node
assert(len(node_list) > 0)
path_tokens = [node_list[0].name]
for i in range(1, len(node_list)):
if node_list[i] == node_list[i - 1].parent:
path_tokens.append('..')
else:
path_tokens.append(node_list[i].name)
# Now check to make sure the user supplied consistent information:
if (node_list[i] not in node_list[i - 1].children.values()):
raise InputError('Error(' + g_module_name + '.NodeListToPtkns()):\n'
' Undefined variable/class name:\n'
' \"' + PtknsToStr(path_tokens) + '\" located near ' + ErrorLeader(
dbg_loc.infile, dbg_loc.lineno) + '\n'
' (\"' + path_tokens[i] + '\" is not subordinate to \"' + MaxLenStr(
node_list[i - 1].name, '/') + '\")\n'
' This could be an internal error.')
return path_tokens
def NodeListToStr(node_list, dbg_loc=None):
# The path must contain at least the starting node
assert(len(node_list) > 0)
path_str = node_list[0].name
for i in range(1, len(node_list)):
if node_list[i] == node_list[i - 1].parent:
path_str += '/..'
else:
path_str += '/' + node_list[i].name
# Now check to make sure the user supplied consistent information:
if (node_list[i] not in node_list[i - 1].children.values()):
err_msg = 'Error(' + g_module_name + '.NodeListToStr()):\n' +\
' Invalid variable/class name:\n' +\
' \"' + PtknsToStr(path_tokens) + '\"'
if dbg_loc != None:
err_msg += ' located near ' + \
ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
err_msg += '\n' +\
' (\"' + node_list[i].name + '\" is not a subordinate of \"' + MaxLenStr(node_list[i - 1].name, '/') + '\")\n' +\
' This could be an internal error.'
raise InputError(err_msg)
return path_str
def NodeToPtkns(node):
ptkns = []
nd = node
while nd != None:
ptkns.append(nd.name)
nd = nd.parent
ptkns.reverse()
return ptkns
def NodeToStr(node):
ptkns = NodeToPtkns(node)
assert(len(ptkns) > 0)
if node.parent is None:
assert(node.name == '')
return '/'
path_str = ptkns[0]
i = 1
while i < len(ptkns):
path_str += '/' + ptkns[i]
i += 1
return path_str
def CatLeafNodesToTkns(cat_name, cat_node, leaf_node, dbg_loc):
assert((cat_node != None) and (leaf_node != None))
assert((cat_name != None) and (cat_name != ''))
# Determine the path of the cat node
cat_node_ptkns = NodeToPtkns(cat_node)
cat_node_ptkns.append(cat_name + ':')
# Determine the path of the leaf node (which should inherit from cat)
deleted = False
leaf_node_ptkns = []
if cat_node != leaf_node:
node = leaf_node
while node.parent != None:
if node.IsDeleted():
deleted = True
leaf_node_ptkns.append('DELETED_' + node.name)
break
leaf_node_ptkns.append(node.name)
if node.parent == cat_node:
break
node = node.parent
leaf_node_ptkns.reverse()
if not deleted:
# Check that leaf inherits from cat. If not, print error.
if ((node.parent != cat_node) and (node != cat_node)):
err_msg = 'Error(' + g_module_name + '.CatLeafNodesToPtkns()):\n' +\
' Invalid variable (category:leaf) pair\n'
if dbg_loc != None:
cat_node_str = NodeToStr(cat_node)
leaf_node_str = NodeToStr(leaf_node)
err_msg += ' located near ' + ErrorLeader(dbg_loc.infile, dbg_loc.lineno) + '\n' +\
' (\"' + leaf_node.name + '\" is not in the scope of \"' + cat_node_str + '/' + cat_name + ':\")\n' +\
' This will happen if you used the \"category\" command to manually\n' +\
' create a category/counter which is not defined globally.\n' +\
'\n' +\
' Note: Using the analogy of a unix style file system, \n' +\
' the problem is that \"' + leaf_node_str + '\"\n' +\
' is not a subdirectory of \"' + cat_node_str + '\".\n' +\
'\n' +\
' Note: This often occurs when \".../\" is used. In that case, you may\n' +\
' be able to avoid this error by referring to your variable explicitly\n' +\
' by using chains of \"../\" tokens in the path instead of \".../\".\n'
#' Make sure that your variable you are using is defined in \n'+\
#' an environment (currently \"'+leaf_node_str+'\")\n'+\
#' which lies WITHIN the environment where the category was defined.\n'+\
#' (currently \"'+cat_node_str+'\").\n'
raise InputError(err_msg)
else:
err_msg = 'Warning: Strange variable path'
if dbg_loc != None:
err_msg += ' near ' + ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
err_msg += '\n' +\
' The category and leaf nodes for variable \"' + cat_name + ':' + leaf_node.name + '\" are the same.\n' +\
' Check to see that this variable is behaving the way you intended.\n' +\
' (It\'s possible this could be an internal error in the program.)\n'
sys.stderr.write(err_msg)
# Merge the list of strings together into a single string:
return cat_node_ptkns + leaf_node_ptkns
def CanonicalCatName(cat_name, cat_node, dbg_loc=None):
# Determine the path of the cat node
tkns = NodeToPtkns(cat_node)
tkns.append(cat_name)
#full_cat_name = tkns[0]
# for i in range(1,len(tkns)):
# full_cat_name += '/'+tkns[i]
# better way:
return '/'.join(tkns)
def CanonicalDescrStr(cat_name, cat_node, leaf_node, dbg_loc=None):
tkns = CatLeafNodesToTkns(cat_name, cat_node, leaf_node, dbg_loc)
descr_str = tkns[0]
for i in range(1, len(tkns)):
if (len(descr_str) > 0) and (descr_str[-1] == ':'):
descr_str += tkns[i]
else:
descr_str += '/' + tkns[i]
return descr_str
def CollapsePath(path_tokens):
"""
    CollapsePath() takes a list of strings representing a
    directory-like path
(for example '/SUB1A/Sub2A/../Sub2B/sub3b/../sub3c/entry'),
and replaces it with a version which should contain no '..' patterns.
(In the example above, it returns /SUB1A/Sub2B/sub3c/entry')
"""
new_ptkns = []
ndelete = 0
i = len(path_tokens) - 1
while i >= 0:
if path_tokens[i] == '..':
ndelete += 1
else:
if (ndelete > 0) and (path_tokens[i] != ''):
# Note: "path_tokens[i] != '')" means "/a/b//c" <-> "/a/b/c"
ndelete -= 1
else:
if len(path_tokens[i]) > 0:
new_ptkns.append(path_tokens[i])
i -= 1
new_ptkns.reverse()
if ndelete > 0:
        return ndelete  # <-- useful to let caller know an error occurred
return new_ptkns
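# Example (illustrative):
#   CollapsePath('/SUB1A/Sub2A/../Sub2B/sub3b/../sub3c/entry'.split('/'))
#   returns ['SUB1A', 'Sub2B', 'sub3c', 'entry']
# (If there are more '..' tokens than can be cancelled, the number of
#  leftover '..' tokens is returned instead, so the caller can detect errors.)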
def FindCatNode(category_name, current_node, srcloc):
""" Search upwards (toward the ancester nodes), looking for a node
containing a category matching category_name (first argument).
Useful when the user specifies a category name, but neglects to
specify which node it was defined in.
    Note: there is no guarantee that the category node returned by this function
    contains an entry in its "categories" list corresponding to this
    category name. You must check for this condition and handle it."""
cat_node = None
node = current_node
while True:
if category_name in node.categories:
cat_node = node
break
elif node.parent != None:
node = node.parent
else:
# node.parent is None, ... we're done
break
if cat_node is None:
assert(node.parent is None)
# sys.stderr.write('Warning near ' +
# ErrorLeader(srcloc.infile,
# srcloc.lineno)+'\n'+
# ' no category named \"'+category_name+'\" found.\n'+
# ' Creating a new global category: /'+
# category_name+':\n')
cat_node = node # the global node
assert(cat_node != None)
return cat_node
def RemoveNullTokens(in_ptkns):
"""This function just gets rid of useless empty tokens in the path ('', '.')
(However if '' appears at the beginning of a path, we leave it alone.)
"""
out_ptkns = []
for i in range(0, len(in_ptkns)):
if ((in_ptkns[i] != '.') and
((in_ptkns[i] != '') or (i == 0))):
out_ptkns.append(in_ptkns[i])
# (I'm sure there are ways to write this in python
# using fewer lines of code. Sigh.)
return out_ptkns
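# Example (illustrative):
#   RemoveNullTokens(['', 'usr', '.', '', 'local']) returns ['', 'usr', 'local']
#   (a leading '' is preserved so that full paths stay distinguishable)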
def DescrToCatLeafPtkns(descr_str, dbg_loc):
"""
Review: Variables in this program have three parts:
1) A variable category name (designating the type of variable).
2) A variable category path, which consists of a node which is an ancestor
of the variable leaf (1) in the tree
3) A variable name ("leaf"), which refers to a node in the tree
(either a static type tree or instance tree)
DescrToCatLeafPtkns() takes a string describing a variable,
as it appears in a template (ie, a write() command, once it has been
stripped of it's '$' or '@' prefix, and surrounding {} brackets)
...and divides it into strings which specify the location of that leaf in
a static or instance tree, in addition to the name and location of the
category node. Descriptor examples for atoms in water:
"AtomType:/Water/O", There are only 2 --types-- of atoms in
"AtomType:/Water/H", a water molecule. We identify them this way.
"AtomID:O" However each water molecule has 3 atoms, and we
"AtomID:H1" can give each atom in each water molecule a unique
"AtomID:H2" AtomID number. "AtomID:H2" is the id number of the
second hydrogen atom in the current water molecule.
---- Output: This function returns a 3-tuple: ----
leaf_ptkns The name of the variable's leaf node, as well as the list of
tokens denoting the path (named list of nodes) which lead to it.
cat_name The name of the variable category (no path information)
cat_ptkns A --suggestion-- for where to find the node containing the
category mentioned in "cat_name". Same format as leaf_ptkns.
Examples:
"AtomType:/Water/O" cat_name='AtomType', cat_path=[], leaf_ptkns=['','Water','O']
"AtomType:/Water/H" cat_name='AtomType', cat_path=[], leaf_ptkns=['','Water','H']
"AtomID:O" cat_name='AtomID', cat_path=[], leaf_ptkns=['O']
"AtomID:H1" cat_name='AtomID', cat_path=[], leaf_ptkns=['H1']
"AtomID:H2" cat_name='AtomID', cat_path=[], leaf_ptkns=['H2']
"mol:/" cat_name='mol', cat_path=[], leaf_ptkns=['']
"mol:" cat_name='mol', cat_path=[], leaf_ptkns=[]
"mol:../" cat_name='mol', cat_path=[], leaf_ptkns=['..']
"../mol" cat_name='mol', cat_path=[], leaf_ptkns=['..']
"$/peptide[3]/ResID:res[25]" cat_name='ResID', cat_path=['', 'peptide[3]'], leaf_ptkns=['res[25]']
"""
split_colon = descr_str.split(':')
if len(split_colon) > 2:
raise InputError('Error(' + g_module_name + '.DescrToCatLeafPtkns())\n'
' Error near ' +
ErrorLeader(dbg_loc.infile, dbg_loc.lineno) + '\n\n'
' Bad variable descriptor: \"' + descr_str + '\"\n' +
' There can be at most one \':\' character in a variable descriptor.\n')
# ---- Are we using colon syntax (example '$atom:H1')?
elif len(split_colon) == 2:
# The category name = text after the last '/' (if present)and before
# ':'
cat_ptkns = split_colon[0].split('/')
cat_name = cat_ptkns[-1]
# The text before that is the suggested (category) path
cat_ptkns = cat_ptkns[:-1]
# if len(cat_ptkns) == 0:
# cat_ptkns.append('.')
# The remaining text is the path leading to the leaf node.
if split_colon[1] != '':
leaf_ptkns = split_colon[1].split('/')
else:
leaf_ptkns = []
if (cat_name == ''):
raise InputError('Error(' + g_module_name + '.DescrToCatLeafPtkns()):\n'
' Error near ' +
ErrorLeader(dbg_loc.infile,
dbg_loc.lineno) + '\n\n'
' Bad variable descriptor: \"' + descr_str + '\"\n')
else:
# ---- Are we using colon-less syntax (example: "$../mol") ?
ptkns = split_colon[0].split('/')
cat_name = ptkns[-1] # last token (eg. "mol") is the cat_name
leaf_ptkns = ptkns[:-1] # the rest is the leaf's path ("..")
if len(leaf_ptkns) == 0:
leaf_ptkns.append('.')
# cat_ptkns = ptkns[:-1] # the same goes for the cat path suggestion
# if len(cat_ptkns) == 0:
# cat_ptkns.append('.')
cat_ptkns = []
# On 2012-8-22, I commented out this line:
# return cat_name, RemoveNullTokens(cat_ptkns), RemoveNullTokens(leaf_ptkns)
# and replaced it with:
return cat_name, RemoveNullTokens(cat_ptkns), leaf_ptkns
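# Worked example (illustrative, matching the docstring above, where "dbg_loc"
# is an OSrcLoc used only for error reporting):
#   DescrToCatLeafPtkns('AtomType:/Water/O', dbg_loc)
#   returns ('AtomType', [], ['', 'Water', 'O'])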
def DescrToCatLeafNodes(descr_str,
context_node,
dbg_loc,
create_missing_nodes=False):
"""
Variables in ttree correspond to nodes in a tree
(and also categories to which they belong).
DescrToCatLeafNodes() reads the name of a variable,
(its descriptor) and determines where in the tree
does this variable reside, and what is it's category?
This function is the heart of ttree because it is
the function used to interpret ttree variable syntax.
(It is very messy right now. I will clean up the code later. AJ 2011-9-06)
Arguments:
descr_str The complete name that the user gave
to the variable. (Excluding '$' or '@')
context_node The class (node) in which the variable
was used. descr_str is interpeted
relative to this context. (This argument
is similar to the current directory
in which a command was issued in unix.)
dbg_loc The location in the user's input file(s)
where this variable is referred to.
create_missing_nodes
If we lookup a variable whose leaf node
does not exist yet, should we create it?
Setting this argument to "True" allows
us to augment the tree to add nodes
corresponding to variables.
-- Here is a greatly simplified version of DescrToCatLeafNodes(): --
def DescrToCatLeafNodes(descr_str, context_node, dbg_loc):
cat_name, cat_ptkns, leaf_ptkns = DescrToCatLeafPtkns(descr_str, dbg_loc)
cat_node = PtknsToNode(cat_ptkns, context_node, dbg_loc)
if len(cat_ptkns) > 0:
leaf_node = PtknsToNode(leaf_ptkns, cat_node, dbg_loc)
else:
leaf_node = PtknsToNode(leaf_ptkns, context_node, dbg_loc)
return cat_name, cat_node, leaf_node
    (This version works, but it does not handle "..." correctly,
and it does not create missing nodes when needed.)
-- Here is a (probably unnecessary) review of terminology: --
Descriptor String:
The first argument ("descr_str") is a descriptor string.
A descriptor string typically contains ":" and "/"
            characters to divide the string into pieces in order
to identify a category name, category node, and leaf node.
Conceptually, the variable's NAME is the leaf node.
The variable's TYPE is the category (node and name).
Node:
Nodes are used to represent both class objects and variable names
1) class objects
Each type of class objects is represented by an StaticObj.
Each instantiated object is represented by an InstanceObj.
2) variable names (leaf nodes)
However variable names are also represented using either
StaticObjs (for @ static variables) or
InstanceObjs (for $ instance variables)
Again, all variables in ttree are members of a class object.
In this case, the name of the node corresponds to the variable's
name, and it's position in the tree refers to the class to which
it belongs.
However "leaf nodes" do not uniquely identify the
actual variable itself. A single node can refer to two different
variables if they are in different categories.
All 3 identifiers (leaf node, category node, category name)
are needed to uniquely identify a ttree variable. See below.
Ptkn (Path Token)
Strings containing multiple '/' characters are typically used
to identify the location of the category and leaf nodes in the
tree (ie the path to the node). The '/' characters are
delimiters which break up the string into small pieces, (which
are usually the names of classes).
These pieces are called "path tokens" or "ptkns"
Leaf Node:
It exists as a node in a tree (instead of a simple string)
because, just like member variables in a class in an
object oriented programming language (or in a C struct)
language, variables in ttree belong to the class in
which they are defined. The node's location in the
tree represents which class it belongs to.
If a variable's leaf node name
refers to a node which does no exist yet, then we create it
(assuming the "create_missing_nodes" argument is "True").
Category Node/Name:
Categories are a peculiar feature of ttree. Categories
are groups of variables that share the same counter when
numeric values are automatically given to each variable.
So you can think of a category as a counter with a name.
Variables in different categories have different counters,
and are assigned numeric values independently.
Consequently two variables in different categories
may be assigned the same number. But two variables
in the same category are always given unique values.
Counters are typically global, but can have local scope.
(ie, only defined within a Class, or an instantiated
class, and whatever other classes are nested or
instantiated beneath it.)
Therefore to identify a counter/category you must specify
both a name AND a node. The node identifies the class where
the scope is defined. It is assumed that the Leaf Node
(see above) lies within this scope (ie. somewhere after
it in the tree).
Example: local counters are used to keep track of the
residues within in a protein chain. If we use a class to
represent the protein, we can create a local residue-
counter (category) within that protein. Then when we
instantiate the protein multiple times, this counter
is reset for every new instance of of the protein.
"""
cat_name, cat_ptkns, leaf_ptkns = DescrToCatLeafPtkns(descr_str, dbg_loc)
# ---- ellipsis hack ----
#
# Search for class:
# Most users expect ttree.py to behave like a
# standard programming language: If the class they are
# instantiating was not defined in this specific
# location, they expect ttree.py to search for
# it outwards, first in the parent's environment,
# and then in the parent's parent's environment,
# and so on, until the object is found.
# For example, most users expect this to work:
# class Res{
# write("Atoms") {
# $atom:CA @atom:CA 0.123 1.234 2.345
# $atom:CB @atom:CB 1.234 2.345 3.456
# }
# }
# class Protein{
# write_once("AnglesByType") {
# @angle:backbone @atom:Res/CA @atom:Res/CA @atom:Res/CA
# }
# Notice that in class Protein, we did not have to specify
# where "Res" was defined because it is defined in the parent
# environment (ie. immediately outside Proteins's environment).
# The general way to do this in ttree.py, is to
# use ellipsis syntax "@atom:.../Res/CA" symbol. The
# ellipsis ".../" tells ttree.py to search upwards
    # for the object to the right of it ("Res").
# In order to make ttree.py behave the way
# most users are expecting, we artificially insert a
# ".../" before the class name here. (Later on, the
# code that processes the ".../" symbol will take
    # care of finding it. We don't have to worry
    # about doing that now.)
#
# I think we only want to do this for variables with path information
# such as "@atom:Res/CA" (which means that leaf_ptkns = ['Res', 'CA']).
# For simple variables like "@atom:CA", we don't automatically look upwards
    # unless the user explicitly requests it.
# (That's why we check to make sure that len(leaf_ptkns) > 1 below
# before we insert '...' into the leaf_ptkns.)
# In other words, the two variables "@atom:CA" below are treated differently
#
# A {
# write("Atoms") {
# @atom:CA
# }
# class B {
# write("Atoms") {
# @atom:CA
# }
# }
# }
#
if ((descr_str.find(':') != -1) and
#(not ((len(leaf_ptkns) == 1) and
# (leaf_ptkns[0] == context_node.name))) and
#(len(leaf_ptkns) > 0) and
(len(leaf_ptkns) > 1) and
(len(leaf_ptkns[0]) > 0) and
(leaf_ptkns[0][0] not in ('.', '*', '?'))):
leaf_ptkns.insert(0, '...')
# ---- Done with "ellipsis hack" -----
# sys.stderr.write(' DescrToCatLeafNodes(): (cat_ptkns, cat_name, lptkns) = ('+
# str(cat_ptkns)+', \"'+cat_name+'\", '+str(leaf_ptkns)+')\n')
cat_node = None
cat_start_node = context_node
leaf_start_node = context_node
if (len(cat_ptkns) > 0):
if cat_ptkns[-1] == '...':
# The "..." in this position means trace the path from the
# current node (context_node) up to cat_ptkns[:-1].
cat_start_node = PtknsToNode(cat_ptkns[:-1], context_node, dbg_loc)
# Later on, we will search upwards until we find an ancestor
# node containing a category matching cat_name. This will
# be taken care of later. (See "if cat_node is None:" below.)
else:
# In this case, the user supplied an explicit path
# for the category node. Find it now.
cat_node = PtknsToNode(cat_ptkns, context_node, dbg_loc)
# Whenever the user supplies an explicit path, then
# the cat node should be the starting location from
# which the leaf path is interpreted. This nearly
# insures that the leaf node will be an ancestor
# of the category node, which is what we want.
leaf_start_node = cat_node
if cat_node is None:
# Otherwise, the user did not indicate where the category
# node is defined, but only supplied the category name.
# (This is the most common scenario.)
# In this case, climb up the tree to the parent
# until you find an ancestor with a category whose
# name matches cat_name.
cat_node = FindCatNode(cat_name, cat_start_node, dbg_loc)
if (cat_name not in cat_node.categories):
if create_missing_nodes:
# If this is the first time we encountered a variable in this
# category (ie if it's the first time we encountered a variable
# with this category's name and node), then we must create a
# new entry in the cat_node.categories associative container
# (using cat_name as the dictionary key).
cat_node.categories[cat_name] = Category(cat_name)
else:
raise InputError('Error(' + g_module_name + '.DescrToCatLeafNodes()):\n'
' Error near ' +
ErrorLeader(dbg_loc.infile, dbg_loc.lineno) + '\n'
' Category named \"' + cat_name + '\" not found at\n'
' position ' + NodeToStr(cat_node) + '\n')
# ---------- Now look up the leaf node -----------
if (len(leaf_ptkns) > 0) and (leaf_ptkns[-1] == 'query()'):
# Special case: "query()"
# Variables named "query()" are not really variables.
# (They are a way for users to query a category's counter.)
# But we treat them as such internally. Consequently we
# give them unique names to avoid clashes (just in case
# "query()" appears multiple times in the same context).
#leaf_ptkns[-1] = '__query__'+dbg_loc.infile+'_'+str(dbg_loc.lineno)
leaf_ptkns[-1] = '__query__' + str(dbg_loc.order)
# Lookup the path for the leaf:
#
# Often, the leaf that the path refers to does not
# exist yet. For example, it is common for a template to
# contain a reference to "$atom:CA". If the current context_node
# is "/protein1/res22", this means that the leaf should be
# at "/protein1/res22/CA". (However in this example, "CA"
# is not a class that has been defined yet. It is the name
# of a variable which which may not have even been mentioned
# before. Think of "CA" as a variable placeholder.
#
# So we follow the path tokens as far as we can:
i_last_ptkn, last_node = FollowPath(leaf_ptkns,
leaf_start_node,
dbg_loc)
# Did we find the node?
if i_last_ptkn == len(leaf_ptkns):
leaf_node = last_node
else:
# If we are here, then we did not find the node.
# The unrecognized token is stored in
# leaf_ptkns[i_last_ptkn]
if leaf_ptkns[i_last_ptkn] == '...':
# ----------------------------------------------
# ---- UGHH I hate dealing with '...' ----
# ----(Messy code to follow in this section)----
# ----------------------------------------------
# The "..." means different things depending on
# whether or not it is the last token in leaf_ptkns.
if i_last_ptkn + 1 < len(leaf_ptkns):
# If "..." is NOT the last token in leaf_ptkns, we
# should search for an ancestor of this node who has
# a child whose name matches a the requested target
# string (located in leaf_ptkns[i_last_ptkn+1])
search_target = leaf_ptkns[i_last_ptkn + 1]
# If such an ancestor exists, then FollowPath()
# should have already found it for us.
# This means it was not found.
# So if there is only one more token in the
# list of tokens, then create the needed node
if (create_missing_nodes and
(i_last_ptkn + 1 == len(leaf_ptkns) - 1)):
# Create a new leaf node and link it:
new_leaf_name = leaf_ptkns[-1]
parent_node = last_node
                    # Is this parent_node a StaticObj? (..or does it inherit
                    # from StaticObj?)
if isinstance(parent_node, StaticObj):
parent_node.children[new_leaf_name] = StaticObj(
new_leaf_name, parent_node)
elif isinstance(parent_node, InstanceObj):
parent_node.children[new_leaf_name] = InstanceObjBasic(
new_leaf_name, parent_node)
else:
assert(False) # (only 2 types of nodes are possible)
# Now assign the pointer
leaf_node = parent_node.children[new_leaf_name]
else:
# In that case, we were unable to find the node referenced
# by "..."
raise InputError('Error(' + g_module_name + '.DescrToCatLeafNodes()):\n'
# containing ellipsis (...)\n'
' Broken path.\n'
' class/variable \"' + search_target + '\" not found in this\n'
' context: \"'
#+var_ref.prefix + var_ref.descr_str + var_ref.suffix+'\"\n'
+ descr_str + '\"\n'
' located near ' + ErrorLeader(dbg_loc.infile, dbg_loc.lineno))
else: # if i_last_ptkn+1 < len(leaf_ptkns):
# If "..." IS the last token, then it means:
# we want to search for the CATEGORY NAME,
# This is very different.
# It means we need to:
# search backwards up the ancestor tree until
# we find an ancestor variable (of last_node)
# which has the right category, (ie until you
# find an ancestor node with a variable (VarRef)
# pointing to it with belonging to the correct
# category node and name (determined above).)
# If not found, then use the current context_node.
assert(cat_name in cat_node.categories)
var_bindings = cat_node.categories[cat_name].bindings
node = last_node
while (node != None):
# Recall that cat_node.categories[cat_name]
# is a dictionary whose keys are leaf nodes
# corresponding to the variables in this category.
if node in var_bindings:
# then we found it, and we're done
break
else:
node = node.parent
if node != None:
leaf_node = node
else:
# If not found, have it point to the
# current (context) node.
leaf_node = context_node
# -----------------------------------------------
# -- Finished dealing with '...' in leaf_ptkns --
# -----------------------------------------------
elif (create_missing_nodes and
((i_last_ptkn == len(leaf_ptkns) - 1) or
HasWildcard('/'.join(leaf_ptkns)))):
# elif (create_missing_nodes and
# (i_last_ptkn == len(leaf_ptkns)-1)):
# Again, another reason the leaf-node was not found is
# that it refers to a leaf node which has not yet been
# created. If the path was valid until up to the last
# token, then we sould create a new node with this name.
# -- This is a common scenario. --
# -- This is how all new variables are created. --
# Anyway, we handle that here:
# Create a new leaf node and link it:
        new_leaf_name = '/'.join(leaf_ptkns[i_last_ptkn:])
parent_node = last_node
        # Is this parent_node a StaticObj? (..or does it inherit from
        # StaticObj?)
if isinstance(parent_node, StaticObj):
parent_node.children[new_leaf_name] = StaticObj(
new_leaf_name, parent_node)
elif isinstance(parent_node, InstanceObj):
parent_node.children[new_leaf_name] = InstanceObjBasic(
new_leaf_name, parent_node)
else:
assert(False) # (only 2 types of nodes are possible)
# Now assign the pointer
leaf_node = parent_node.children[new_leaf_name]
else:
# Otherwise, the user made a mistake in the path.
# Figure out which kind of mistake and print an error.
if (last_node.parent is None) and (leaf_ptkns[i_last_ptkn] == '..'):
# In that case, we tried to back out beyond the root of the
# tree.
raise InputError('Error(' + g_module_name + '.DescrToCatLeafNodes()):\n'
' Broken path in variable:\n'
#' \"'+var_ref.prefix + var_ref.descr_str + var_ref.suffix+'\"\n'
' \"' + descr_str + '\"\n'
' located near ' +
ErrorLeader(dbg_loc.infile,
dbg_loc.lineno) + '\n'
' There are too many \"..\" tokens in the path string.')
else:
# Then the reason is: The string in leaf_ptkns[i_last_ptkn]
# was supposed to be a child of last_node but a child
# of that name was not found.
raise InputError('Error(' + g_module_name + '.DescrToCatLeafNodes()):\n'
' Broken path / Undefined variable:\n'
#' \"'+var_ref.prefix + var_ref.descr_str + var_ref.suffix+'\"\n'
' \"' + descr_str + '\"\n'
' located near ' +
ErrorLeader(dbg_loc.infile,
dbg_loc.lineno) + '\n'
' Undefined: \"' + \
PtknsToStr(leaf_ptkns) + '\"\n'
' (Specifically \"' + leaf_ptkns[i_last_ptkn] +
'\" is not a subordinate of \"' + MaxLenStr(last_node.name, '/') + '\")')
#'\n This could be a typo or spelling error.')
return cat_name, cat_node, leaf_node
def DescrToVarBinding(descr_str, context_node, dbg_loc):
""" DescrToVarBinding() is identical to LookupVar(), but it has a name
that is harder to remember. See comment for LookupVar() below.
"""
cat_name, cat_node, leaf_node = DescrToCatLeafNodes(descr_str,
context_node,
dbg_loc)
if cat_name in cat_node.categories:
category = cat_node.categories[cat_name]
var_bindings = category.bindings
if leaf_node in var_bindings:
var_binding = var_bindings[leaf_node]
else:
raise InputError('Error(' + g_module_name + '.DescrToVarBinding()):\n'
' Error near ' +
ErrorLeader(dbg_loc.infile, dbg_loc.lineno) + '\n'
' Bad variable reference: \"' + descr_str + '\". No variable\n'
' is bound to that name in category \"' + cat_name + '\" of "' + NodeToStr(cat_node) + '\".\n')
else:
raise InputError('Error(' + g_module_name + '.DescrToVarBinding()):\n'
' Error near ' +
ErrorLeader(dbg_loc.infile, dbg_loc.lineno) + '\n'
' Bad variable reference: \"' + descr_str + '\". There is\n'
' no category named \"' + cat_name + '\" defined for "' + NodeToStr(cat_node) + '\".\n')
return var_binding
# Wrappers:
def LookupVar(descr_str, context_node, dbg_loc):
""" LookupVar() looks up a string (a variable descriptor, which is the
variable's name, excluding the '$', '@' prefixes and any '{}' brackets.)
This function returns the variable's "VarBinding" (the variable-name:value
pair). This is useful for querying or changing the value of a variable.
Because nearly all variables are local, you must specify the starting
node (ie. the node corresponding to the class in which this class
or variable was referred to). This is typically the global node.
"""
return DescrToVarBinding(descr_str, context_node, dbg_loc)
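# Illustrative sketch of calling LookupVar() (the descriptor string below
# is hypothetical, not taken from any real input file). Per the docstring,
# the descriptor is the variable name with the '$'/'@' prefix and any '{}'
# brackets stripped off:
#
#     binding = LookupVar('atomid', context_node, dbg_loc)
#     # "binding" is the VarBinding (variable-name:value pair) for that
#     # variable, which can then be queried or modified.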
def LookupNode(obj_name, starting_node, dbg_loc):
""" LookupNode() parses through a string like
'../ClassA/NestedClassB'
and returns the corresponding node.
Nodes are data types used for representing a class or class instance.
They are also used for storing variables.
'ClassA/NestedClassB/VariableC'
Because nearly all variables are local, you must specify the starting
node (ie. the node corresponding to the class in which this class
or variable was referred to). This is typically the global node.
"""
return StrToNode(obj_name, starting_node, dbg_loc)
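# Illustrative sketch (hypothetical class names, echoing the docstring):
#
#     node = LookupNode('../ClassA/NestedClassB', current_node, dbg_loc)
#     # "node" is the object reached by backing out one level from
#     # current_node and descending into ClassA, then NestedClassB.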
class SimpleCounter(object):
__slots__ = ["n", "nincr"]
# static data attributes:
default_n0 = 1
default_nincr = 1
def __init__(self, n0=None, nincr=None):
if n0 is None:
n0 = SimpleCounter.default_n0
if nincr is None:
nincr = SimpleCounter.default_nincr
self.n = n0 - nincr
self.nincr = nincr
def query(self):
return self.n
def incr(self):
self.n += self.nincr
def __copy__(self): # makes a (deep) copy of the counter in its current state
return SimpleCounter(self.n + self.nincr, self.nincr)
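# Illustrative example of SimpleCounter semantics: the constructor offsets
# the internal state so that the first incr() lands exactly on n0.
#
#     c = SimpleCounter(10, 2)
#     c.incr(); c.query()   # -> 10
#     c.incr(); c.query()   # -> 12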
class Category(object):
"""
Category contains a list of all of the variables which belong
to the same category, as well as some supporting information.
Attributes:
name The name of the category (a string)
bindings An OrderedDict() containing leaf_node:VarBinding
(key:value) pairs. Variables are looked up by their leaf node.
The actual variable name (which simply refers to the leaf node)
and values are both stored in the VarBinding data structure.
counter A counter object like "SimpleCounter". Each time counter.incr()
is invoked it should return a unique string (typically this is
simply a string representing an integer which is incremented).
"""
__slots__ = ["name", "bindings", "counter",
"manual_assignments", "reserved_values"]
def __init__(self,
name='',
bindings=None,
counter=None,
manual_assignments=None,
reserved_values=None):
self.name = name
if bindings is None:
self.bindings = OrderedDict()
else:
self.bindings = bindings
if counter is None:
self.counter = SimpleCounter()
else:
self.counter = counter
if manual_assignments is None:
self.manual_assignments = OrderedDict()
else:
self.manual_assignments = manual_assignments
if reserved_values is None:
self.reserved_values = OrderedDict()
else:
self.reserved_values = reserved_values
class StaticObj(object):
""" StaticObjs and InstanceObjs:
The state of the system is stored in two different trees:
1) The static tree:
StaticObj trees are similar "class" definitions in an OOP language.
These trees contains class definitions, and their nested classes,
and instructions for how to create new instances (copies) of this class.
Nodes in this tree are stored using StaticObjs:
2) The instance tree:
This tree contains classes that have been instantiated, and any sub-
classes (members or attributes) that are instantiated as a result.
This tree is automatically generated by instantiating the root
StaticObj. Nodes in this tree are stored using InstanceObjs.
StaticObjs and InstanceObjs both contain
"commands" (commands which usually involve instructions
for writing templates)
"categories" (local counters used to assign variables. See below.)
"children" (Nested class definitions -NOT- OOP child classes. See below.)
StaticObjs also contain
"instance_commands"
"instance_categories"
These three members contain information to create a new instance/copy
of this class (how to construct an InstanceObj from an StaticObj).
StaticObj contains the member function Parse() which builds the global static
tree by parsing the contents of a text file supplied by the user.
The external function BuildInstanceTree(), creates the global instance tree
from the global static tree (a tree of StaticObjs).
----- CLASS MEMBERS OF StaticObj: ----
0) Name:
Every class (object type) has a name. It is stored in self.name.
To make it easier to distinguish the names of classes from the names of
individual instances of that class, I recommend using a capital letter
for the name of a class type (and lower-case letters for instances).
1) Commands
Commands are usually instructions for writing templates.
Templates are blocks of ordinary text which contain variables.
(Variables in this program consist of variable names, categories,
and (eventually) bound values (usually generated automatically),
which will be substituted into the template to generate a text file.)
A class can contain multiple templates, each one having a unique name
which also happens to be the name of the file that will be created when
the template is written.
Variants:
self.commands:
Some templates are written immediate after the class is defined
(stored in "self.commands").
Example: The "write_once()" command.
self.instance_commands:
Other templates are written when an instance/copy of the class is created
(stored in "self.instance_commands".
Example: The "write()" command.
2) Children
self.children:
Class definitions can be defined from within the definition of other
("parent") classes. These nested classes are referred to as "children".
These sub-classes are not "children" in the OOP sense of the word at
all (they do not have any of the traits of their "parents").
However in the source code I refer to them as "children" because here
they are implemented as "child" nodes (branches) in the tree-like
data-structure used to store class definitions (the static tree).
3) Categories
This is a new concept and is difficult to explain.
Recall that each class contains a list of templates containing raw text,
interspersed with variables (whose values will determined later).
In most cases, variables are assigned to integer values which are
automatically generated by incrementing a counter. Simply put,
"categories" are collections of variables which share the same counter.
Within a category, the goal is to assign a unique integer (or other
symbol) to each distinct variable in this category.
To avoid name-clashes, variable names have local "scope".
This scope is determined by the variable's leaf node (the "leaf_token").
Categories can be specific to a particular class (node), and any of the
classes (nodes) which are nested within it, but by default are global.
(This means they "belong" to the global (outermost) node by default.)
All the various categories which are defined within a particular
StaticObj are stored in self.categories.
Static variables (ie. variables with a '@' prefix) are stored this way.
"self.categories"
If a class contains a new category, it means that if any nested
classes defined within that class definition contain (static, '@')
variables whose categories match the category name, their values will
be determined by looking up the counter associated with this category
stored locally (here) in self.categories. All variables belonging
to this category are stored in "self.categories[category_name]".
"self.instance_categories"
Recall that creating a new copy (instance) of a class automatically
creates an InstanceObj in the instance-tree. InstanceObj's have a
".categories" attribute of their own, the contents of which are
copied from this StaticObj's "self.instance_categories" attribute.
Instantiating a new class also spawns the instantiation of any
sub-classes.
If any of these "instance children" contain variables whose category
names match a category stored in the parent InstanceObj's .categories
dictionary, then their values will be determined by that InstanceObj's
counter for that category name.
4) Parent:
A link to the parent StaticObj is stored in self.parent.
"""
__slots__ = ["name",
"parent",
"children",
"categories",
"commands",
"srcloc_begin",
"srcloc_end",
"deleted",
"class_parents",
"namespaces",
"instname_refs",
"instance_categories",
"instance_commands_push",
"instance_commands",
"instance_commands_pop"]
def __init__(self,
name='',
parent=None):
"""
The members/attributes of StaticObj are defined in the comment
for StaticObj above. """
# The following members are shared by both InstanceObj and StaticObj:
self.name = name
self.parent = parent # For traversing the global static template tree
self.children = OrderedDict() # Nested class definitions.
self.categories = OrderedDict() # <- new variable categories that are only defined
# in the context of this molecule's type definition
self.commands = [] # Commands to carry out (only once)
# vb##self.var_bindings=[] # List of variables assigned to this
# object.
self.srcloc_begin = None # Keep track of location in user files
self.srcloc_end = None # (useful for error message reporting)
self.deleted = False # Users can delete static objects?
# (why not?)
# The following members are not shared with InstanceObj:
self.class_parents = [] # classes we inherit traits from (this is
# similar to the parent/child relationship
# in an object-oriented-programming language)
self.namespaces = [] # A list of classes we also look in when searching
# for other static nodes or variables. (similar to
# class_parents, but only used for searches.)
self.instname_refs = {} # <-- used for debugging to insure that
# two instances do not have the same name
# <-new variable categories that are defined
self.instance_categories = OrderedDict()
# within the scope of this molecule's instance
self.instance_commands_push = [] # 1) process these commands first by adding
# these commands to InstanceObj.commands
# (before you deal with class_parents)
self.instance_commands = [] # 2) then add this to InstanceObj.commands
self.instance_commands_pop = [] # 3) finally add these commands
def DeleteSelf(self):
for child in self.children.values():
child.DeleteSelf()
self.deleted = True
def IsDeleted(self):
return self.deleted
# vb##def AddVarBinding(self, var_binding):
# vb## if self.var_bindings is None:
# vb## self.var_bindings = [var_binding]
# vb## else:
# vb## self.var_bindings.append(var_binding)
def Parse(self, lex):
""" Parse() builds a static tree of StaticObjs by parsing text file.
-The "lex" argument is a file or input stream which has been converted
to a "TemplateLexer" object (similar to the python's built-in shlex lexer).
"""
# The next two variables store a stack of commands the user wants
# to manually add to the list of stackable instance_commands.
# (Allowing the users to directly manipulate the transformation stack
# is an experimental feature as of 2015- Most users don't need this.)
user_push_left_commands = []
user_push_right_commands = []
#sys.stdout.write(' -- Parse() invoked --\n')
# Keep track of the location in the users' input files where this
# class object is first defined. (Keep in mind that the user might
# augment their original class definition, adding new content to an
# existing class. In that case self.srcloc_begin will have already
# been assigned. We don't want to overwrite it in that case.)
if self.srcloc_begin is None: # <-- not defined yet?
self.srcloc_begin = lex.GetSrcLoc()
while True:
cmd_token = lex.get_token()
#print('Parse(): token = \"'+cmd_token+'\", '+lex.error_leader())
if cmd_token == lex.eof:
#print('Parse(): EOF encountered\n')
break
if ((cmd_token == 'write') or
(cmd_token == 'write_once') or
(cmd_token == 'create_var') or
(cmd_token == 'replace')):
open_paren = lex.get_token()
#print('Parse(): open_paren=\"'+open_paren+'\"')
if open_paren == '{':
# ..then the user neglected to specify the "dest" file-name
# argument. In that case, supply the default, ''.
# (which is shorthand for the standard out in this case)
open_curly = open_paren[0]
open_paren = ''
close_paren = ''
tmpl_filename = ''
srcloc = lex.GetSrcLoc()
else:
tmpl_filename = lex.get_token()
if tmpl_filename == ')':
tmpl_filename = ''
close_paren = ')'
else:
close_paren = lex.get_token()
open_curly = lex.get_token()
srcloc = lex.GetSrcLoc()
if (cmd_token == 'create_var'):
tmpl_filename = None
# This means: define the template without attaching
# a file name to it. (IE., don't write the contents
# of what's enclosed in the curly brackets { } to a file.
# Why?
# "create_var" commands are implemented as "write() {...}"
# commands (containing one or more variables) which
# never get written to a file or the terminal. Parsing
# the contents of the curly brackets defines the variables
# inside in the same way as parsing the text inside an
# ordinary "write() {...}" command.
if (cmd_token == 'replace'):
tmpl_filename = "ttree_replacements.txt"
if ((open_curly != '{') or
((open_paren == '') and (close_paren != '')) or
((open_paren == '(') and (close_paren != ')'))):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error in ' + lex.error_leader() + '\n\n'
'Syntax error at the beginning of the \"' + cmd_token + '\" command.')
if tmpl_filename != None:
tmpl_filename = RemoveOuterQuotes(
tmpl_filename, lex.quotes)
# ( The previous line is similar to:
# tmpl_filename = tmpl_filename.strip(lex.quotes) )
tmpl_contents = lex.ReadTemplate()
StaticObj.CleanupReadTemplate(tmpl_contents, lex)
#sys.stdout.write(' Parse() after ReadTemplate, tokens:\n\n')
# print(tmpl_contents)
# sys.stdout.write('\n----------------\n')
if (cmd_token == 'write_once' or
cmd_token == 'replace'):
# Check for a particular bug:
# Ordinary instance variables (preceded by a '$')
# should never appear in a write_once() statement.
for entry in tmpl_contents:
if (isinstance(entry, VarRef) and
(entry.prefix[0] == '$')):
err_msg = ('Error(' + g_module_name + '.StaticObj.Parse()):\n' +
' Error near ' + ErrorLeader(entry.srcloc.infile,
entry.srcloc.lineno) + '\n' +
' Illegal variable: \"' + entry.prefix + entry.descr_str + entry.suffix + '\"\n' +
' All variables in a \"' + cmd_token + '\" statement must be statically\n' +
' defined, and hence they must begin with a \'@\' prefix character.\n' +
' (not a \'$\' character).\n')
if (cmd_token == 'write_once'):
err_msg += ' Suggestion: Use the \"write()\" command instead.\n'
raise InputError(err_msg)
if cmd_token == 'write':
commands = self.instance_commands
elif (cmd_token == 'write_once' or
cmd_token == 'replace'):
commands = self.commands
elif (cmd_token == 'create_var'):
commands = self.instance_commands
else:
assert(False)
command = WriteFileCommand(tmpl_filename,
tmpl_contents,
srcloc)
commands.append(command)
# end of "if (cmd_token == 'write') or (cmd_token ==
# 'write_once'):"
elif cmd_token == 'delete':
instobj_descr_str = lex.get_token()
instobj_srcloc = lex.GetSrcLoc()
delete_command = DeleteCommand(instobj_srcloc)
mod_command = ModCommand(delete_command,
instobj_descr_str)
self.instance_commands.append(mod_command)
elif cmd_token == 'using':
namespacecom_str = lex.get_token()
if namespacecom_str != 'namespace':
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' The \"' + cmd_token + '\" command must be followed by the \"namespace\" keyword.')
namespace_str = lex.get_token()
stnode = StrToNode(namespace_str,
self,
lex.GetSrcLoc())
self.namespaces.append(stnode)
elif cmd_token == 'category':
cat_name = lex.get_token()
cat_count_start = 1
cat_count_incr = 1
open_paren = lex.get_token()
if (open_paren == '('):
token = lex.get_token()
if token == ',':
token = lex.get_token()
if token != ')':
# Interpret token as an integer, float, or string
try:
cat_count_start = int(token)
except ValueError:
try:
cat_count_start = float(token)
except ValueError:
cat_count_start = RemoveOuterQuotes(
token, '\'\"')
token = lex.get_token()
if token == ',':
token = lex.get_token()
if token != ')':
# Interpret token as an integer,float,or string
try:
cat_count_incr = int(token)
except ValueError:
try:
cat_count_incr = float(token)
except ValueError:
cat_count_incr = RemoveOuterQuotes(
token, '\'\"')
token = lex.get_token()
if token != ')':
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' \"' + cmd_token + ' ' + cat_name + '...\" has too many arguments,\n'
' or lacks a close-paren \')\'.\n')
else:
lex.push_token(open_paren)
if (isinstance(cat_count_start, basestring) or
isinstance(cat_count_incr, basestring)):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' \"' + cmd_token + ' ' + cat_name + '(' +
str(cat_count_start) + ',' +
str(cat_count_incr) + ')\"\n'
' Only numeric counters are currently supported.\n')
# check for really stupid and unlikely errors:
if type(cat_count_start) is not type(cat_count_incr):
if ((isinstance(cat_count_start, int) or
isinstance(cat_count_start, float))
and
(isinstance(cat_count_incr, int) or
isinstance(cat_count_incr, float))):
cat_count_start = float(cat_count_start)
cat_count_incr = float(cat_count_incr)
else:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' Problem with \"' + cmd_token + '\" command.\n')
prefix = cat_name[0]
cat_name = cat_name[1:]
# Add this category to the list.
if prefix == '@':
self.categories[cat_name] = Category(cat_name)
self.categories[cat_name].counter = SimpleCounter(cat_count_start,
cat_count_incr)
elif prefix == '$':
self.instance_categories[cat_name] = Category(cat_name)
self.instance_categories[cat_name].counter = SimpleCounter(cat_count_start,
cat_count_incr)
else:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' category name = \"' + cat_name + '\" lacks a \'@\' or \'$\' prefix.\n'
' This one-character prefix indicates whether the variables in this\n'
' new category will be static or dynamic variables.\n')
elif (cmd_token == '}') or (cmd_token == ''):
# a '}' character means we have reached the end of our scope.
# Stop parsing and let the caller deal with the remaining text.
# (And a '' means we reached the end of the file... I think.)
break
# elif (cmd_token == 'include'):
# "include filename" loads a file (adds it to the file stack)
# The "TtreeShlex" class (from which "lex" inherits) handles
# "include" statements (ie. "source" statements) automatically.
elif ((cmd_token == 'push') or
(cmd_token == 'push_left') or
(cmd_token == 'push_right')):
push_cmd_src_loc = lex.GetSrcLoc()
push_cmd_text = lex.GetParenExpr()
if ((len(push_cmd_text) < 2) or
(push_cmd_text[0] != '(') or
(push_cmd_text[-1] != ')')):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' Bad \"push\" command. Expected an expression in parenthesis.\n')
push_cmd_text = push_cmd_text[1:-1]
if (cmd_token == 'push_right'):
push_command = PushRightCommand(push_cmd_text,
push_cmd_src_loc)
user_push_right_commands.append(push_command)
else:
push_command = PushLeftCommand(push_cmd_text,
push_cmd_src_loc)
user_push_left_commands.append(push_command)
self.instance_commands.append(push_command)
elif ((cmd_token == 'pop') or
(cmd_token == 'pop_left') or
(cmd_token == 'pop_right')):
pop_cmd_text = lex.GetParenExpr()
pop_cmd_src_loc = lex.GetSrcLoc()
if (cmd_token == 'pop_right'):
if len(user_push_right_commands) > 0:
push_command = user_push_right_commands.pop()
else:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' Too many \"pop_right\" commands.\n')
pop_command = PopRightCommand(push_command,
pop_cmd_src_loc)
else:
if len(user_push_left_commands) > 0:
push_command = user_push_left_commands.pop()
else:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' Too many pop, (or pop_left) commands.\n')
pop_command = PopLeftCommand(push_command,
pop_cmd_src_loc)
self.instance_commands.append(pop_command)
else:
# Otherwise, 'cmd_token' is not a command at all.
# Instead it's the name of an object which needs to be
# defined or instantiated.
# First, let's figure out which.
# (small detail: The "class" keyword is optional
# and can be skipped.)
if cmd_token == 'class':
object_name = lex.get_token()
else:
object_name = cmd_token
next_symbol = lex.get_token()
#print('Parse(): next_token=\"'+next_symbol+'\"')
class_parents = []
if next_symbol == 'inherits':
# Then read in the list of classes which are parents of
# this class. (Multiple inheritance is allowed.)
# (We don't yet check to insure that these are valid class
# names. We'll do this later in LookupStaticRefs().)
syntax_err_inherits = False
while True:
next_symbol = lex.get_token()
if ((next_symbol == '{') or
(next_symbol == lex.eof)):
break
elif (next_symbol == '='):
syntax_err_inherits = True
break
else:
class_parents.append(StrToNode(next_symbol,
self,
lex.GetSrcLoc()))
if len(class_parents) == 0:
syntax_err_inherits = True
if syntax_err_inherits:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' \"inherits\" should be followed by one or more class names.\n')
if next_symbol == '{':
child_name = object_name
# Check to see if this class has already been defined.
# (IE. check if it present in the list of children.)
# If the name (child_name) matches another class (child),
# then the contents of the new class will be appended to
# the old. This way, class definitions can be augmented
# later. (This is the way "namespaces" work in C++.)
child = self.children.get(child_name)
# If found, we refer to it as "child".
# If not, then we create a new StaticObj named "child".
if child is None:
child = StaticObj(child_name, self)
self.children[child_name] = child
assert(child.name == child_name)
# Either way we invoke child.Parse(), to
# add contents (class commands) to child.
child.Parse(lex)
child.class_parents += class_parents
elif next_symbol == '=':
next_symbol = lex.get_token()
if next_symbol == 'new':
base_name = object_name
base_srcloc = lex.GetSrcLoc()
array_slice_str = ''
if base_name.find('/') != -1:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + ErrorLeader(base_srcloc.infile,
base_srcloc.lineno) + '\n'
' (You can not instantiate some other object\'s members.)\n'
' Invalid instance name: \"' + base_name + '\"\n')
elif base_name in self.instname_refs:
ref_srcloc = self.instname_refs[base_name]
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Duplicate class/array \"' + base_name + '\"\n'
' This occurs near:\n'
' ' + ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno) + '\n'
' and also near:\n'
' ' + ErrorLeader(base_srcloc.infile,
base_srcloc.lineno) + '\n')
else:
self.instname_refs[base_name] = base_srcloc
# Check for syntax allowing the user to instantiate
# PART of an array. For example, check for this syntax:
# "monomers[20-29] = new ...". This only fills in a
# portion of the array from: monomers[20]...monomers[29]
#
# We also have to deal with multidimensional syntax
# like this: "cells[3][2-3][1][4-7] = new..."
# Split the "cells[3][2-3][1][4-7]" string into
# "cells[3][", "][1][", and "]".
# Later, we will instantiate InstanceObjs with names:
# "cells[3][2][1][4]"
# "cells[3][2][1][5]"
# "cells[3][2][1][6]"
# "cells[3][2][1][7]"
# "cells[3][3][1][4]"
# "cells[3][3][1][5]"
# "cells[3][3][1][6]"
# "cells[3][3][1][7]"
p1 = base_name.find('[')
if p1 == -1:
p1 = len(base_name)
else:
p1 += 1
array_name_tkns = [base_name[0:p1]]
array_name_offsets = []
p2 = -1
p4 = p1
while p4 < len(base_name):
p3 = base_name.find(']', p1)
if p3 == -1:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Expected a \']\' character following:\n'
' \"' +
base_name[0:p1] +
'\", located near:\n'
' ' + ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno) + '\n')
# Search for a '-', ':', or '*' character between []
# For example "monomers[20-29] = "
# If present, the user wants us to fill a range
# inside an array. This could be a multi-dimensional
# array, (eg "cells[3][2-6][4-11] = "), so we must
# figure out which entries in the array the user
# wants us to fill (in this case, "[2-6][4-11]")
p2 = base_name.find('-', p1)
if p2 == -1:
p2 = len(base_name)
if p2 > p3:
p2 = base_name.find(':', p1)
if p2 == -1:
p2 = len(base_name)
if p2 > p3:
p2 = base_name.find('*', p1)
if p2 == -1:
p2 = len(base_name)
p4 = p3 + 1
if p4 < len(base_name):
if base_name[p4] == '[':
p4 += 1 # skip over it
else:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Expected a \'[\' character following a \']\' character in\n'
' \"' +
base_name[
0:p2 + 1] + '\", located near:\n'
' ' + ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno) + '\n')
if p2 > p3:
# Then no '-', ':', or '*' character was found
# between '[' and the subsequent ']' character
# In that case, ignore this token
token = base_name[p1:p4]
# append all this text to the previous token
if len(array_name_tkns) == 0:
array_name_tkns.append(token)
else:
array_name_tkns[-1] = array_name_tkns[-1] + token
array_slice_str = 'slice '
else:
assert((p1 < p2) and (p2 < p3))
index_offset_str = base_name[p1:p2]
if len(index_offset_str) == 0:
index_offset = 0
elif (not str.isdigit(index_offset_str)):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Expected a nonnegative integer preceding the \'' +
base_name[
p2] + '\' character in:\n'
' \"' +
base_name[
0:p2 + 1] + '\", located near:\n'
' ' + ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno) + '\n')
else:
index_offset = int(index_offset_str)
token = base_name[p3:p4]
array_name_tkns.append(token)
array_name_offsets.append(index_offset)
p1 = p4
# If the statobj_str token contains a ']' character
# then this means the user wants us to make multiple
# copies of this template. The number of copies
# to instantiate is enclosed in the [] characters
# (Example wat = new Water[3000] creates
# 3000 instantiations of the Water template
# named wat[0], wat[1], ... wat[2999]).
# Note: Here '[' and ']' have a special meaning.
# So lex.get_token() should not treat them as
# ordinary word characters. To prevent this:
orig_wordterminators = lex.wordterminators
lex.wordterminators += '[],'
class_name_str = lex.get_token()
if ((class_name_str == lex.eof) or
(class_name_str == '}')):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
'Class ends prematurely. (Incomplete \"new\" statement.)')
assert(len(class_name_str) > 0)
if (class_name_str[0] == '['):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' new ' + class_name_str + '\n'
'Bracketed number should be preceded by a class name.')
class_names = []
weights = []
num_by_type = []
if class_name_str == 'random':
class_names, weights, num_by_type = self._ParseRandom(
lex)
tmp_token = lex.get_token()
if len(tmp_token) > 0:
if tmp_token[0] == '.':
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' \"' + tmp_token + '\" should not follow random()\n'
'\n'
' Coordinate transformations and other commands (such as \"' +
tmp_token + '\")\n'
' should appear after each class name inside the random() statement,\n'
' not after it. For example, do not use:\n'
' \"lipids=new random([DPPC,DLPC],[0.5,0.5]).move(0,0,23.6)\"\n'
' Use this instead:\n'
' \"lipids=new random([DPPC.move(0,0,23.6),DLPC.move(0,0,23.6)],[0.5,0.5])\"\n')
lex.push_token(tmp_token)
else:
class_name, class_suffix, class_suffix_srcloc = \
self._ProcessClassName(class_name_str, lex)
array_size = []
array_suffixes = []
array_srclocs = []
# A general "new" statement could look like this:
# "m = new Mol.scale(3) [2].trans(0,4.5,0).rotate(30,0,0,1)
# [3].trans(0,0,4.5)"
# So far we have processed "m = new Mol.scale(3)".
# Now, we need to deal with:
# "[2].trans(0,4.5,0).rotate(30,0,0,1) [3].trans(0,0,4.5)"
while True:
new_token = lex.get_token()
# if ((new_token == '') or (new_token == lex.eof)):
# break
if new_token == '[':
number_str = lex.get_token()
close_bracket = lex.get_token()
if ((not str.isdigit(number_str)) or
(close_bracket != ']')):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error in \"new\" statement near ' + lex.error_leader() + '\n'
' A \'[\' character should be followed by a number and a \']\' character.')
array_size.append(int(number_str))
suffix = lex.get_token()
if ((suffix == '') or (suffix == lex.eof)):
array_suffixes.append('')
array_srclocs.append(base_srcloc)
break
if suffix[0] == '.':
lex.push_token(suffix[1:])
suffix_func = lex.GetParenExpr()
suffix = '.' + suffix_func
array_suffixes.append(suffix)
array_srclocs.append(lex.GetSrcLoc())
else:
array_suffixes.append('')
array_srclocs.append(base_srcloc)
lex.push_token(suffix)
if suffix != '[':
break
else:
lex.push_token(new_token)
break
srcloc_final = lex.GetSrcLoc()
lex.wordterminators = orig_wordterminators
assert(len(array_size) == len(array_suffixes))
if len(array_size) > 0:
if len(array_name_offsets) == 0:
assert(len(array_name_tkns) == 1)
array_name_offsets = [0] * len(array_size)
array_name_tkns[0] = array_name_tkns[0] + '['
for d in range(0, len(array_size) - 1):
array_name_tkns.append('][')
array_name_tkns.append(']')
if len(array_name_offsets) != len(array_size):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error in \"new\" statement near/before ' + lex.error_leader() + '\n'
' Array ' + array_slice_str +
'dimensionality on the left side of the \'=\' character (' + str(
len(array_name_offsets)) + ')\n'
' does not match the array dimensionality on the right side (' + str(len(array_size)) + ').\n')
# If the user wants us to instantiate a
# multidimensional array of class instances
# then we must loop through this multidimensional
# array and create a new instance for each entry.
# For example fill a 3 dimensional volume
# with 1000 water molecules
# Example 1:
# solvent = new Water [10][10][10]
# (The coordinates must be read separately.)
# In this example array_size = [10,10,10]
# array_suffixes = ['','','']
# Example 2:
# solvent = new Water.transcm(0,0,0)
# [10].trans(0,0,4)
# [10].trans(0,4,0).rot(45,0,0,1)
# [10].trans(4,0,0)
# (This command generates a 10x10x10 lattice
# simple cubic lattice of regularly spaced
# water molecules pointing the same direction.)
# In this example array_size = [10,10,10]
# and
# class_suffix = 'transcm(0,0,0)'
# and
# array_suffixes = ['trans(0,0,4)',
# 'trans(0,4,0).rot(45,0,0,1)',
# 'trans(4,0,0)']
# Note that ttree ignores the "trans()"
# commands; it stores them so that inherited
# classes can attempt to process them.
D = len(array_size)
if D > 0:
i_elem = 0 # (used to look up selection_list[])
if len(num_by_type) > 0:
selection_list = []
for i in range(0, len(num_by_type)):
selection_list += [i] * num_by_type[i]
random.shuffle(selection_list)
num_elements = 1
for d in range(0, D):
num_elements *= array_size[d]
err_msg_str = str(array_size[0])
for d in range(1, D):
err_msg_str += '*' + str(array_size[d])
if num_elements != len(selection_list):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near or before ' + lex.error_leader() + '\n'
' The sum of the numbers in the \"new random([],[])\" command (' + str(
len(selection_list)) + ')\n'
' does not equal the number of elements in the array (' + err_msg_str + ')\n')
digits = [0 for d in range(0, D)]
table_filled = False
pushed_commands = []
while (not table_filled):
instance_name = array_name_tkns[0]
for d in range(0, D):
i = digits[d]
instance_name += str(i +
array_name_offsets[d]) +\
array_name_tkns[d + 1]
# Does the user want us to select
# a class at random?
if len(class_names) > 0:
if len(num_by_type) > 0:
class_name_str = class_names[
selection_list[i_elem]]
else:
class_name_str = RandomSelect(class_names,
weights)
class_name, class_suffix, class_suffix_srcloc = \
self._ProcessClassName(
class_name_str, lex)
if class_suffix != '':
class_suffix_command = \
PushRightCommand(class_suffix.lstrip('.'),
class_suffix_srcloc)
self.instance_commands.append(
class_suffix_command)
command = \
InstantiateCommand(instance_name,
ClassReference(class_name,
base_srcloc),
base_srcloc)
self.instance_commands.append(command)
if class_suffix != '':
command = \
PopRightCommand(class_suffix_command,
srcloc_final)
self.instance_commands.append(command)
# Now go to the next entry in the table.
# The indices of this table are similar to
# a D-digit integer. We increment this d-digit
# number now.
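# (Illustrative example: with array_size=[2,3],
# "digits" advances [0,0] -> [0,1] -> [0,2] ->
# [1,0] -> [1,1] -> [1,2], after which the
# table is full.)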
d_carry = D - 1
while True:
digits[d_carry] += 1
if digits[d_carry] >= array_size[d_carry]:
digits[d_carry] = 0
if array_suffixes[d_carry] != '':
for i in range(0, array_size[d_carry] - 1):
partner = pushed_commands.pop()
command = PopRightCommand(partner,
srcloc_final)
self.instance_commands.append(
command)
d_carry -= 1
else:
if array_suffixes[d_carry] != '':
command = PushRightCommand(array_suffixes[d_carry].lstrip('.'),
array_srclocs[d_carry])
pushed_commands.append(command)
self.instance_commands.append(
command)
break
if d_carry < 0:
table_filled = True
break
# (used to look up selection_list[])
i_elem += 1
pass
else:
if len(class_names) > 0:
assert(len(num_by_type) == 0)
# if len(num_by_type) > 0:
# class_name_str = class_names[selection_list[i_elem]]
# else:
# class_name_str = RandomSelect(class_names,
# weights)
class_name_str = RandomSelect(class_names,
weights)
class_name, class_suffix, class_suffix_srcloc = \
self._ProcessClassName(class_name_str, lex)
if class_suffix != '':
class_suffix_command = \
PushRightCommand(class_suffix.lstrip('.'),
class_suffix_srcloc)
self.instance_commands.append(
class_suffix_command)
command = \
InstantiateCommand(base_name,
ClassReference(class_name,
base_srcloc),
base_srcloc)
self.instance_commands.append(command)
if class_suffix != '':
command = \
PopRightCommand(class_suffix_command,
srcloc_final)
self.instance_commands.append(command)
else:
# Now check for commands using this syntax:
#
# "MolNew = MolOld.rot(45,1,0,0).scale(100.0)"
# /|\ /|\ `-----------.------------'
# | | |
# child_name parent_name optional suffix
child_name = object_name
parent_name_str = next_symbol
child = StaticObj(child_name, self)
parent_name, suffix, suffix_srcloc = \
self._ProcessClassName(parent_name_str, lex)
child.class_parents.append(StrToNode(parent_name,
self,
lex.GetSrcLoc()))
if suffix != '':
# Assume the command is a StackableCommand. (This
# way it will enclose the commands of the parents.)
# Stackable commands come in (Push...Pop) pairs.
push_command = PushLeftCommand(suffix,
suffix_srcloc)
pop_command = PopLeftCommand(push_command,
suffix_srcloc)
push_mod_command = ModCommand(push_command, './')
pop_mod_command = ModCommand(pop_command, './')
child.instance_commands_push.append(
push_mod_command)
child.instance_commands_pop.insert(
0, pop_mod_command)
#sys.stderr.write('child.instance_commands_push = '+str(child.instance_commands_push)+'\n')
#sys.stderr.write('child.instance_commands_pop = '+str(child.instance_commands_pop)+'\n')
# Check to see if this class has already been defined.
if self.children.get(child_name) is not None:
if self.children[child_name].IsDeleted():
del self.children[child_name]
else:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' The name \"' + child_name + '\" is already in use.')
self.children[child_name] = child
else:
# Otherwise hopefully this is a post-instance command
# (a command applied to a class which has been instantiated)
# In that case, the object_name would be followed by
# a dot and a function-call containing a '(' paren (which
# would have ended up stored in the next_symbol variable).
open_paren_encountered = False
if (next_symbol == '('):
open_paren_encountered = True
# put '(' back in the stream
lex.push_token(next_symbol)
i_dot = object_name.rfind('.')
i_slash = object_name.rfind('/')
dot_encountered = ((i_dot != -1) and
((i_slash == -1) or (i_slash < i_dot)))
if (open_paren_encountered and dot_encountered and
(object_name[:1] != '[')):
obj_descr_str, suffix, suffix_srcloc = \
self._ExtractSuffix(object_name, lex)
path_tokens = obj_descr_str.split('/')
i_last_ptkn, staticobj = FollowPath(path_tokens,
self,
lex.GetSrcLoc())
instobj_descr_str = './' + \
'/'.join(path_tokens[i_last_ptkn:])
# I still support the "object_name.delete()" syntax for
# backwards compatibility. (However newer input files
# use this equivalent syntax: "delete object_name")
if suffix == 'delete()':
delete_command = DeleteCommand(suffix_srcloc)
mod_command = ModCommand(delete_command,
instobj_descr_str)
staticobj.instance_commands.append(mod_command)
else:
push_command = PushLeftCommand(suffix,
suffix_srcloc,
'.')
pop_command = PopLeftCommand(push_command,
suffix_srcloc,
'.')
push_mod_command = ModCommand(push_command,
instobj_descr_str)
pop_mod_command = ModCommand(pop_command,
instobj_descr_str)
if instobj_descr_str != './':
# sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
# staticobj.name+'.instance_commands\n')
staticobj.instance_commands.append(
push_mod_command)
staticobj.instance_commands.append(
pop_mod_command)
else:
# sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
# staticobj.name+'.instance_commands_push\n')
# Question: Should I make these PushRight commands and
# append them in the opposite order?
# If so I also have to worry about the case
# above.
staticobj.instance_commands_push.append(
push_mod_command)
staticobj.instance_commands_pop.insert(
0, pop_mod_command)
else:
# Otherwise, the cmd_token is not any of these:
# "write", "write_once", "replace", "create_vars"
# "delete", or "category".
# ... and it is ALSO not any of these:
# the name of a class (StaticObj), or
# the name of an instance (InstanceObj)
# followed by either a '.' or "= new"
#
# In that case, it is a syntax error:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Syntax error at or before ' + lex.error_leader() + '\n'
' \"' + object_name + ' ' + next_symbol + '\".')
# Keep track of the location in the user's input files
# where the definition of this object ends.
self.srcloc_end = lex.GetSrcLoc()
# Finally, if there are any remaining user_push_left_commands or
# user_push_right_commands, deal with them (by popping them).
for push_command in user_push_left_commands:
push_command = user_push_left_commands.pop()
pop_command = PopLeftCommand(push_command,
self.srcloc_end)
self.instance_commands.append(pop_command)
for push_command in user_push_right_commands:
push_command = user_push_right_commands.pop()
pop_command = PopRightCommand(push_command,
self.srcloc_end)
self.instance_commands.append(pop_command)
@staticmethod
def CleanupReadTemplate(tmpl_contents, lex):
# 1) Remove any newlines at the beginning of the first text block
# in tmpl_contents. (Sometimes they cause ugly extra blank lines.)
assert(len(tmpl_contents) > 0)
if isinstance(tmpl_contents[0], TextBlock):
first_token_strip = tmpl_contents[0].text.lstrip(' ')
if ((len(first_token_strip) > 0) and
(first_token_strip[0] in lex.newline)):
tmpl_contents[0].text = first_token_strip[1:]
tmpl_contents[0].srcloc.lineno += 1
# 2) Remove any trailing '}' characters, and complain if absent.
# (The last token should be a TextBlock containing '}' or ''.)
assert(isinstance(tmpl_contents[-1], TextBlock))
assert(tmpl_contents[-1].text in ['}', ''])
if tmpl_contents[-1].text == '}':
del tmpl_contents[-1]
else:
tmpl_begin = None
if isinstance(tmpl_contents[0], TextBlock):
tmpl_begin = tmpl_contents[0].srcloc
elif isinstance(tmpl_contents[0], VarRef):
tmpl_begin = tmpl_contents[0].srcloc
else:
assert(False)
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n\n'
' Premature end to template.\n'
'(Missing terminator character, usually a \'}\'.) The\n'
'incomplete template begins near ' + ErrorLeader(tmpl_begin.infile, tmpl_begin.lineno) + '\n')
# 3) Finally, if there is nothing but whitespace between the
# last newline and the end, then strip that off too.
if isinstance(tmpl_contents[-1], TextBlock):
i = len(tmpl_contents[-1].text) - 1
if i >= 0:
while ((i >= 0) and
(tmpl_contents[-1].text[i] in lex.whitespace) and
(tmpl_contents[-1].text[i] not in lex.newline)):
i -= 1
if (tmpl_contents[-1].text[i] in lex.newline):
tmpl_contents[-1].text = tmpl_contents[-1].text[0:i + 1]
def LookupStaticRefs(self):
""" Whenever the user requests to instantiate a new copy of a class,
the name of that class is stored in self.instance_commands.
This name is stored as a string. After all of the classes have been
defined, then we go back through the tree and replace these names
with pointers to actual StaticObjs which correspond to those classes.
(This was deferred until all of the classes have been defined so
that users can refer to classes that they will define later on.)
"""
# Now do the same for any children which
# are created during instantiation:
for command in self.instance_commands:
# Does this command create/instantiate a new copy of a class?
if isinstance(command, InstantiateCommand):
# If so, figure out which statobj is referred to by
# statobj_str.
assert(isinstance(command.class_ref.statobj_str, basestring))
command.class_ref.statobj = StrToNode(command.class_ref.statobj_str,
self,
command.class_ref.srcloc)
# Now recursively resolve StaticObj pointers for the "children"
# (in this case, "children" refers to classes whose definitions
# are nested within this one).
for child in self.children.values():
child.LookupStaticRefs()
def _ExtractSuffix(self, class_name_str, lex):
"""
This ugly function helps process "new" commands such as:
mola = new ForceFieldA/../MoleculeA.move(30,0,0).rot(45,0,0,1)
This function expects a string,
(such as "ForceFieldA/../MoleculeA.move(30,0,0).rot(45,0,0,1)")
It extracts the class name "ForceFieldA/../MoleculeA"
and suffix "move(30,0,0).rot(45,0,0,1)"
"""
# Dots in class names can appear for 2 reasons:
# 1) as part of a path like "../" describing the location
# where this class was defined relative to the caller.
# In that case it will be preceded or followed by
# either another dot '.', or a slash '/'
# 2) as part of a "suffix" which appears after the name
# containing instructions which modify how to
# instantiate that class.
# Case 1 is handled elsewhere. Case 2 is handled here.
i_dot = 0
while i_dot < len(class_name_str):
i_dot = class_name_str.find('.', i_dot)
if i_dot == -1:
break
# Is the '.' character followed by another '.', as in ".."?
# If so, it's part of a path such as "../Parent/Mol', (if
# so, it's not what we're looking for, so keep searching)
if i_dot < len(class_name_str) - 1:
if class_name_str[i_dot + 1] == '.':
i_dot += 1
# otherwise, check to see if it is followed by a '/'?
elif class_name_str[i_dot + 1] != '/':
# if not, then it must be part of a function name
break
class_suffix = ''
class_name = class_name_str
class_suffix_srcloc = None
if ((i_dot != -1) and
(i_dot < len(class_name_str))):
class_suffix = class_name_str[i_dot:]
class_name = class_name_str[:i_dot]
if class_name_str[-1] != ')':
# (ie. if it does not already contain the parentheses)
class_suffix += lex.GetParenExpr()
class_suffix_srcloc = lex.GetSrcLoc()
# sys.stderr.write(' splitting class name into class_name.suffix\n'
# ' class_name=\"'+class_name+'\"\n'
# ' suffix=\"'+class_suffix+'\"\n')
return class_name, class_suffix.lstrip('.'), class_suffix_srcloc
def _ProcessClassName(self, class_name_str, lex):
"""
This function does some additional
processing (occasionally inserting ".../" before class_name).
"""
class_name, class_suffix, class_suffix_srcloc = \
self._ExtractSuffix(class_name_str, lex)
# ---- ellipsis hack ----
# (Note-to-self 2012-4-15)
# Most users expect ttree.py to behave like a
# standard programming language: If the class they are
# instantiating was not defined in this specific
# location, they expect ttree.py to search for
# it outwards, first in the parent's environment,
# and then in the parent's parent's environment,
# and so on, until the object is found.
# For example, most users expect this to work:
# class A{
# <definition_of_a_goes_here...>
# }
# class B{
# a = new A
# }
# Notice in the example above we did not have to specify where "A"
# was defined, because it is defined in the parent's
# environment (ie. immediately outside B's environment).
#
# One can obtain the equivalent behavior in ttree.py
# using ellipsis syntax: "a = new .../A" symbol.
# The ellipsis ".../" tells ttree.py to search upwards
# for the object to the right of it ("A")
# In order to make ttree.py behave the way
# most users are expecting, we artificially insert a
# ".../" before the class name here. (Later on, the
# code that processes the ".../" symbol will take
# care of finding A.)
if (len(class_name) > 0) and (class_name[0] not in ('.', '/', '*', '?')):
class_name = '.../' + class_name
return class_name, class_suffix, class_suffix_srcloc
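# Illustrative sketch (hypothetical input): given the token string
# "MoleculeA.move(30,0,0)", this would return something like
# ('.../MoleculeA', 'move(30,0,0)', srcloc). The ".../" prefix makes the
# later lookup search outward through enclosing scopes for "MoleculeA".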
def _ParseRandom(self, lex):
bracket1 = lex.get_token()
bracket2 = lex.get_token()
if ((bracket1 != '(') and (bracket1 != '[')):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
'Expected a \"(\" or \"[\" following \"random\".')
class_names = []
token = ''
prev_token = '['
while True:
token = lex.get_token()
if (token == '('):
lex.push_token(token)
token = lex.GetParenExpr()
if (prev_token not in (',', '[', '(')):
assert(len(class_names) > 0)
class_names[-1] = prev_token + token
prev_token = prev_token + token
else:
class_names.append(token)
prev_token = token
else:
if ((token == ']') or
(token == lex.eof) or
(token == '}') or
((token in lex.wordterminators) and
(token != ','))):
if (prev_token in (',', '[', '(')):
class_names.append('')
break
if token != ',':
class_names.append(token)
elif (prev_token in (',', '[', '(')):
class_names.append('')
prev_token = token
token_comma = lex.get_token()
bracket1 = lex.get_token()
if ((token != ']') or
(token_comma != ',') or
(bracket1 != '[')):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
'Expected a list of class names enclosed in [] brackets, followed by\n'
'a comma, and then a list of probabilities also enclosed in [] brackets.\n'
'(A random-seed following another comma is optional.)')
weights = []
while True:
token = lex.get_token()
if ((token == ']') or
(token == lex.eof) or
(token == '}') or
((token in lex.wordterminators) and
(token != ','))):
break
if token != ',':
try:
weight = float(token)
except ValueError:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' \"' + token + '\"\n'
'Expected a list of numbers enclosed in [] brackets.')
if (weight < 0.0):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' Negative numbers are not allowed in \"random(\" argument list.\n')
weights.append(weight)
bracket2 = lex.get_token()
if ((token != ']') or
(bracket2 not in (')', ','))):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
'Expected a \")\" or a \",\" following the list of numeric weights.')
if len(class_names) != len(weights):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
'Unequal number of entries in object list and probability list.\n')
# Are all the entries in the "weights" array integers?
# If they are, then treat them as molecule counters,
# not probabilities.
num_by_type = []
for i in range(0, len(weights)):
# are the weights all positive integers?
n = int(weights[i])
if n == weights[i]:
num_by_type.append(n)
if len(num_by_type) < len(weights):
num_by_type = []
tot_weight = sum(weights)
if (tot_weight <= 0.0):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' The numbers in the \"random(\" argument list can not all be zero.\n')
for i in range(0, len(weights)):
weights[i] /= tot_weight
if bracket2 == ',':
try:
token = lex.get_token()
seed = int(token)
random.seed(seed)
except ValueError:
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' \"' + token + '\"\n'
'Expected an integer (a seed) following the list of weights.')
bracket2 = lex.get_token()
if (bracket2 != ')'):
raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\n'
' Error near ' + lex.error_leader() + '\n'
' \"' + token + '\"\n'
'Expected a \")\".')
else:
random.seed()
return (class_names, weights, num_by_type)
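# Illustrative sketch (hypothetical input): for a statement such as
#     lipids = new random([DPPC,DLPC], [0.5,0.5], 12345)
# _ParseRandom() would seed the random number generator with 12345 and
# return (['DPPC', 'DLPC'], [0.5, 0.5], []). If integer counts had been
# supplied instead (eg "[30,70]"), they would be returned in num_by_type,
# and the weights would be normalized to [0.3, 0.7].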
def BuildCommandList(self, command_list):
"""
Search the commands in the tree and make a linear list of commands
in the order they should be carried out.
"""
if self.IsDeleted():
return
# Add a special note to the list of commands to indicate which object
# the commands refer to. (This might be useful one day.)
# Later we can loop through this command list and still be able to tell
# whether or not we are within the scope of a particular class or instance
# (by seeing if we are between a "ScopeBegin" and "ScopeEnd" pair).
command_list.append(ScopeBegin(self, self.srcloc_begin))
# We want to append commands to the command_list in the same order
# that these commands appear in the user's input files.
# Unfortunately the commands may be interspersed with the creation of
# new StaticObjs which have their own commands which we have to explore
# recursively.
# Fortunately each child (StaticObj) has a srcloc_begin member, so we
# can infer the correct order of all the commands belonging to the
# children and correctly insert them into the correct place in between
# the commands of the parent.
srcloc2command_or_child = {}
for command in self.commands:
srcloc2command_or_child[command.srcloc] = command
for child in self.children.values():
srcloc = child.srcloc_begin
# special case: Some children do not have a srcloc because
# they were generated automatically. These children should
# not have any commands either so we can ignore them.
if srcloc != None:
srcloc2command_or_child[srcloc] = child
else:
assert(len(child.commands) == 0)
for srcloc in sorted(srcloc2command_or_child.keys()):
entry = srcloc2command_or_child[srcloc]
if isinstance(entry, StaticObj):
child = entry
child_commands = []
child.BuildCommandList(child_commands)
command_list += child_commands
else:
command_list.append(entry)
command_list.append(ScopeEnd(self, self.srcloc_end))
def __str__(self):
out_str = self.name
if len(self.children) > 0:
out_str += '('
i = 0
for child in self.children.values():
if i + 1 < len(self.children):
out_str += str(child) + ', '
else:
out_str += str(child) + ')'
i += 1
return out_str
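# Illustrative example (hypothetical names): a StaticObj named "World"
# containing children "A" and "B", where "B" contains "C", prints as:
# "World(A, B(C))"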
def RandomSelect(entries, weights):
""" Return an entry from a list at random using
a (normalized) list of probabilities. """
assert(len(entries) == len(weights))
x = random.random()
i = 0
tot_probability = 0.0
while i < len(weights) - 1:
tot_probability += weights[i]
if x <= tot_probability:
break
i += 1
return entries[i]
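# Illustrative example: RandomSelect(['A','B','C'], [0.2,0.5,0.3]) returns
# 'B' roughly half of the time. (The weights must already be normalized;
# _ParseRandom() normalizes them before they are used here.)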
class InstanceObjBasic(object):
""" A simplified version of InstanceObj.
See the documentation/comments for InstanceObj for more details.
(Leaf nodes (variables) are typically stored as InstanceObjBasic objects
More general, non-leaf nodes are stored using InstanceObj objects.)
"""
__slots__ = ["name", "parent"]
def __init__(self,
name='',
parent=None):
self.parent = parent # the environment/object which created this object
# Example:
# Suppose this "molecule" is an amino acid monomer
# belonging to a protein. The "parent" refers to
# the InstanceObj for the protein. ".parent" is
# useful for traversing the global instance tree.
# (use InstanceObj.statobj.parent for
# traversing the global static tree)
self.name = name # A string uniquely identifying this object in
# in it's "parent" environment.
# (It is always the same for every instance
# of the parent object. It would save memory to
# get rid of this member. Andrew 2012/9/13)
#self.deleted = False
# vb##self.var_bindings=None # List of variables assigned to this object
# vb## # or None (None takes up less space than an
# vb## # empty list.)
# vb##def AddVarBinding(self, var_binding):
# vb## if self.var_bindings is None:
# vb## self.var_bindings = [var_binding]
# vb## else:
# vb## self.var_bindings.append(var_binding)
# def DeleteSelf(self):
# self.deleted = True
def DeleteSelf(self):
# self.Dealloc()
self.parent = self # This condition (normally never true)
# flags the node as "deleted". (Nodes are never
# actually deleted, just flagged.)
# I used to have a separate boolean member variable
# which was set True when deleted, but I started
# eliminated unnecessary data members to save space.
# def IsDeleted(self):
# return self.deleted
def IsDeleted(self):
# Return true if self.deleted == True or self.parent == self
# for this node (or for any ancestor node).
node = self
while node.parent != None:
if hasattr(node, 'deleted'):
if node.deleted:
return True
elif node.parent == node:
return True
node = node.parent
return False
# def Dealloc(self):
# pass
# vb##if self.var_bindings is None:
# vb## return
# vb##N = len(self.var_bindings)-1
# vb##for i in range(0,len(self.var_bindings)):
# vb## vb = self.var_bindings[N-i]
# vb## cat = vb.category
# vb## assert(self in cat.bindings)
# vb## del cat.bindings[self]
# vb## del self.var_bindings[N-i]
# vb##self.var_bindings = None
class InstanceObj(InstanceObjBasic):
""" InstanceObjs are used to store nodes in the global
"instance tree", the tree of all classes (molecules) which have
been instantiated. Recall that whenever a class is instantiated,
its members will be instantiated as well. Since these
members can also be classes, this relationship is hierarchical,
and can be represented as a tree.
"InstanceObjs" are the data type used to store the nodes in that tree."""
__slots__ = ["statobj",
"children",
"categories",
"commands",
"commands_push",
"commands_pop",
"srcloc_begin",
"srcloc_end",
"deleted"]
#"LookupMultiDescrStr",
# "Dealloc",
# "DeleteSelf",
# "IsDeleted",
# "UndeleteSelf",
# "DeleteProgeny",
#"BuildInstanceTree",
#"ProcessCommand",
#"ProcessContextNodes",
#"BuildCommandList"]
def __init__(self,
name='',
parent=None):
InstanceObjBasic.__init__(self, name, parent)
self.statobj = None # The statobj node referred to by this instance
self.children = {} # A list of statobjs corresponding to
# constituent parts (members) of the
# current class instance.
# The typical example is to consider the
# multiple amino acids (child-molecules)
# which must be created in order to create a
# new protein (instance) to which they belong
# (which would be "self" in this example)
self.categories = {} # This member stores the same data as the
# corresponding StaticObj's ".instance_categories" member.
# Instance variables (ie. variables
# with a '$' prefix) are stored in a
# category belonging to node.categories
# where "node" is of type InstanceObj.
# (There is a long explanation of
# "categories" in the comments
# of class StaticObj.)
self.commands = [] # An ordered list of commands to carry out
# during instantiation
# Stackable commands to carry out (first, before children)
self.commands_push = []
# Stackable commands to carry out (last, after children)
self.commands_pop = []
self.srcloc_begin = None # Keep track of location in user files
self.srcloc_end = None # (useful for error message reporting)
self.deleted = False
def LookupMultiDescrStr(self,
multi_descr_str,
srcloc,
null_list_warning=False,
null_list_error=False):
"""
Post-Instance (PI) modifiers/commands are commands which modify
an instance of a class after it has already been instantiated.
Simple Example:
class A {
...
}
class B {
a = new A.command_1()
a.command_2()
}
In the example above "command_2()" is a ModCommand, and
"a" is the multi_descr_str (string describing the correspond InstanceObj).
The "command_2()" command will be retroactively pushed onto the
list of commands to execute once "a" is instantiated.
(This is somewhat counter-intuitive.)
When array brackets [] and wildcards are used, a single ModCommand
can modify many different instances, for example suppose:
a = new A [2][5][3]
then "a[1][2][*].command_3()" is equivalent to
a[0][2][0].command_3()
a[0][2][1].command_3()
a[0][2][2].command_3()
In this example "a[1][2][*]" is the multi_descr_str
"a[*][3][*].command_4()" is equivalent to
a[0][3][0].command_4()
a[0][3][1].command_4()
a[1][3][0].command_4()
a[1][3][1].command_4()
In this function, we interpret strings like "a" and "a[*][3][*]"
in the examples above, and figure out which InstanceObjs they refer to,
        and push the corresponding command onto that InstanceObj's instance
command stack retroactively.
In addition to [*], you can use [a-b] and [a:b] syntax. For example:
"a[0][1-2][0-1].command_3()" and
"a[0][1:3][0:2].command_3()" are both equivalent to:
a[0][1][0].command_3()
a[0][1][1].command_3()
a[0][2][0].command_3()
a[0][2][1].command_3()
"""
pattern_str = multi_descr_str
        # Suppose pattern_str = 'a[1][*][3]/b[**][2]'
        # We want to split this string into a list of string fragments
        # which omits the bracketed wildcard entries:
        #    ['a[1][', '][3]/b[', '][2]']
        # However, we only want to split when a '*' or ':' appears
        # between [] brackets.
pattern_fragments = []
ranges_ab = []
i_close_prev = 0
i_close = 0
i_open = 0
while True:
i_open = pattern_str.find('[', i_open + 1)
if i_open == -1:
pattern_fragments.append(pattern_str[i_close_prev:])
break
else:
i_close = pattern_str.find(']', i_open + 1)
if i_close == -1:
pattern_fragments.append(pattern_str[i_close_prev:])
break
# If there is a '*' or a ':' character between
# the [] brackets, then split the string at '['
# (at i_open) and resume reading again at ']'
# (at i_close) (and create a new entry in the
# pattern_fragments[] and ranges_ab[] lists)
wildcard_here = True
range_ab = [0, -1]
for j in range(i_open + 1, i_close):
if ((pattern_str[j] == ':') or
((pattern_str[j] == '-') and (j > i_open + 1)) or
(pattern_str[j] == '*')):
i_wildcard = len(pattern_fragments)
range_a_str = pattern_str[i_open + 1: j]
range_b_str = pattern_str[j + 1: i_close]
if (range_a_str != ''):
if str.isdigit(range_a_str):
range_ab[0] = int(range_a_str)
else:
raise InputError('Error near ' +
ErrorLeader(srcloc.infile,
srcloc.lineno) + '\n'
' Expected colon-separated integers.\n')
if (range_b_str != ''):
if str.isdigit(range_b_str):
range_ab[1] = int(range_b_str)
# special case: When [a-b] type syntax is
# used, it selects from a to b inclusive.
# (IE. b is not a strict upper bound.)
if pattern_str[j] == '-':
range_ab[1] += 1
else:
raise InputError('Error near ' +
ErrorLeader(srcloc.infile,
srcloc.lineno) + '\n'
' Expected colon-separated integers.\n')
break
elif j == i_close - 1:
wildcard_here = False
if wildcard_here:
pattern_fragments.append(
pattern_str[i_close_prev:i_open + 1])
ranges_ab.append(range_ab)
i_close_prev = i_close
assert(len(pattern_fragments) - 1 == len(ranges_ab))
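        # Example (illustrative): for pattern_str = 'a[0][1:3][*]', the loop
        # above produces:
        #   pattern_fragments == ['a[0][', '][', ']']
        #   ranges_ab         == [[1, 3], [0, -1]]
        # so counter values (1, 0) would generate the candidate 'a[0][1][0]'.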
# Now figure out which InstanceObj or InstanceObjs correspond to
# the name or set of names suggested by the multi_descr_str,
# (after wildcard characters have been substituted with integers).
instobj_list = []
if len(pattern_fragments) == 1:
# commenting out:
# instobj_list.append(StrToNode(pattern_str, self, srcloc))
#
# Line above will print an error message if the node is not found.
# However sometimes we don't want this. Use this code instead:
path_tokens = pattern_str.split('/')
i_last_ptkn, instobj = FollowPath(path_tokens,
self,
srcloc)
# If found add to instobj_list
if ((i_last_ptkn == len(path_tokens))
and (not instobj.IsDeleted())):
instobj_list.append(instobj)
else:
# num_counters equals the number of bracket-enclosed wildcards
num_counters = len(pattern_fragments) - 1
multi_counters = [ranges_ab[i][0] for i in range(0, num_counters)]
all_matches_found = False
d_carry = 0
while d_carry < num_counters:
# Find the next InstanceObj in the set of InstanceObjs which
# satisfy the wild-card pattern in pattern_fragments.
while d_carry < num_counters:
candidate_descr_str = ''.join([pattern_fragments[i] +
str(multi_counters[i])
for i in range(0, num_counters)]
+
[pattern_fragments[num_counters]])
# sys.stderr.write('DEBUG: /'+self.name+
# '.LookupMultiDescrStr()\n'
# ' looking up \"'+
# candidate_descr_str+'\"\n')
path_tokens = candidate_descr_str.split('/')
i_last_ptkn, instobj = FollowPath(path_tokens,
self,
srcloc)
# If there is an InstanceObj with that name,
# then add it to the list of InstanceObjs to
# which we will apply this modifier function,
# and increment the counters
# If found (and if the counter is within the range)...
if ((i_last_ptkn == len(path_tokens)) and
((ranges_ab[d_carry][1] == -1) or
(multi_counters[d_carry] < ranges_ab[d_carry][1]))):
# (make sure it has not yet been "deleted")
if (not instobj.IsDeleted()):
instobj_list.append(instobj)
d_carry = 0
multi_counters[0] += 1
#sys.stderr.write('DEBUG: InstanceObj found.\n')
break
# If there is no InstanceObj with that name,
# then perhaps it is because we have incremented
# the counter too high. If there are multiple
# counters, increment the next most significant
# counter, and reset this counter to 0.
# Keep looking
# (We only do this if the user neglected to explicitly
                # specify an upper bound --> ranges_ab[d_carry][1] == -1)
elif ((ranges_ab[d_carry][1] == -1) or
(multi_counters[d_carry] >= ranges_ab[d_carry][1])):
#sys.stderr.write('DEBUG: InstanceObj not found.\n')
multi_counters[d_carry] = ranges_ab[d_carry][0]
d_carry += 1
if d_carry >= num_counters:
break
multi_counters[d_carry] += 1
else:
# Object was not found but we keep going. Skip
# to the next entry in the multi-dimensional list.
d_carry = 0
multi_counters[0] += 1
break
if (null_list_warning and (len(instobj_list) == 0)):
sys.stderr.write('WARNING(' + g_module_name + '.LookupMultiDescrStr()):\n'
' Potential problem near ' +
ErrorLeader(srcloc.infile,
srcloc.lineno) + '\n'
' No objects (yet) matching name \"' + pattern_str + '\".\n')
if (null_list_error and
(len(instobj_list) == 0)):
if len(pattern_fragments) == 1:
raise InputError('Error(' + g_module_name + '.LookupMultiDescrStr()):\n'
' Syntax error near ' +
ErrorLeader(srcloc.infile,
srcloc.lineno) + '\n'
' No objects matching name \"' + pattern_str + '\".')
else:
sys.stderr.write('WARNING(' + g_module_name + '.LookupMultiDescrStr()):\n'
' Potential problem near ' +
ErrorLeader(srcloc.infile,
srcloc.lineno) + '\n'
' No objects (yet) matching name \"' + pattern_str + '\".\n')
return instobj_list
def __str__(self):
out_str = self.name
if len(self.children) > 0:
out_str += '('
i = 0
for child in self.children.values():
if i + 1 < len(self.children):
out_str += str(child) + ', '
else:
out_str += str(child) + ')'
i += 1
return out_str
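    # Example (illustrative): printing an instance produces a nested list of
    # child names, eg. str(protein) might return 'protein(res1(CA, CB), res2(CA))'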
def DeleteSelf(self):
self.deleted = True
# COMMENT1: Don't get rid of pointers to yourself. Knowing which
# objects you instantiated and destroyed might be useful
# in case you want to apply multiple delete [*] commands
# COMMENT2: Don't delete all the child nodes, and commands. These are
# needed later (so that text-templates containing references
# to these nodes don't cause moltemplate to crash.)
# def UndeleteSelf(self):
# self.deleted = False
#
#
# def DeleteProgeny(self):
# for child in self.children.values():
# if hasattr(child, 'DeleteProgeny'):
# child.DeleteProgeny()
# else:
# child.DeleteSelf()
# self.DeleteSelf();
def BuildInstanceTree(self,
statobj,
class_parents_in_use):
"""
This takes care of the details of copying relevant data from an StaticObj
into a newly-created InstanceObj. It allocates space for and performs
a deep-copy of any instance variables (and new instance categories), but
it performs a shallow copy of everything else (template text, etc..).
This is done recursively for every child that this class instantiates.
"""
if self.IsDeleted():
return
# sys.stderr.write(' DEBUG: '+self.name+
# '.BuildInstanceTree('+statobj.name+')\n')
#instance_refs = {}
# Keep track of which line in the file (and which file) we were
# in when we began parsing the class which defines this instance,
# as well as when we stopped parsing.
        # (Don't do this if you are recursively searching class_parents because
        #  in that case you would be overwriting .statobj with the
        #  parent.)
if len(class_parents_in_use) == 0:
self.statobj = statobj
self.srcloc_begin = statobj.srcloc_begin
self.srcloc_end = statobj.srcloc_end
# Make copies of the class_parents' StaticObj data.
# First deal with the "self.instance_commands_push"
# These commands should be carried out before any of the commands
# in "self.instance_commands".
for command in statobj.instance_commands_push:
# self.commands.append(command)
self.ProcessCommand(command)
# Then deal with class parents
for class_parent in statobj.class_parents:
# Avoid the "Diamond of Death" multiple inheritance problem
if class_parent not in class_parents_in_use:
# sys.stderr.write(' DEBUG: '+self.name+'.class_parent = '+
# class_parent.name+'\n')
self.BuildInstanceTree(class_parent,
class_parents_in_use)
class_parents_in_use.add(class_parent)
# Now, deal with the data in THIS object and its children
assert((self.commands != None) and (self.categories != None))
# "instance_categories" contains a list of new "categories" (ie new
# types of variables) to create whenever this class is instantiated.
# (This is used whenever we create a local counter variable: Suppose we
# want to count the residues within a particular protein, when there
# are multiple copies of the same protein in the simulation.)
for cat_name, cat in statobj.instance_categories.items():
assert(len(cat.bindings) == 0)
self.categories[cat_name] = Category(cat_name)
self.categories[cat_name].counter = cat.counter.__copy__()
# Note: Later on we will generate leaf nodes corresponding to
# variables, and put references to them in this category.
# Deal with the "instance_commands",
for command in statobj.instance_commands:
# self.commands.append(command)
self.ProcessCommand(command)
# Finally deal with the "self.instance_commands_pop"
# These commands should be carried out after all of the commands
# in "self.instance_commands".
for command in statobj.instance_commands_pop:
# self.commands.append(command)
self.ProcessCommand(command)
def ProcessCommand(self, command):
if isinstance(command, ModCommand):
sys.stderr.write(' processing command \"' + str(command) + '\"\n')
mod_command = command
instobj_list = self.LookupMultiDescrStr(mod_command.multi_descr_str,
mod_command.command.srcloc)
if isinstance(mod_command.command, DeleteCommand):
# Delete any objects we have created so far
# whose name matches mod_command.multi_descr_str:
for instobj in instobj_list:
instobj.DeleteSelf()
# instobj.DeleteProgeny()
elif len(instobj_list) == 0:
raise InputError('Error(' + g_module_name + '.ProcessCommand()):\n'
' Syntax error at or before ' +
ErrorLeader(mod_command.command.srcloc.infile,
mod_command.command.srcloc.lineno) + '\n'
' No objects matching name \"' +
mod_command.multi_descr_str + '\"\n'
' (If the object is an array, include brackets. Eg. \"molecules[*][*][*]\")')
else:
for instobj in instobj_list:
assert(not isinstance(mod_command.command, DeleteCommand))
command = mod_command.command.__copy__()
self.ProcessContextNodes(command)
if isinstance(command, PushCommand):
instobj.commands_push.append(command)
elif isinstance(mod_command.command, PopCommand):
instobj.commands_pop.insert(0, command)
else:
                        # I don't know if any other types of commands will ever
                        # occur, but I handle them below, just in case...
assert(not isinstance(command, InstantiateCommand))
instobj.commands.append(command.__copy__())
return # ends "if isinstance(command, ModCommand):"
# Otherwise:
command = command.__copy__()
self.ProcessContextNodes(command)
if isinstance(command, InstantiateCommand):
sys.stderr.write(' processing command \"' + str(command) + '\"\n')
            # (Append the command now; this is useful later for keeping
            # track of the order in which children were created.)
            self.commands.append(command)
# check to make sure no child of that name was previously defined
prev_child = self.children.get(command.name)
if ((prev_child != None) and (not prev_child.IsDeleted())):
raise InputError('Error near ' +
ErrorLeader(command.srcloc.infile,
command.srcloc.lineno) + '\n'
' Object \"' + command.name + '\" is already defined.\n')
child = InstanceObj(command.name, self)
command.instobj = child
if command.class_ref.statobj_str == '':
child.DeleteSelf()
# Why? This if-then check handles the case when the user
# wants to create an array of molecules with random vacancies.
# When this happens, some of the instance commands will
# contain instructions to create a copy of a molecule with
# an empty molecule-type-string (class_ref.statobj_str).
# Counter-intuitively, ...
# ...we DO want to create something here so that the user can
# safely loop over the array without generating an error.
# (Such as to delete elements, or move the remaining
# members in the array.) We just want to mark it as
# 'deleted'. (That's what "DeleteSelf()" does.)
else:
# This is the heart of "BuildInstanceTree()"
# (which implements object composition)
new_class_parents_in_use = set([])
child.BuildInstanceTree(command.class_ref.statobj,
new_class_parents_in_use)
self.children[child.name] = child
elif isinstance(command, WriteFileCommand):
#sys.stderr.write(' processing command \"'+str(command)+'\"\n')
self.commands.append(command)
for var_ref in command.tmpl_list:
# Process the VarRef entries in the tmpl_list,
# (and check they have the correct prefix: either '$' or '@')
# Ignore other entries (for example, ignore TextBlocks).
if (isinstance(var_ref, VarRef) and (var_ref.prefix[0] == '$')):
if (var_ref.descr_str[:4] == 'mol:'):
pass
var_ref.nptr.cat_name, var_ref.nptr.cat_node, var_ref.nptr.leaf_node = \
DescrToCatLeafNodes(var_ref.descr_str,
self,
var_ref.srcloc,
True)
categories = var_ref.nptr.cat_node.categories
# "categories" is a dictionary storing "Category" objects
# indexed by category names.
# Note to self: Always use the ".categories" member,
# (never the ".instance_categories" member.
# ".instance_categories" are only used temporarilly before
# we instantiate, ie. before we build the tree of
# InstanceObjs.)
category = categories[var_ref.nptr.cat_name]
# "category" is a Category object containing a
# dictionary of VarBinding objects, and an internal
# counter.
var_bindings = category.bindings
# "var_bindings" is a dictionary storing "VarBinding"
# objects, indexed by leaf nodes. Each leaf node
# corresponds to a unique variable in this category.
# --- Now update "var_bindings" ---
# Search for the "VarBinding" object that
# corresponds to this leaf node.
# If not found, then create one.
if var_ref.nptr.leaf_node in var_bindings:
var_binding = var_bindings[var_ref.nptr.leaf_node]
# "var_binding" stores the information for a variable,
# including pointers to all of the places the variable
                        # is referenced, the variable's (full) name, and value.
#
# Keep track of all the places that varible is
# referenced by updating the ".refs" member
var_binding.refs.append(var_ref)
else:
# Not found, so we create a new binding.
var_binding = VarBinding()
# var_binding.refs contains a list of all the places
# this variable is referenced. Start with this var_ref:
var_binding.refs = [var_ref]
# keep track of the cat_node, cat_name, leaf_node:
var_binding.nptr = var_ref.nptr
# "var_binding.full_name" stores a unique string like
# '@/atom:Water/H' or '$/atom:water[1423]/H2',
# which contains the full path for the category and leaf
# nodes, and uniquely identifies this variable globally.
# Thus these strings correspond uniquely (ie. in a
# one-to-one fashion) with the nodes they represent.
var_binding.full_name = var_ref.prefix[0] + \
CanonicalDescrStr(var_ref.nptr.cat_name,
var_ref.nptr.cat_node,
var_ref.nptr.leaf_node,
var_ref.srcloc)
# (These names can always be generated later when needed
# but it doesn't hurt to keep track of it here too.)
# Now add this binding to the other
# bindings in this category:
var_bindings[var_ref.nptr.leaf_node] = var_binding
# vb##
# var_ref.nptr.leaf_node.AddVarBinding(var_binding)
var_binding.category = category
# It's convenient to add a pointer in the opposite direction
# so that later if we find the var_ref, we can find its
                    # binding and vice-versa. (Ie. two-way pointers)
var_ref.binding = var_binding
assert(var_ref.nptr.leaf_node in var_bindings)
else:
# Otherwise, we don't know what this command is yet.
# Append it to the list of commands and process it/ignore it later.
self.commands.append(command)
def ProcessContextNodes(self, command):
if hasattr(command, 'context_node'):
# Lookup any nodes pointers to instobjs
if command.context_node != None:
if type(command.context_node) is str:
command.context_node = StrToNode(command.context_node,
self,
command.srcloc)
# (Otherwise, just leave it as None)
def BuildCommandList(self, command_list):
"""
Search the commands in the tree and make a linear list of commands
in the order they should be carried out.
"""
if self.IsDeleted():
return
if (len(self.commands) == 0):
assert(len(self.children) == 0)
# To save memory don't generate any commands
# for trivial (leaf) nodes
return
# Add a special note to the list of commands to indicate which object
# the commands refer to. (This might be useful one day.)
# Later we can loop through this command list and still be able to tell
# whether or not we are within the scope of a particular class or instance
# (by seeing if we are between a "ScopeBegin" and "ScopeEnd" pair).
command_list.append(ScopeBegin(self, self.srcloc_begin))
# Note:
# The previous version looped over all commands in this node, and then
# recursively invoke BuildCommandList() on all the children of this node
# We don't do that anymore because it does not take into account the
# order that various child objects were created/instantiated
# which potentially could occur in-between other commands. Instead,
# now we loop through the command_list and recursively visit child
# nodes only when we encounter them in the command list.
for command in self.commands_push:
assert(isinstance(command, InstantiateCommand) == False)
command_list.append(command)
for command in self.commands:
if isinstance(command, InstantiateCommand):
#child = self.children[command.name]
# the above line does not work because you may have
# deleted that child after you created and then
# replaced it by somebody else. Store the node.
child = command.instobj
child.BuildCommandList(command_list)
else:
command_list.append(command)
for command in self.commands_pop:
assert(isinstance(command, InstantiateCommand) == False)
command_list.append(command)
command_list.append(ScopeEnd(self, self.srcloc_begin))
def AssignTemplateVarPtrs(tmpl_list, context_node):
"""
Now scan through all the variables within the templates defined
for this context_node (either static or dynamic depending on var_filter).
Each reference to a variable in the template has a descriptor which
indicates the variable's type, and in which molecule it is defined (ie
where it is located in the molecule instance tree or type definition tree).
(See comments for "class VarNPtr(object):" above for details.)
Eventually we want to assign a value to each variable.
    This same variable (node) may appear multiple times in different templates.
So we also create a place to store this variable's value, and also assign
(two-way) pointers from the VarRef in the template, to this storage area so
that later on when we write out the contents of the template to a file, we
    can substitute this variable with its value, in all the places it appears.
"""
for var_ref in tmpl_list:
# Process the VarRef entries in the tmpl_list,
# (and check they have the correct prefix: either '$' or '@')
# Ignore other entries (for example, ignore TextBlocks).
if (isinstance(var_ref, VarRef) and
((isinstance(context_node, StaticObj) and
(var_ref.prefix[0] == '@'))
or
(isinstance(context_node, InstanceObjBasic) and
(var_ref.prefix[0] == '$')))):
var_ref.nptr.cat_name, var_ref.nptr.cat_node, var_ref.nptr.leaf_node = \
DescrToCatLeafNodes(var_ref.descr_str,
context_node,
var_ref.srcloc,
True)
categories = var_ref.nptr.cat_node.categories
# "categories" is a dictionary storing "Category" objects
# indexed by category names.
# Note to self: Always use the ".categories" member,
# (never the ".instance_categories" member.
# ".instance_categories" are only used temporarilly before
# we instantiate, ie. before we build the tree of InstanceObjs.)
category = categories[var_ref.nptr.cat_name]
# "category" is a Category object containing a
# dictionary of VarBinding objects, and an internal counter.
var_bindings = category.bindings
# "var_bindings" is a dictionary storing "VarBinding"
# objects, indexed by leaf nodes. Each leaf node
# corresponds to a unique variable in this category.
# --- Now update "var_bindings" ---
# Search for the "VarBinding" object that
# corresponds to this leaf node.
# If not found, then create one.
if var_ref.nptr.leaf_node in var_bindings:
var_binding = var_bindings[var_ref.nptr.leaf_node]
# "var_binding" stores the information for a variable,
# including pointers to all of the places the variable
                # is referenced, the variable's (full) name, and value.
#
                # Keep track of all the places that variable is
# referenced by updating the ".refs" member
var_binding.refs.append(var_ref)
else:
# Not found, so we create a new binding.
var_binding = VarBinding()
# var_binding.refs contains a list of all the places
# this variable is referenced. Start with this var_ref:
var_binding.refs = [var_ref]
# keep track of the cat_node, cat_name, leaf_node:
var_binding.nptr = var_ref.nptr
# "var_binding.full_name" stores a unique string like
# '@/atom:Water/H' or '$/atom:water[1423]/H2',
# which contains the full path for the category and leaf
# nodes, and uniquely identifies this variable globally.
# Thus these strings correspond uniquely (ie. in a
# one-to-one fashion) with the nodes they represent.
var_binding.full_name = var_ref.prefix[0] + \
CanonicalDescrStr(var_ref.nptr.cat_name,
var_ref.nptr.cat_node,
var_ref.nptr.leaf_node,
var_ref.srcloc)
# (These names can always be generated later when needed
# but it doesn't hurt to keep track of it here too.)
# Now add this binding to the other
# bindings in this category:
var_bindings[var_ref.nptr.leaf_node] = var_binding
# vb## var_ref.nptr.leaf_node.AddVarBinding(var_binding)
var_binding.category = category
# It's convenient to add a pointer in the opposite direction
# so that later if we find the var_ref, we can find its
            # binding and vice-versa. (Ie. two-way pointers)
var_ref.binding = var_binding
assert(var_ref.nptr.leaf_node in var_bindings)
def AssignStaticVarPtrs(context_node, search_instance_commands=False):
#sys.stdout.write('AssignVarPtrs() invoked on node: \"'+NodeToStr(context_node)+'\"\n')
if search_instance_commands:
assert(isinstance(context_node, StaticObj))
commands = context_node.instance_commands
else:
# Note: Leaf nodes contain no commands, so skip them
if (not hasattr(context_node, 'commands')):
return
# Otherwise process their commands
commands = context_node.commands
for command in commands:
if isinstance(command, WriteFileCommand):
AssignTemplateVarPtrs(command.tmpl_list, context_node)
# Recursively invoke AssignVarPtrs() on all (non-leaf) child nodes:
for child in context_node.children.values():
AssignStaticVarPtrs(child, search_instance_commands)
def AssignVarOrderByCommand(command_list, prefix_filter):
"""
For each category in context_node, and each variable in that category,
set the order of each variable according to the position of the
write(), write_once(), or other command that created it.
Only variables with the correct prefix ('$' or '@') are affected.
"""
count = 0
for command in command_list:
if isinstance(command, WriteFileCommand):
tmpl_list = command.tmpl_list
for var_ref in tmpl_list:
if isinstance(var_ref, VarRef):
if var_ref.prefix in prefix_filter:
count += 1
if ((var_ref.binding.order is None) or
(var_ref.binding.order > count)):
var_ref.binding.order = count
# def AssignVarOrderByFile(command_list, prefix_filter):
# """
# For each category in context_node, and each variable in that category,
# set the order of each variable equal to the position of that variable
# in the user's input file.
#
# """
#
# for command in command_list:
# if isinstance(command, WriteFileCommand):
# tmpl_list = command.tmpl_list
# for var_ref in tmpl_list:
# if isinstance(var_ref, VarRef):
# if var_ref.prefix in prefix_filter:
# if ((var_ref.binding.order is None) or
# (var_ref.binding.order > var_ref.srcloc.order)):
# var_ref.binding.order = var_ref.srcloc.order
def AssignVarOrderByFile(context_node, prefix_filter, search_instance_commands=False):
"""
For each category in context_node, and each variable in that category,
set the order of each variable equal to the position of that variable
in the user's input file.
"""
    commands = context_node.commands
    if search_instance_commands:
        assert(isinstance(context_node, StaticObj))
        # Use list concatenation (not .append()) so that each instance
        # command is visited individually by the loop below, and so that
        # context_node.commands is not modified as a side effect.
        commands = (commands +
                    context_node.instance_commands_push +
                    context_node.instance_commands +
                    context_node.instance_commands_pop)
for command in commands:
if isinstance(command, WriteFileCommand):
tmpl_list = command.tmpl_list
for var_ref in tmpl_list:
if (isinstance(var_ref, VarRef) and
(var_ref.prefix in prefix_filter)):
                    if ((var_ref.binding.order is None) or
                            (var_ref.binding.order == -1) or
                            (var_ref.binding.order > var_ref.srcloc.order)):
var_ref.binding.order = var_ref.srcloc.order
for child in context_node.children.values():
AssignVarOrderByFile(child, prefix_filter, search_instance_commands)
def AutoAssignVals(cat_node,
sort_variables,
reserved_values=None,
ignore_prior_values=False):
"""
This function automatically assigns values to all the variables
belonging to all the categories in cat_node.categories.
Each category has its own internal counter. For every variable in that
category, query the counter (which usually returns an integer),
and assign the variable to it. Exceptions can be made if the integer
is reserved by some other variable, or if it has been already assigned.
    Afterwards, we recursively search the child nodes
    (in a depth-first-search order).
sort_variables: Sorting the variables according to their "binding.order"
counters is optional.
"""
if (not hasattr(cat_node, 'categories')):
# (sometimes leaf nodes lack a 'categories' member, to save memory)
return
# Search the tree in a depth-first-search manner.
# For each node, examine the "categories" associated with that node
# (ie the list of variables whose counters lie within that node's scope).
for cat_name, cat in cat_node.categories.items():
# Loop through all the variables in this category.
if sort_variables:
# Sort the list of variables according to var_binding.order
# First, print a progress indicator (this could be slow)
prefix = '$'
# Is this parent_node an StaticObj? (..or inherit from StaticObj?)
if isinstance(cat_node, StaticObj):
prefix = '@'
sys.stderr.write(' sorting variables in category: ' + prefix +
CanonicalCatName(cat_name, cat_node) + ':\n')
var_bind_iter = iter(sorted(cat.bindings.items(),
key=operator.itemgetter(1)))
else:
# Just iterate through them in the order that they were added
# to the category list. (This happens to be the same order as
# we found it earlier when searching the tree.)
var_bind_iter = iter(cat.bindings.items())
for leaf_node, var_binding in var_bind_iter:
if ((var_binding.value is None) or ignore_prior_values):
if var_binding.nptr.leaf_node.name[:9] == '__query__':
# -- THE "COUNT" HACK --
# '__query__...' variables are not really variables.
# They are a mechanism to allow the user to query the
# category counter without incrementing it.
var_binding.value = str(cat.counter.query())
elif HasWildcard(var_binding.full_name):
# -- The wildcard hack ---
# Variables containing * or ? characters in their names
# are not allowed. These are not variables, but patterns
# to match with other variables. Represent them by the
# (full-path-expanded) string containing the * or ?.
var_binding.value = var_binding.full_name
else:
if ((not var_binding.nptr.leaf_node.IsDeleted()) and
(len(var_binding.refs) > 0)):
# For each (regular) variable, query this category's counter
# (convert it to a string), and see if it is already in use
# (in this category). If not, then set this variable's value
# to the counter's value. Either way, increment the
# counter.
while True:
cat.counter.incr()
value = str(cat.counter.query())
if ((reserved_values is None) or
((cat, value) not in reserved_values)):
break
var_binding.value = value
# Recursively invoke AssignVarValues() on all child nodes
for child in cat_node.children.values():
AutoAssignVals(child,
sort_variables,
reserved_values,
ignore_prior_values)
# Did the user ask us to reformat the output string?
# This information is encoded in the variable's suffix.
def ExtractFormattingCommands(suffix):
if (len(suffix) <= 1):
return None, None
if suffix[-1] == '}': # Get rid of any trailing '}' characters
suffix = suffix[:-1]
if suffix[-1] != ')': # Format functions are always followed by parens
return None, None
else:
        idot = suffix.find('.')  # Format functions are usually preceded by '.'
ioparen = suffix.find('(')
icparen = suffix.find(')')
format_fname = suffix[idot + 1:ioparen]
args = suffix[ioparen + 1:icparen]
args = args.split(',')
for i in range(0, len(args)):
args[i] = RemoveOuterQuotes(args[i].strip(), '\"\'')
return format_fname, args
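# Example (illustrative; these suffix strings are hypothetical, but they have
# the form this function expects):
#   ExtractFormattingCommands('.ljust(10)}')     ->  ('ljust', ['10'])
#   ExtractFormattingCommands('.rjust(8,"0")}')  ->  ('rjust', ['8', '0'])
#   ExtractFormattingCommands('}')               ->  (None, None)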
def Render(tmpl_list, substitute_vars=True):
"""
This function converts a TextBlock,VarRef list into a string.
It is invoked by WriteTemplatesValue() in order to print
out the templates stored at each node of the tree.
"""
out_str_list = []
i = 0
while i < len(tmpl_list):
entry = tmpl_list[i]
if isinstance(entry, VarRef):
var_ref = entry
var_bindings = var_ref.nptr.cat_node.categories[
var_ref.nptr.cat_name].bindings
# if var_ref.nptr.leaf_node not in var_bindings:
#assert(var_ref.nptr.leaf_node in var_bindings)
if var_ref.nptr.leaf_node.IsDeleted():
raise InputError('Error near ' +
ErrorLeader(var_ref.srcloc.infile,
var_ref.srcloc.lineno) + '\n'
' The variable you referred to does not exist:\n\n'
' ' + var_ref.prefix + var_ref.descr_str + var_ref.suffix + '\n\n'
' (You probably deleted it or something it belonged to earlier.)\n')
else:
if substitute_vars:
value = var_bindings[var_ref.nptr.leaf_node].value
format_fname, args = ExtractFormattingCommands(
var_ref.suffix)
if format_fname == 'ljust':
if len(args) == 1:
value = value.ljust(int(args[0]))
else:
value = value.ljust(int(args[0]), args[1])
elif format_fname == 'rjust':
if len(args) == 1:
value = value.rjust(int(args[0]))
else:
value = value.rjust(int(args[0]), args[1])
out_str_list.append(value)
else:
out_str_list.append(var_ref.prefix +
SafelyEncodeString(var_bindings[var_ref.nptr.leaf_node].full_name[1:]) +
var_ref.suffix)
else:
assert(isinstance(entry, TextBlock))
out_str_list.append(entry.text)
i += 1
return ''.join(out_str_list)
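# Example (illustrative sketch; assumes a TextBlock can be built from a bare
# string -- the real constructor may also require a SrcLoc argument):
#   tmpl = [TextBlock('mass 1 '), TextBlock('15.9994\n')]
#   Render(tmpl)   # -> 'mass 1 15.9994\n'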
def IgnoreThis(a):
pass
def FindReplacementVarPairs(context_node,
replace_var_pairs):
# search_instance_commands = False):
#####################
# if search_instance_commands:
# assert(isinstance(context_node, StaticObj))
# commands = context_node.instance_commands
# else:
# # Note: Leaf nodes contain no commands, so skip them
# if (not hasattr(context_node, 'commands')):
# return
# # Otherwise process their commands
# commands = context_node.commands
#####################
commands = context_node.commands
for command in commands:
if (isinstance(command, WriteFileCommand) and
command.filename == 'ttree_replacements.txt'):
tmpl_list = command.tmpl_list
var_alias = None
for entry in tmpl_list:
# Each successive pair of variables indicates a
# variable you wish to replace.
# (Any ordinary text in between variable names is ignored.)
if isinstance(entry, VarRef):
if var_alias == None:
var_alias = (entry.nptr.cat_name,
entry.nptr.cat_node,
entry.nptr.leaf_node)
else:
var_replace = (entry.nptr.cat_name,
entry.nptr.cat_node,
entry.nptr.leaf_node)
replace_var_pairs[var_alias] = var_replace
var_alias = None
# Recursively invoke AssignVarPtrs() on all (non-leaf) child nodes:
for child in context_node.children.values():
FindReplacementVarPairs(child,
replace_var_pairs)
# search_instance_commands)
def ReplaceVars(context_node,
replace_var_pairs,
search_instance_commands=False):
if len(replace_var_pairs) == 0:
return
#sys.stdout.write('AssignVarPtrs() invoked on node: \"'+NodeToStr(context_node)+'\"\n')
if search_instance_commands:
assert(isinstance(context_node, StaticObj))
commands = context_node.instance_commands
else:
# Note: Leaf nodes contain no commands, so skip them
if (not hasattr(context_node, 'commands')):
return
# Otherwise process their commands
commands = context_node.commands
if len(replace_var_pairs) > 0:
for command in commands:
if isinstance(command, WriteFileCommand):
ReplaceVarsInTmpl(command.tmpl_list,
replace_var_pairs)
# Recursively invoke ReplaceVars() on all (non-leaf) child nodes:
for child in context_node.children.values():
ReplaceVars(child,
replace_var_pairs,
search_instance_commands)
def ReplaceVarsInTmpl(tmpl_list, replace_var_pairs):
""" replace any references to specific variables with other variables """
if len(replace_var_pairs) == 0:
return
i = 0
while i < len(tmpl_list):
entry = tmpl_list[i]
if isinstance(entry, VarRef):
var_ref = entry
#full_name = var_bindings[var_ref.nptr.leaf_node].full_name
if (var_ref.nptr.cat_name,
var_ref.nptr.cat_node,
var_ref.nptr.leaf_node) in replace_var_pairs:
# optional: (since we will eventually delete the variable)
# delete the reference to this variable from "bindings"
nptr_old = var_ref.nptr
# swap the old variable with the new one
(nptr_new_cat_name, nptr_new_cat_node, nptr_new_leaf_node) = \
replace_var_pairs[(nptr_old.cat_name,
nptr_old.cat_node,
nptr_old.leaf_node)]
var_bindings = var_ref.nptr.cat_node.categories[
nptr_old.cat_name].bindings
assert(nptr_new_leaf_node in var_bindings)
# Copy the things we need from the old variable.
# References to the old variable should be added to the new one
# (since they are the same variable)
# for ref in var_bindings[nptr_old.leaf_node].refs:
# ref.nptr.cat_name = nptr_new_cat_name
# ref.nptr.cat_node = nptr_new_cat_node
# ref.nptr.leaf_node = nptr_new_leaf_node
if nptr_old.leaf_node in var_bindings:
var_bindings[
nptr_new_leaf_node].refs += var_bindings[nptr_old.leaf_node].refs
del var_bindings[nptr_old.leaf_node]
var_ref.nptr.cat_name = nptr_new_cat_name
var_ref.nptr.cat_node = nptr_new_cat_node
var_ref.nptr.leaf_node = nptr_new_leaf_node # <-- this will...
# ... update all places where that nptr is used, including
# all of the varrefs from the old variable. In other words,
# there is no need to manually update the leaf_nodes in
# the var_bindings[nptr_new_leaf_node].refs
# (It's better to do it this way instead.)
# var_ref.prefix = (...no need to modify)
# var_ref.suffix = (...no need to modify)
var_ref.descr_str = \
CanonicalDescrStr(var_ref.nptr.cat_name,
var_ref.nptr.cat_node,
var_ref.nptr.leaf_node,
var_ref.srcloc)
var_bindings[nptr_new_leaf_node].full_name = var_ref.prefix[
0] + var_ref.descr_str
i += 1
def MergeWriteCommands(command_list):
""" Write commands are typically to the same file.
We can improve performance by appending all of
commands that write to the same file together before
carrying out the write operation.
"""
file_templates = defaultdict(list)
for command in command_list:
if isinstance(command, WriteFileCommand):
if command.filename != None:
file_templates[command.filename] += \
command.tmpl_list
return file_templates
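# Example (illustrative; assumes a WriteFileCommand(filename, tmpl_list)-style
# constructor, which may differ from the real signature):
#   cmds = [WriteFileCommand('out.txt', tmpl_a),
#           WriteFileCommand('out.txt', tmpl_b)]
#   MergeWriteCommands(cmds)   # -> {'out.txt': tmpl_a + tmpl_b}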
def WriteTemplatesValue(file_templates):
""" Carry out the write() and write_once() commands (which
        write out the contents of the templates contained inside them).
"""
for filename, tmpl_list in file_templates.items():
if filename == '':
out_file = sys.stdout
else:
out_file = open(filename, 'a')
out_file.write(Render(tmpl_list, substitute_vars=True))
if filename != '':
out_file.close()
# Alternate (old method):
# for command in command_list:
# if isinstance(command, WriteFileCommand):
# if command.filename != None:
# if command.filename == '':
# out_file = sys.stdout
# else:
# out_file = open(command.filename, 'a')
#
# out_file.write(Render(command.tmpl_list))
#
# if command.filename != '':
# out_file.close()
def WriteTemplatesVarName(file_templates):
""" Carry out the write() and write_once() commands (which
        write out the contents of the templates contained inside them).
However variables within the templates are represented by their
full name instead of their assigned value.
"""
for filename, tmpl_list in file_templates.items():
if filename != '':
out_file = open(filename + '.template', 'a')
out_file.write(Render(tmpl_list, substitute_vars=False))
out_file.close()
def EraseTemplateFiles(command_list):
filenames = set([])
for command in command_list:
if isinstance(command, WriteFileCommand):
if (command.filename != None) and (command.filename != ''):
if command.filename not in filenames:
filenames.add(command.filename)
                    # Opening the files (in mode 'w') and closing them again
# erases their contents.
out_file = open(command.filename, 'w')
out_file.close()
out_file = open(command.filename + '.template', 'w')
out_file.close()
# def ClearTemplates(file_templates):
# for filename in file_templates:
# if filename != '':
# out_file = open(filename, 'w')
# out_file.close()
# out_file = open(filename + '.template', 'w')
# out_file.close()
def WriteVarBindingsFile(node):
""" Write out a single file which contains a list of all
of the variables defined (regardless of which class they
were defined in). Next to each variable name is the corresponding
information stored in that variable (a number) that variable.
"""
if (not hasattr(node, 'categories')):
# (sometimes leaf nodes lack a 'categories' member, to save memory)
return
out = open('ttree_assignments.txt', 'a')
for cat_name in node.categories:
var_bindings = node.categories[cat_name].bindings
for nd, var_binding in var_bindings.items():
if nd.IsDeleted():
continue # In that case, skip this variable
if len(var_binding.refs) == 0: # check2016-6-07
continue
# if type(node) is type(nd):
if ((isinstance(node, InstanceObjBasic) and isinstance(nd, InstanceObjBasic))
or
(isinstance(node, StaticObj) and isinstance(nd, StaticObj))):
                # Now omit variables whose names contain "*" or "?"
# (these are actually not variables, but wildcard patterns)
if not HasWildcard(var_binding.full_name):
if len(var_binding.refs) > 0:
usage_example = ' #' +\
ErrorLeader(var_binding.refs[0].srcloc.infile,
var_binding.refs[0].srcloc.lineno)
else:
usage_example = ''
out.write(SafelyEncodeString(var_binding.full_name) + ' ' +
SafelyEncodeString(var_binding.value)
+ usage_example + '\n')
out.close()
for child in node.children.values():
WriteVarBindingsFile(child)
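# Example (illustrative): each line of "ttree_assignments.txt" pairs a full
# variable name with its assigned value, followed by a comment showing where
# the variable was first referenced, eg:
#   @atom:Water/H 1  #"water.ttree":12
#   $atom:water[0]/H1 1  #"water.ttree":34
# (The exact comment format depends on ErrorLeader(), defined elsewhere.)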
def CustomizeBindings(bindings,
objectdefs,
objects):
var_assignments = set()
for name, vlpair in bindings.items():
prefix = name[0]
var_descr_str = name[1:]
value = vlpair.val
dbg_loc = vlpair.loc
if prefix == '@':
var_binding = LookupVar(var_descr_str,
objectdefs,
dbg_loc)
elif prefix == '$':
var_binding = LookupVar(var_descr_str,
objects,
dbg_loc)
else:
# If the user neglected a prefix, this should have generated
# an error earlier on.
assert(False)
# Change the assignment:
var_binding.value = value
var_assignments.add((var_binding.category, value))
# sys.stderr.write(' CustomizeBindings: descr=' + var_descr_str +
# ', value=' + value + '\n')
return var_assignments
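# Example (illustrative sketch; "srcloc" stands for a hypothetical
# source-location object):
#   bindings = {'@atom:x': ValLocPair('2', srcloc)}
#   reserved = CustomizeBindings(bindings, g_objectdefs, g_objects)
#   # -> binds the @atom:x variable to '2' and returns a set containing
#   #    (category_of_atom_variables, '2'), reserving that value.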
def ReplaceVarsInCustomBindings(bindings,
objectdefs,
objects,
replace_var_pairs):
if len(replace_var_pairs) == 0:
return
    # Make a copy of the (name, value) pairs before iterating, because the
    # loop below may delete entries from "bindings" while iterating.
    list_of_pairs = list(bindings.items())
for name, vlpair in list_of_pairs:
prefix = name[0]
var_descr_str = name[1:]
value = vlpair.val
dbg_loc = vlpair.loc
if prefix == '@':
            # At this point, we have probably already discarded the bindings
            # associated with any replaced variables.
            # Instead, look up the nodes directly:
cat_name, cat_node, leaf_node = DescrToCatLeafNodes(var_descr_str,
objectdefs,
dbg_loc)
# If this triplet corresponds to a variable we want to replace
# then lookup the corrected triplet
if (cat_name, cat_node, leaf_node) in replace_var_pairs:
(new_cat_name,
new_cat_node,
new_leaf_node) = replace_var_pairs[(cat_name,
cat_node,
leaf_node)]
# now reconstruct the string representing that variable
new_name = prefix + CanonicalDescrStr(new_cat_name,
new_cat_node,
new_leaf_node)
bindings[new_name] = bindings[name]
del bindings[name]
##############################################################
##################### BasicUI functions #####################
# These functions are examples of how to use the StaticObj
# and InstanceObj data structures above, and to read a ttree file.
# These are examples only. New programs based on ttree_lib.py
# will probably require their own settings and functions.
##############################################################
def BasicUIReadBindingsFile(bindings_so_far, filename):
try:
f = open(filename, 'r')
except IOError:
        sys.stderr.write('Error(' + g_filename + '):\n' '   unable to open file\n'
'\n'
' \"' + filename + '\"\n'
' for reading.\n'
'\n'
' (If you were not trying to open a file with this name, then this could\n'
' occur if you forgot to enclose your command-line-argument in quotes,\n'
' For example, use: \'$atom:wat[2]/H1 20\' or "\$atom:wat[2]/H1 to 20"\n'
' to set the variable $atom:wat[2]/H1 to 20.)\n')
sys.exit(1)
BasicUIReadBindingsStream(bindings_so_far, f, filename)
f.close()
def BasicUIReadBindingsText(bindings_so_far, text, source_name=''):
if sys.version > '3':
in_stream = io.StringIO(text)
else:
in_stream = cStringIO.StringIO(text)
return BasicUIReadBindingsStream(bindings_so_far, in_stream, source_name)
class ValLocPair(object):
__slots__ = ["val", "loc"]
def __init__(self,
val=None,
loc=None):
self.val = val
self.loc = loc
def BasicUIReadBindingsStream(bindings_so_far, in_stream, source_name=''):
# EXAMPLE (simple version)
# The simple version of this function commented out below
# does not handle variable whose names or values
# contain strange or escaped characters, quotes or whitespace.
# But I kept it in for illustrative purposes:
#
# for line in f:
# line = line.strip()
# tokens = line.split()
# if len(tokens) == 2:
# var_name = tokens[0]
# var_value = tokens[1]
# var_assignments[var_name] = var_value
# f.close()
lex = TemplateLexer(in_stream, source_name)
tmpllist = lex.ReadTemplate()
i = 0
if isinstance(tmpllist[0], TextBlock):
i += 1
while i + 1 < len(tmpllist):
# process one line at a time (2 entries per line)
var_ref = tmpllist[i]
text_block = tmpllist[i + 1]
assert(isinstance(var_ref, VarRef))
if (not isinstance(text_block, TextBlock)):
raise InputError('Error(' + g_filename + '):\n'
' This is not a valid name-value pair:\n'
' \"' + var_ref.prefix + var_ref.descr_str +
' ' + text_block.text.rstrip() + '\"\n'
                             ' Each variable assignment should contain a variable name (beginning with\n'
' @ or $) followed by a space, and then a string you want to assign to it.\n'
' (Surrounding quotes are optional and will be removed.)\n')
# Variables in the ttree_assignments.txt file use "full-path" style.
# In other words, the full name of the variable, (including all
# path information) is stored var_ref.descr_str,
# and the first character of the prefix stores either a @ or $
var_name = var_ref.prefix[:1] + var_ref.descr_str
text = SplitQuotedString(text_block.text.strip())
var_value = EscCharStrToChar(RemoveOuterQuotes(text, '\'\"'))
bindings_so_far[var_name] = ValLocPair(var_value, lex.GetSrcLoc())
i += 2
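# Example (illustrative): BasicUIReadBindingsStream() expects one name-value
# pair per line, using full-path variable names, eg:
#   $atom:wat[2]/H1 20
#   @bond:OH 14
# (Quotes surrounding the value are optional and will be removed.)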
class BasicUISettings(object):
"""
    BasicUISettings() contains several run-time user customizations
    for ttree. (These affect the order and values assigned to variables
    in a ttree file.)
This object, along with the other "UI" functions below are examples only.
(New programs based on ttree_lib.py will probably have their own settings
and functions.)
Members:
user_bindings
user_bindings_x
These are lists containing pairs of variable names,
and the string values they are bound to (which are typically numeric).
Values specified in the "user_bindings_x" list are "exclusive".
    This means their values are reserved, so that later on, when other
    variables (in the same category) are automatically assigned to values,
    care will be taken to avoid duplicating the values in user_bindings_x.
However variables in the "user_bindings" list are assigned without regard
to the values assigned to other variables, and may or may not be unique.
order_method
The order_method specifies the order in which values will be automatically
assigned to variables. (In the context of building molecular simulation
    input files, this helps the user to ensure that the order of the atoms
created by the ttree file matches the order they appear in other files
created by other programs.)
"""
def __init__(self,
user_bindings_x=None,
user_bindings=None,
order_method='by_command',
lex=None):
if user_bindings_x:
self.user_bindings_x = user_bindings_x
else:
self.user_bindings_x = OrderedDict()
if user_bindings:
self.user_bindings = user_bindings
else:
self.user_bindings = OrderedDict()
self.order_method = order_method
if lex == None:
self.lex = TemplateLexer()
else:
self.lex = lex
def BasicUIParseArgs(argv, settings, main=False):
"""
BasicUIParseArgs()
The following function contains part of the user interface for a
typical ttree-based program. This function processes an argument list
and extracts the common ttree user settings.
This function, along with the other "UI" functions below are examples only.
(New programs based on ttree_lib.py will probably have their own UI.)
"""
# argv = [arg for arg in orig_argv] # (make a deep copy of "orig_argv")
# This error message is used in multiple places:
bind_err_msg = 'should either be followed by a 2-column\n' +\
' file (containing variable-value pairs on each line).\n' +\
' --OR-- a quoted string (such as \"@atom:x 2\")\n' +\
' with the full variable name and its desired value.'
bind_err_msg_var = 'Missing value, or space needed separating variable\n' +\
' and value. (Remember to use quotes to surround the argument\n' +\
        ' containing the variable name, and its assigned value.)'
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-a':
if ((i + 1 >= len(argv)) or (argv[i + 1][:1] == '-')):
raise InputError('Error(' + g_filename + '):\n'
' Error in -a \"' +
argv[i + 1] + ' argument.\"\n'
' The -a flag ' + bind_err_msg)
if (argv[i + 1][0] in '@$'):
#tokens = argv[i+1].strip().split(' ')
tokens = SplitQuotedString(argv[i + 1].strip())
if len(tokens) < 2:
raise InputError('Error(' + g_filename + '):\n'
' Error in -a \"' +
argv[i + 1] + '\" argument.\n'
' ' + bind_err_msg_var)
BasicUIReadBindingsText(settings.user_bindings_x,
argv[i + 1],
'__command_line_argument__')
else:
BasicUIReadBindingsFile(settings.user_bindings_x,
argv[i + 1])
#i += 2
del(argv[i:i + 2])
elif argv[i] == '-b':
if ((i + 1 >= len(argv)) or (argv[i + 1][:1] == '-')):
raise InputError('Error(' + g_filename + '):\n'
' Error in -b \"' +
argv[i + 1] + ' argument.\"\n'
' The -b flag ' + bind_err_msg)
if (argv[i + 1][0] in '@$'):
#tokens = argv[i+1].strip().split(' ')
tokens = SplitQuotedString(argv[i + 1].strip())
if len(tokens) < 2:
raise InputError('Error(' + g_filename + '):\n'
' Error in -b \"' +
argv[i + 1] + '\" argument.\n'
' ' + bind_err_msg_var)
BasicUIReadBindingsText(settings.user_bindings,
argv[i + 1],
'__command_line_argument__')
else:
BasicUIReadBindingsFile(settings.user_bindings,
argv[i + 1])
#i += 2
del(argv[i:i + 2])
elif argv[i] == '-order-command':
settings.order_method = 'by_command'
#i += 1
del(argv[i:i + 1])
elif argv[i] == '-order-file':
settings.order_method = 'by_file'
#i += 1
del(argv[i:i + 1])
elif ((argv[i] == '-order-tree') or (argv[i] == '-order-dfs')):
settings.order_method = 'by_tree'
del(argv[i:i + 1])
elif ((argv[i] == '-import-path') or
(argv[i] == '-importpath') or
(argv[i] == '-import_path')):
if ((i + 1 >= len(argv)) or (argv[i + 1][:1] == '-')):
raise InputError('Error(' + g_filename + '):\n'
' Error in \"' +
argv[i] + '\" argument.\"\n'
' The \"' +
argv[
i] + '\" argument should be followed by the name of\n'
' an environment variable storing a path for including/importing files.\n')
custom_path = RemoveOuterQuotes(argv[i + 1])
include_path_list = custom_path.split(':')
for d in include_path_list:
if len(d) > 0:
settings.lex.include_path.append(d)
del(argv[i:i + 2])
elif (argv[i][0] == '-') and main:
# elif (__name__ == '__main__'):
raise InputError('Error(' + g_filename + '):\n'
                             'Unrecognized command line argument \"' + argv[i] + '\"\n')
else:
i += 1
if main:
# Instantiate the lexer we will be using.
        # (The lexer's __init__() function requires an opened file.
# Assuming __name__ == '__main__', then the name of that file should
# be the last remaining (unprocessed) argument in the argument list.
# Otherwise, then name of that file will be determined later by the
# python script which imports this module, so we let them handle it.)
if len(argv) == 1:
raise InputError('Error(' + g_filename + '):\n'
' This program requires at least one argument\n'
' the name of a file containing ttree template commands\n')
elif len(argv) == 2:
try:
# Parse text from the file named argv[1]
settings.lex.infile = argv[1]
settings.lex.instream = open(argv[1], 'r')
except IOError:
sys.stderr.write('Error(' + g_filename + '):\n'
' unable to open file\n'
' \"' + argv[1] + '\"\n'
' for reading.\n')
sys.exit(1)
del(argv[1:2])
else:
# if there are more than 2 remaining arguments,
problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
raise InputError('Syntax Error (' + g_filename + '):\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' ' + (' '.join(problem_args)) + '\n\n'
' (The actual problem may be earlier in the argument list.\n'
' If these arguments are source files, then keep in mind\n'
' that this program can not parse multiple source files.)\n'
' Check the syntax of the entire argument list.\n')
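# Example (illustrative; 'system.ttree' is a hypothetical input file):
#   argv = ['ttree.py', '-b', '@atom:x 2', '-order-file', 'system.ttree']
#   settings = BasicUISettings()
#   BasicUIParseArgs(argv, settings, main=True)
#   # -> settings.user_bindings now maps '@atom:x' to '2',
#   #    settings.order_method == 'by_file', settings.lex reads 'system.ttree',
#   #    and the processed arguments have been removed from argv.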
def BasicUI(settings,
static_tree_root,
instance_tree_root,
static_commands,
instance_commands):
"""
BasicUI()
This function loads a ttree file and optional custom bindings for it,
creates a "static" tree (of defined ttree classes),
creates an "instance" tree (of instantiated ttree objects),
automatically assigns values to unbound variables,
substitutes them into text templates (renders the template).
The actual writing of the templates to a file is not handled here.
"""
# Parsing, and compiling is a multi-pass process.
# Step 1: Read in the StaticObj (class) definitions, without checking
# whether or not the instance_children refer to valid StaticObj types.
sys.stderr.write('parsing the class definitions...')
static_tree_root.Parse(settings.lex)
# gc.collect()
#sys.stderr.write('static = ' + str(static_tree_root) + '\n')
# Step 2: Now that the static tree has been constructed, lookup
# any references to classes (StaticObjs), contained within
# the instance_children or class_parents of each node in
# static_tree_root. Replace them with (pointers to)
# the StaticObjs they refer to (and check validity).
# (Note: Variables stored within the templates defined by write()
# and write_once() statements may also refer to StaticObjs in
# the tree, but we leave these references alone. We handle
# these assignments later using "AssignVarPtrs()" below.)
sys.stderr.write(' done\nlooking up classes...')
static_tree_root.LookupStaticRefs()
# gc.collect()
# Step 3: Now scan through all the (static) variables within the templates
# and replace the (static) variable references to pointers
# to nodes in the StaticObj tree:
sys.stderr.write(' done\nlooking up @variables...')
# Step 3a)
# Here we assign pointers for @variables in "write_once(){text}" templates:
AssignStaticVarPtrs(static_tree_root, search_instance_commands=False)
# Step 3b) Replace any @variables with their equivalents (if applicable)
replace_var_pairs = {}
FindReplacementVarPairs(static_tree_root, replace_var_pairs)
ReplaceVars(static_tree_root, replace_var_pairs,
search_instance_commands=False)
# Step 3c)
# Here we assign pointers for @variables in "write(){text}" templates:
AssignStaticVarPtrs(static_tree_root, search_instance_commands=True)
ReplaceVars(static_tree_root, replace_var_pairs,
search_instance_commands=True)
sys.stderr.write(' done\nconstructing the tree of class definitions...')
sys.stderr.write(' done\n\nclass_def_tree = ' +
str(static_tree_root) + '\n\n')
# gc.collect()
# Step 4: Construct the instance tree (the tree of instantiated
# classes) from the static tree of type definitions.
sys.stderr.write('constructing the instance tree...\n')
class_parents_in_use = set([])
instance_tree_root.BuildInstanceTree(
static_tree_root, class_parents_in_use)
#sys.stderr.write('done\n garbage collection...')
# gc.collect()
sys.stderr.write(' done\n')
#sys.stderr.write('instance_tree = ' + str(instance_tree_root) + '\n')
# Step 5: The commands must be carried out in a specific order.
# (for example, the "write()" and "new" commands).
# Search through the tree, and append commands to a command list.
# Then re-order the list in the order the commands should have
# been executed in. (We don't carry out the commands yet,
# we just store them and sort them.)
class_parents_in_use = set([])
static_tree_root.BuildCommandList(static_commands)
instance_tree_root.BuildCommandList(instance_commands)
#sys.stderr.write('static_commands = '+str(static_commands)+'\n')
#sys.stderr.write('instance_commands = '+str(instance_commands)+'\n')
# Step 6: Replace any $variables with their equivalents (if applicable)
ReplaceVars(instance_tree_root, replace_var_pairs)
# Step 7: We are about to assign numbers to the variables.
# We need to decide the order in which to assign them.
# By default static variables (@) are assigned in the order
# they appear in the file.
# And, by default instance variables ($)
# are assigned in the order they are created during instantiation.
#sys.stderr.write(' done\ndetermining variable count order...')
AssignVarOrderByFile(static_tree_root, '@', search_instance_commands=True)
AssignVarOrderByCommand(instance_commands, '$')
# Step 8: Assign the variables.
# (If the user requested any customized variable bindings,
# load those now.)
if len(settings.user_bindings_x) > 0:
if len(replace_var_pairs) > 0:
ReplaceVarsInCustomBindings(settings.user_bindings_x,
static_tree_root,
instance_tree_root,
replace_var_pairs)
reserved_values = CustomizeBindings(settings.user_bindings_x,
static_tree_root,
instance_tree_root)
else:
reserved_values = None
sys.stderr.write('sorting variables...\n')
AutoAssignVals(static_tree_root,
(settings.order_method != 'by_tree'),
reserved_values)
AutoAssignVals(instance_tree_root,
(settings.order_method != 'by_tree'),
reserved_values)
if len(settings.user_bindings) > 0:
if len(replace_var_pairs) > 0:
ReplaceVarsInCustomBindings(settings.user_bindings,
static_tree_root,
instance_tree_root,
replace_var_pairs)
CustomizeBindings(settings.user_bindings,
static_tree_root,
instance_tree_root)
sys.stderr.write(' done\n')
return
def main():
"""
This is is a "main module" wrapper for invoking ttree.py
as a stand alone program. This program:
1)reads a ttree file,
2)constructs a tree of class definitions (g_objectdefs)
3)constructs a tree of instantiated class objects (g_objects),
4)automatically assigns values to the variables,
5)and carries out the "write" commands to write the templates a file(s).
"""
g_program_name = g_filename
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + ' ')
sys.stderr.write('\n(python version ' + str(sys.version) + ')\n')
try:
settings = BasicUISettings()
BasicUIParseArgs(sys.argv, settings, main=True)
        # Data structures to store the class definitions and instances
g_objectdefs = StaticObj('', None) # The root of the static tree
# has name '' (equivalent to '/')
g_objects = InstanceObj('', None) # The root of the instance tree
# has name '' (equivalent to '/')
# A list of commands to carry out
g_static_commands = []
g_instance_commands = []
BasicUI(settings,
g_objectdefs,
g_objects,
g_static_commands,
g_instance_commands)
# Now write the files
# (Finally carry out the "write()" and "write_once()" commands.)
# Optional: Multiple commands to write to the same file can be merged to
        # reduce the number of times the file is opened and closed.
sys.stderr.write('writing templates...\n')
# Erase the files that will be written to:
EraseTemplateFiles(g_static_commands)
EraseTemplateFiles(g_instance_commands)
g_static_commands = MergeWriteCommands(g_static_commands)
g_instance_commands = MergeWriteCommands(g_instance_commands)
# Write the files with the original variable names present
WriteTemplatesVarName(g_static_commands)
WriteTemplatesVarName(g_instance_commands)
# Write the files with the variable names substituted by values
WriteTemplatesValue(g_static_commands)
WriteTemplatesValue(g_instance_commands)
sys.stderr.write(' done\n')
# Step 11: Now write the variable bindings/assignments table.
sys.stderr.write('writing \"ttree_assignments.txt\" file...')
        # Erase the previous version of the file:
open('ttree_assignments.txt', 'w').close()
WriteVarBindingsFile(g_objectdefs)
WriteVarBindingsFile(g_objects)
sys.stderr.write(' done\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
| 2.75 | 3 |
environments/rlgymenv.py | rylz/openai-gail | 0 | 12757928 | <gh_stars>0
import numpy as np
import policyopt
import gym
from gym import spaces, envs
gym.undo_logger_setup()
import logging; logging.getLogger('gym.core').addHandler(logging.NullHandler())
class RLGymSim(policyopt.Simulation):
def __init__(self, env_name):
self.env = envs.make(env_name)
self.action_space = self.env.action_space
self.curr_obs = self.env.reset()
self.is_done = False
def step(self, action):
if isinstance(self.action_space, spaces.Discrete):
# We encode actions in finite spaces as an integer inside a length-1 array
# but Gym wants the integer itself
assert action.ndim == 1 and action.size == 1 and action.dtype in (np.int32, np.int64)
action = action[0]
else:
assert action.ndim == 1 and action.dtype == np.float64
self.curr_obs, reward, self.is_done, _ = self.env.step(action)
return reward
@property
def obs(self):
return self.curr_obs.copy()
@property
def done(self):
return self.is_done
def draw(self, track_body_name='torso'):
self.env.render()
def reset(self):
self.curr_obs = self.env.reset()
self.is_done = False
def _convert_space(space):
'''Converts a rl-gym space to our own space representation'''
if isinstance(space, spaces.Box):
assert space.low.ndim == 1 and space.low.shape[0] >= 1
return policyopt.ContinuousSpace(dim=space.low.shape[0])
elif isinstance(space, spaces.Discrete):
return policyopt.FiniteSpace(size=space.n)
raise NotImplementedError(space)
class RLGymMDP(policyopt.MDP):
def __init__(self, env_name):
print('Gym version:', gym.version.VERSION)
self.env_name = env_name
tmpsim = self.new_sim()
self._obs_space = _convert_space(tmpsim.env.observation_space)
self._action_space = _convert_space(tmpsim.env.action_space)
self.env_spec = tmpsim.env.spec
self.gym_env = tmpsim.env
@property
def obs_space(self):
return self._obs_space
@property
def action_space(self):
return self._action_space
def new_sim(self, init_state=None):
assert init_state is None
return RLGymSim(self.env_name)
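
# Illustrative usage sketch (assumptions: an env id such as 'CartPole-v0' is
# registered in the installed gym version; this block is not part of the
# original module):
if __name__ == '__main__':
    mdp = RLGymMDP('CartPole-v0')
    sim = mdp.new_sim()
    ret = 0.0
    while not sim.done:
        a = sim.env.action_space.sample()
        # Re-encode the sampled action the way this wrapper expects it.
        if isinstance(sim.action_space, spaces.Discrete):
            a = np.array([a])
        else:
            a = np.asarray(a, dtype=np.float64)
        ret += sim.step(a)
    print('episode return: %f' % ret)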
| 2.53125 | 3 |
tests/test_requestor.py | luisfmcalado/coinoxr | 2 | 12757929 | <gh_stars>1-10
import pytest
from coinoxr.requestor import Requestor
from coinoxr.response import Response
from tests.stub_client import StubHttpClient
from coinoxr.error import AppIdError
class TestRequestor:
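    """Unit tests for Requestor, exercised against a mocked HTTP client."""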
@pytest.fixture
def http_client(self, mocker):
return mocker.Mock(StubHttpClient)
@pytest.fixture
def requestor(self, http_client):
return Requestor("fake_api_key", client=http_client)
@pytest.fixture
def mock_response(self, http_client, mocker):
def mock_response(status_code, body):
http_client.get = mocker.MagicMock(return_value=Response(status_code, body))
return mock_response
@pytest.fixture
def mock_bad_response(self, http_client, mocker):
def mock_bad_response(status_code):
http_client.get = mocker.MagicMock(return_value=Response(status_code, None))
return mock_bad_response
def test_get_invalid_method(self, requestor, mock_bad_response):
mock_bad_response(405)
result = requestor.get("", {})
assert result.code == 405
assert result.body is None
def test_get_result(self, requestor, mock_response):
data = {"base": "USD"}
mock_response(200, data)
result = requestor.get("usage", {})
assert result.code == 200
assert result.body == data
def test_missing_api_id_exception(self, http_client):
import coinoxr
coinoxr.app_id = None
message = "No API key provided. Setup coinoxr.app_id = <API-Key> or app_id argument.You can get the API key from open exchange rates dashboard."
with pytest.raises(AppIdError) as ex:
Requestor(None, client=http_client)
assert message in str(ex.value)
def test_skip_app_id(self, http_client, mock_response):
data = {"base": "USD"}
mock_response(200, data)
requestor = Requestor(None, client=http_client, skip_app_id=True)
result = requestor.get("usage", {})
assert result.code == 200
assert result.body == data
| 2.265625 | 2 |
cvat/apps/engine/migrations/0018_jobcommit.py | wsp-digital/cvat | 4,197 | 12757930 | # Generated by Django 2.1.7 on 2019-04-17 09:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('engine', '0017_db_redesign_20190221'),
]
operations = [
migrations.CreateModel(
name='JobCommit',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('version', models.PositiveIntegerField(default=0)),
('timestamp', models.DateTimeField(auto_now=True)),
('message', models.CharField(default='', max_length=4096)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='commits', to='engine.Job')),
],
options={
'abstract': False,
'default_permissions': (),
},
),
]
| 1.742188 | 2 |
the_complete_python_&_postgreSQL_developer_course/a_lottery_app/list_comprehension.py | supermonkeyparadise/python | 0 | 12757931 | <gh_stars>0
user_input = '5,4,25,18,22,9'
user_numbers = user_input.split(',')
user_numbers_as_int = []
for number in user_numbers:
user_numbers_as_int.append(int(number))
print(user_numbers_as_int)
print([number for number in user_numbers])
print([number*2 for number in user_numbers])
print([int(number) for number in user_numbers])
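# Note: the last comprehension builds the same list as the explicit loop above:
# [int(number) for number in user_numbers] == user_numbers_as_int  # -> True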
| 3.609375 | 4 |
Projects Cousera/PythonDataStructures/Prefixes.py | teksaulo/My-projects | 0 | 12757932 | <gh_stars>0
line = "Please have a nice day"
# startswith takes a parameter: the prefix we're looking for.
line_new = line.startswith('Please')
print(line_new)
# Does it start with a lowercase p?
# And then we get back False because,
# no, it doesn't start with a lowercase p.
line_new = line.startswith('p')
print(line_new)
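
# The suffix counterpart works the same way:
# line.endswith('day')   # -> True for this string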
| 3.59375 | 4 |
main.py | regingelectricbear/QQ- | 0 | 12757933 | <filename>main.py
from selenium import webdriver
from util import getMsgList, getImgList, writeMsgList, writeImgList
import time
import os
print('--- QZone mini spider ---')
print('- Instructions:')
print('- 1. Make sure you are logged in to QQ')
print('- 2. This spider can only crawl QQ friends')
print('- 3. Make sure Chrome and a matching chromedriver are installed')
pageIndex = 1
imgNumber = 1
qqNumber = input("Enter the friend's QQ number to crawl: ")
foldPath = './result/' + qqNumber
imgPath = foldPath + '/images'
textPath = foldPath + '/text'
browser = webdriver.Chrome()
browser.get('http://user.qzone.qq.com/' + qqNumber + '/311')
# Create the archive folders
if os.path.exists('result') is False:
os.mkdir('./result')
if os.path.exists(foldPath) is False:
os.mkdir(foldPath)
if os.path.exists(imgPath) is False:
os.mkdir(imgPath)
    print('Creating image store..')
    print('Image store created..')
if os.path.exists(textPath) is False:
os.mkdir(textPath)
with open(textPath + '/' + qqNumber + '.txt', 'w') as f:
        print('Creating text store..')
        print('Text store created..')
# Go through identity verification (select the logged-in QQ account)
login_frame = browser.find_element_by_css_selector('#login_frame')
browser.switch_to.frame(login_frame)
browser.find_element_by_class_name('face').click()
browser.switch_to_default_content()
browser.implicitly_wait(10)
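# QZone renders the feed inside an iframe ('.app_canvas_frame'); each loop
# iteration below re-enters that frame and switches back to the default
# content before clicking through to the next page.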
try:
while True:
browser.save_screenshot('temp.png')
        # Report which page is being crawled
        print('Crawling page ' + str(pageIndex) + ' of posts..')
pageIndex += 1
        # Enter the QZone feed
frame = browser.find_element_by_css_selector('.app_canvas_frame')
browser.switch_to.frame(frame)
        # Fetch the post list -- text part
time.sleep(2.0)
contexts = browser.find_elements_by_class_name('content')
dates = browser.find_elements_by_css_selector('.c_tx.c_tx3.goDetail')
msgList = getMsgList(contexts, dates)
writeMsgList(msgList, textPath)
        # Fetch the post list -- image part
        time.sleep(2.0)
        imageAnchors = browser.find_elements_by_css_selector('[title=查看大图]')  # title text means "view full-size image"
imgList = getImgList(imageAnchors)
imgNumber = writeImgList(imgList, imgNumber, imgPath)
time.sleep(2.0)
        # Go to the next page
        browser.find_element_by_css_selector('[title=下一页]').click()  # title text means "next page"
browser.switch_to_default_content()
except Exception:
    print('Crawling finished..')
| 2.796875 | 3 |
avoid_obstacles/scripts/waypoints_manager.py | mattp256/wheele | 2 | 12757934 | <gh_stars>1-10
#!/usr/bin/env python
import rospy, math
import numpy as np
from std_msgs.msg import Int16
import tf
from geometry_msgs.msg import Quaternion, Point, Pose, PoseStamped, Vector3, Vector3Stamped
from sensor_msgs.msg import Imu, JointState
from nav_msgs.msg import Odometry
import time
import sys
class WaypointManager():
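    """Publishes waypoint goals in the odom frame, refines them with camera
    cone detections, and advances to the next GPS-derived waypoint once the
    robot reaches (or bumps into) the current cone."""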
def __init__(self):
self.tf_listener = tf.TransformListener()
self.near_goal_distance_threshold = 3.0
self.cone_transition = False
rospy.init_node('waypoint_manager')
rospy.Subscriber('/move_base_simple/goal', PoseStamped, self.clicked_goal_callback, queue_size = 2)
rospy.Subscriber('raw_cone_pose', PoseStamped, self.raw_cone_callback, queue_size=2)
rospy.Subscriber('obs_cone_pose', PoseStamped, self.obs_cone_callback, queue_size=2)
rospy.Subscriber('odom', Odometry, self.odom_callback, queue_size=1)
rospy.Subscriber('bump_switch', Int16, self.bump_switch_callback, queue_size=1)
self.wp_pub = rospy.Publisher('wp_goal', PoseStamped, queue_size=2)
self.wp_cone_pub = rospy.Publisher('wp_cone_pose', PoseStamped, queue_size=2)
self.found_cone_pub = rospy.Publisher('found_cone', Int16, queue_size=2)
print('Initializing Waypoint Manager.')
self.bump_switch = 0
self.found_cone = False
self.botx_odom = 0.0
self.boty_odom = 0.0
self.botx_map = 0.0
self.boty_map = 0.0
self.bot_pose_odom = PoseStamped()
self.bot_pose_odom.header.frame_id = "odom"
self.bot_pose_odom.pose.orientation.w = 1.0
self.goal = PoseStamped()
self.goal.header.frame_id = "odom"
self.goal.pose.orientation.w = 1.0
self.cur_wp = PoseStamped()
self.cur_wp.header.frame_id = "odom"
self.cur_wp.pose.orientation.w = 1.0
self.map_wp = PoseStamped()
self.map_wp.header.frame_id = "map"
self.map_wp.pose.orientation.w = 1.0
deg2meters = 111111.11
meters2deg = 1.0/deg2meters
#longitude, latitude
#x,y +x=east, +y=north
waypoints = np.array([
[0, 0], #lon, lat start
[4.0*meters2deg, -2.0*meters2deg], #first cone
[9.0*meters2deg, 0.0*meters2deg], #second cone
[9.0*meters2deg, 6.0*meters2deg] ]) #third cone
self.num_waypoints = len(waypoints)
[lon0,lat0] = waypoints[0]
lat_factor = math.cos(lat0*3.14159/180.0)
self.wp_map = np.zeros(waypoints.shape)
self.wp_map[:,0] = deg2meters*(waypoints[:,0]-lon0)
self.wp_map[:,1] = deg2meters*lat_factor*(waypoints[:,1]-lat0)
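        # deg2meters converts degree offsets to meters (~111,111 m per degree
        # of latitude); lat_factor = cos(lat0) is the usual correction for how
        # far apart longitude lines are away from the equator.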
self.wp_k = 1
time.sleep(1.0)
self.update_waypoint()
self.touch_cone_time = 0
def update_waypoint(self):
print "Update Waypoint"
#if(self.wp_k < self.num_waypoints):
self.map_wp.header.stamp = rospy.Time.now()
self.map_wp.pose.position.x = self.wp_map[self.wp_k,0]
self.map_wp.pose.position.y = self.wp_map[self.wp_k,1]
print "map_wp"
print self.map_wp
p_in_odom = self.xy_in_odom(self.map_wp)
print "p in odom"
print p_in_odom
if(p_in_odom):
self.cur_wp = p_in_odom
self.wp_k += 1
if(self.wp_k == self.num_waypoints):
self.wp_k = 1
print "wp_goal published"
self.wp_pub.publish(self.cur_wp)
self.wp_cone_pub.publish(p_in_odom) # publish this once to avoid obs so it will clear the costmap at the initial goal, until it finds an updated goal from the camera
def bump_switch_callback(self,sw):
#if(not self.cone_transition): # covered in main, we may want to know when the bump_switch is released
self.bump_switch = sw.data
def odom_callback(self,odom):
self.botx_odom = odom.pose.pose.position.x
self.boty_odom = odom.pose.pose.position.y
self.bot_pose_odom = odom.pose
def clicked_goal_callback(self,data):
print "Clicked Goal Callback"
if(data.header.frame_id != "odom"):
p_in_odom = self.xy_in_odom(data)
if(p_in_odom):
self.wp_pub.publish(p_in_odom)
else:
self.wp_pub.publish(data)
def raw_cone_callback(self, data):
#print "Raw Cone Callback"
p_in_map = self.xy_in_map(data)
if(p_in_map and (self.distance_between_poses(p_in_map, self.map_wp) < self.near_goal_distance_threshold)):
p_in_odom = self.xy_in_odom(data)
if(p_in_odom):
self.wp_cone_pub.publish(p_in_odom)
# WARNING, THIS CAN BE PUBLISHED JUST BEFORE update_waypoint updates self.map_wp
# THIS WILL CAUSE A FUTURE obs_cone_callback AFTER update_waypoint, and self.cur_wp will be reverted
def obs_cone_callback(self, data):
# ENSURE prev cone pose does not revert self.cur_wp if a raw cone pose was sent to avoid obs just before update_waypoint
if(not self.cone_transition and self.distance_between_poses(data, self.cur_wp) < self.near_goal_distance_threshold):
self.cur_wp = data
self.wp_pub.publish(self.cur_wp)
msg = Int16()
msg.data = 1
self.found_cone_pub.publish(msg)
self.found_cone = True
#print("Waypoint Manager Received nearest obstacle to cone")
def distance_between_poses(self, pose1, pose2):
dx = pose1.pose.position.x - pose2.pose.position.x
dy = pose1.pose.position.y - pose2.pose.position.y
dist_sqd = dx*dx + dy*dy
return np.sqrt(dist_sqd)
def xy_in_odom(self, poseStamped):
src_frame = poseStamped.header.frame_id
p_in_odom = None
count = 0
try:
self.tf_listener.waitForTransform("odom", src_frame, rospy.Time.now(), rospy.Duration(1.0))
p_in_odom = self.tf_listener.transformPose("odom", poseStamped)
except:
rospy.logwarn("Error converting to odom frame")
p_in_odom = None
return p_in_odom
def xy_in_map(self, poseStamped):
src_frame = poseStamped.header.frame_id
p_in_map = None
count = 0
try:
self.tf_listener.waitForTransform("map", src_frame, rospy.Time.now(), rospy.Duration(1.0))
p_in_map = self.tf_listener.transformPose("map", poseStamped)
except:
rospy.logwarn("Error converting to map frame")
p_in_map = None
return p_in_map
if __name__ == '__main__':
try:
wp_man = WaypointManager()
print("Starting Waypoint Manager")
r = rospy.Rate(50.0)
while not rospy.is_shutdown():
if(wp_man.cone_transition):
if( (rospy.Time.now() - wp_man.touch_cone_time).to_sec() > 5.0):
wp_man.cone_transition = False
else:
dist = wp_man.distance_between_poses(wp_man.cur_wp, wp_man.bot_pose_odom)
if ( (wp_man.bump_switch and dist < 1.0) or (dist < 1.0 and not wp_man.found_cone) or (dist < 0.7 and wp_man.found_cone) ):
wp_man.cone_transition = True
wp_man.touch_cone_time = rospy.Time.now()
print dist
print wp_man.bump_switch
msg = Int16()
msg.data = 0
wp_man.found_cone_pub.publish(msg) #Now wheele_local_planner will see current cone as obstacle and back up
wp_man.found_cone = False
wp_man.update_waypoint()
r.sleep()
except rospy.ROSInterruptException:
pass
| 1.945313 | 2 |
fairseq/criterions/kd_regularization_cross_entropy.py | Lollipop321/compressed-attention | 1 | 12757935 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('kd_regularization_cross_entropy')
class KDRegularizationCrossEntropyCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.alpha = args.alpha
self.temperature = args.temperature
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
        # fmt: off
parser.add_argument('--alpha', default=0., type=float, metavar='D',
help='params.reg_alpha')
parser.add_argument('--temperature', default=0., type=float, metavar='D',
help='params.reg_temperature')
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
# smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
"""
        loss function for manually-designed regularization: Tf-KD_{reg}
"""
alpha = self.alpha
T = self.temperature
correct_prob = 0.99 # the probability for correct class in u(k)
# loss_CE = F.cross_entropy(net_output, target)
output = net_output[0]
K = output.size(1)
multiplier = 100
teacher_soft = torch.ones_like(output).cuda()
teacher_soft = teacher_soft * (1 - correct_prob) / (K - 1) # p^d(k)
for i in range(output.shape[0]):
teacher_soft[i, target[i]] = correct_prob
loss_soft_regu = torch.nn.KLDivLoss()(F.log_softmax(output, dim=1), F.softmax(teacher_soft / T, dim=1)) * multiplier
if reduce:
nll_loss = nll_loss.sum()
# smooth_loss = smooth_loss.sum()
loss_soft_regu = loss_soft_regu.sum()
# eps_i = self.eps / lprobs.size(-1)
# loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
loss = (1. - alpha) * nll_loss + alpha * loss_soft_regu
return loss, nll_loss
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
return {
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
| 2.34375 | 2 |
RRtoolFC/lib/nodeFactory.py | davtoh/RRTools | 1 | 12757936 | from __future__ import print_function
# http://stackoverflow.com/questions/15247075/how-can-i-dynamically-create-derived-classes-from-a-base-class
from builtins import object
from pyqtgraph.flowchart import Node
from pyqtgraph.flowchart.library.common import CtrlNode
from RRtoolbox.lib.inspector import funcData
# TODO: see info under sharedlibs to come up with a way to convert functions to flowchart nodes.
class NodeGenerator(object):
"""
Generate Nodes.
:param nodeName: name of the node class. if None, it generates name on the fly from wrapped function
:param terminals: generic inputs and outputs. if None, it generates
terminals on the fly from wrapped function
:param uiTemplate: template to use in UI controls. if None, it uses Node class
:param nodeClass: class to use to generate the node. if None, it uses a convenient
Node class on the fly from wrapped function
"""
def __init__(self,nodeName=None,terminals=None,uiTemplate=None,nodeClass=None,classTemplate="{}Node",selfAs=None,addfuncs=None):
""" define a customized NodeGenerator
:return:
"""
# these are totally needed for the node creation
self.nodeName = nodeName # variable to use in the Node class to know it is a Node and show its name
# generateUi in flowchar.library.common.py
# currently it supports: 'intSpin', 'doubleSpin', 'spin', 'checkLoaded', 'combo', 'color', 'tip'
self.nodeClass = nodeClass # either way all should be derived from Node class
self.classTemplate = classTemplate
# these are use for the node creation but not that important
self.uiTemplate = uiTemplate
self.terminals = terminals # see Terminal under flowchart/Terminal.py
self.selfAs = selfAs
self.addfuncs = addfuncs
def config(self,func):
# kwargs are temporal parameters tu use instead of defaults
# func is a function that wants to be converted on the fly
if self.uiTemplate:
nodeClass = self.nodeClass or CtrlNode
if not issubclass(nodeClass,CtrlNode):
raise TypeError("nodeClass is not subclass of CtrlNode")
else:
nodeClass = self.nodeClass or Node
if not issubclass(nodeClass,Node):
raise TypeError("nodeClass is not subclass of Node")
data = funcData(func)
keywords, varargs = data["keywords"],data["varargs"]
if False and (keywords or varargs):
where = "and".join([i for i in (keywords,varargs) if i])
raise Exception("generic function has {}. It must have explicit arguments".format(where))
if False and varargs:
raise Exception("NodeGenerator does not support positional arguments like '{}', try keywords arguments".format(varargs))
args = data["args"]
useDisplay = "display" in args
nodeName = self.nodeName or data["name"]
classname = self.classTemplate.format(nodeName)
if not classname: raise Exception("classTemplate did not generate a classname")
doc = data["doc"]
templates = []
if self.uiTemplate:
for tmpl in self.uiTemplate:
replace = tmpl[0]
for i, arg in enumerate(args[:]):
if replace ==arg:
templates.append(replace) # register in process_handles
del args[i] # it won't apear in terminals
elif keywords: templates.append(replace) # register if support for more variables
if self.terminals:
terminals = self.terminals # replace by user terminals
else:
terminals = {arg:{"io":"in"} for arg in args} # only inputs registered
classData = funcData(nodeClass.__init__)
# know if nodeClass supports these parameters
classArgs = classData["args"]
if classData["keywords"]:
useAllowAddInput = useAllowAddOutput = useAllowRemove = True
else:
useAllowAddInput = "allowAddInput" in classArgs
useAllowAddOutput = "allowAddOutput" in classArgs
useAllowRemove = "allowRemove" in classArgs
useTerminals = "terminals" in classArgs
# handle function should be
# def hf(self,**kwargs):
# pass # process something here
# initialize handles
_init_handles = [] # it always must be
# now begin to register
# know if processing function supports these parameters
allowAddInput = bool(data["keywords"])
allowAddOutput = False
allowRemove = allowAddInput or allowAddOutput
if useAllowAddInput:
def handle_allowAddInput(self,kwargs):
kwargs["allowAddInput"] = allowAddInput
_init_handles.append(handle_allowAddInput)
if useAllowAddOutput:
def handle_allowAddOutput(self,kwargs):
kwargs["allowAddOutput"] = allowAddOutput
_init_handles.append(handle_allowAddOutput)
if useAllowRemove:
def handle_allowRemove(self,kwargs):
kwargs["allowRemove"] = allowRemove
_init_handles.append(handle_allowRemove)
if useTerminals:
def handle_terminals(self,kwargs):
kwargs["terminals"] = terminals
_init_handles.append(handle_terminals)
_process_handles = []
##
if not useDisplay:
def handle_display(self,kwargs):
del kwargs["display"]
_process_handles.append(handle_display)
if self.selfAs:
tempself = self.selfAs
def handle_addself(self,kwargs):
kwargs[tempself] = self
_process_handles.append(handle_addself)
d = {}
for tmpl in templates:
exec("def handle_{0}(self,kwargs): kwargs[{0}] = self.ctrls[{0}]".format(tmpl), d)
_process_handles.append(d["handle_{}".format(tmpl)])
def init(self,name,**kwargs):
for h in self._init_handles:
h(self,kwargs)
nodeClass.__init__(self,name,**kwargs)
def process(self, **kwargs):
for h in self._process_handles:
h(self,kwargs)
return func(**kwargs)
conf = dict(__init__=init,process=process,__doc__=doc,nodeName=nodeName,
_init_handles=_init_handles,_process_handles=_process_handles)
if self.uiTemplate: conf["uiTemplate"] = self.uiTemplate
if self.addfuncs: conf.update(self.addfuncs)
# returns parameters to use with type(what, bases, dict)
return classname, (nodeClass,), conf
def wrap(self, func):
# inspect.getsourcelines(my_function)
return type(*self.config(func))
__call__ = wrap
if __name__ == "__main__":
#new_class = type("NewClassName", (BaseClass), {"new_method": lambda self: ...})
@NodeGenerator()
def my_function1(param1, param2):
"some comment here"
print("processing something")
output1,output2 = 10,100
return output1,output2 # it must be clear
@NodeGenerator()
def my_function2(param1, param2, defparam1 = 10):
print("processing something")
output1,output2 = 10,100 # this shoud works
return output1,output2 # it must be clear
@NodeGenerator()
def my_function3(param1, param2, defparam1 = 10, defparam2 = 20, *args, **kwargs):
print("processing something")
output1,output2 = 10,100
return output1,output2 # it must be clear
    n1 = my_function1('my_function1')  # a Node instance needs a name
n2 = my_function2
n3 = my_function3
print(n1,n2,n3) | 3.171875 | 3 |
ideas/eternal_sunshine/gagan/utils.py | chrhenning/permafrostanalytics | 0 | 12757937 | <reponame>chrhenning/permafrostanalytics<gh_stars>0
import numpy as np
from PIL import Image
import torch
from torchvision import models
import torchvision.transforms as T
import os
import pdb
from skimage.feature import greycomatrix, greycoprops
def get_image(path, crop = []):
img = Image.open(path)
img = img.rotate(90, expand = 1)
return img.crop(crop)
def get_img_texture_features(path2img, crop = (50, 300, 2700, 4200), resize_to = [1200,800], box_size = [200,300], npatches = 20, patch_size = 30, verbose = True):
img = get_image(path2img, crop)
# resize ?
img = img.resize(resize_to, resample = 1)
img_list = crop_grid(img, box_size = box_size, top_offset = 100)
if verbose:
print('image block size : %d, %d'%(img_list[0].size))
img_patches = [generate_random_img_patches(I, patch_size=patch_size, num_locs=npatches) for I in img_list]
gclm_feats = np.stack([get_GCLM_features(p) for p in img_patches]).flatten()
return gclm_feats
def get_all_texture_mapping_features(path_to_images, crop = (50, 300, 2700, 4200) , resize_to = [800,1200],
box_size=[600,800], npatches = 20, patch_size = 30, verbose_every = 20):
dirs = os.listdir(path_to_images)
X = []
cnt = 0
for d in dirs:
fls = os.listdir(os.path.join(path_to_images, d))
for f in fls:
path2img = os.path.join(path_to_images, d, f)
# check file size
b = float(os.path.getsize(path2img))
if b < 300e3:
# blank dark image
continue
x = get_img_texture_features(path2img, crop, resize_to, box_size, npatches, patch_size, verbose=False)
cnt += 1
X.append(x)
if cnt % verbose_every ==0:
print(' done with %d images , feature space size %d '%(cnt, x.shape[-1]))
return X
def crop_grid(img, box_size = [900,500], top_offset = 0):
# can you split the image into small boxes of this size ?
H,W = img.size
nrows = int(np.floor(H / box_size[0]))
ncols = int(np.floor(W / box_size[1]))
imgs = []
left = 0
up = 0
low = up + box_size[1]
right = left + box_size[0]
for i in range(nrows):
for j in range(ncols):
I = img.crop((left, up, right, low))
imgs.append(I)
left += box_size[0]
right = left + box_size[0]
up += box_size[1]
low = up + box_size[1]
left = 0
right = left + box_size[0]
return imgs
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
def generate_random_img_locations(img, patch_size = 5, num_locs = 5):
# over sample row and column start indices
H,W = img.size
idx_rows = np.random.choice(np.arange(patch_size, H-patch_size), size = num_locs, replace=False)
idx_cols = np.random.choice(np.arange(patch_size, W-patch_size), size = num_locs, replace=False)
locations = [(r,c) for (r,c) in zip(idx_rows, idx_cols)]
patches = []
for loc in locations:
patches.append(np.array(img.crop([loc[0], loc[1], loc[0]+patch_size, loc[1]+patch_size] )))
return patches
def generate_random_img_patches(img, patch_size = 5, num_locs = 5):
# over sample row and column start indices and return numpy array
im = np.array(img)
H = im.shape[0]
W = im.shape[1]
idx_rows = np.random.choice(np.arange(patch_size, H-patch_size), size = num_locs, replace=True)
idx_cols = np.random.choice(np.arange(patch_size, W-patch_size), size = num_locs, replace=True)
locations = [(r,c) for (r,c) in zip(idx_rows, idx_cols)]
patches = []
for loc in locations:
I = im[loc[0] : loc[0]+patch_size, loc[1]:loc[1]+patch_size]
if I.shape[1] < patch_size:
pdb.set_trace()
patches.append(I)
return patches
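
# Gray-Level Co-occurrence Matrix (GLCM) texture features: each patch is
# converted to grayscale, a co-occurrence matrix is computed at offset 5 px /
# angle 0, and its dissimilarity and correlation statistics are concatenated
# into the feature vector.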
def get_GCLM_features(patches):
# compute some GLCM properties each patch
xs = np.zeros((len(patches),1))
ys = np.zeros((len(patches),1))
for i,patch in enumerate(patches):
# convert patch to grayscale
patch = rgb2gray(patch).astype('uint8')
glcm = greycomatrix(patch, [5], [0], 256, symmetric=True, normed=True)
xs[i] = greycoprops(glcm, 'dissimilarity')[0, 0]
ys[i] = greycoprops(glcm, 'correlation')[0, 0]
return np.concatenate([xs, ys],axis=1).flatten() | 2.125 | 2 |
apps/_dashboard/__init__.py | lucadealfaro/py4web | 0 | 12757938 | <reponame>lucadealfaro/py4web
import base64
import copy
import datetime
import io
import os
import shutil
import subprocess
import sys
import uuid
import zipfile
import requests
from pydal.validators import CRYPT
from yatl.helpers import BEAUTIFY
import py4web
from py4web import (
HTTP,
URL,
Translator,
__version__,
abort,
action,
redirect,
request,
response,
)
from py4web.core import ErrorStorage, Fixture, Reloader, Session, dumps
from py4web.utils.factories import ActionFactory
from .diff2kryten import diff2kryten
from .utils import *
MODE = os.environ.get("PY4WEB_DASHBOARD_MODE", "none")
FOLDER = os.environ["PY4WEB_APPS_FOLDER"]
APP_FOLDER = os.path.dirname(__file__)
T_FOLDER = os.path.join(APP_FOLDER, "translations")
T = Translator(T_FOLDER)
# in demo mode cannot access the local tickets db
if MODE == 'demo':
class ErrorStorage():
def clear(self): pass
def get(self, *args, **kwargs): return None
error_storage = ErrorStorage()
session = Session()
def run(command, project):
"""for runing git commands inside an app (project)"""
return subprocess.check_output(
command.split(), cwd=os.path.join(FOLDER, project)
).decode()
def get_commits(project):
"""list of git commits for the project"""
output = run("git log", project)
commits = []
for line in output.split("\n"):
if line.startswith("commit "):
commit = {"code": line[7:], "message": "", "author": "", "date": ""}
commits.append(commit)
elif line.startswith("Author: "):
commit["author"] = line[8:]
elif line.startswith("Date: "):
commit["date"] = datetime.datetime.strptime(
line[6:].strip(), "%a %b %d %H:%M:%S %Y %z"
)
else:
commit["message"] += line.strip() + "\n"
return commits
def get_branches(project):
"""dictionary of git local branches for the project"""
output = run("git branch", project)
branches = {"current": "", "other": []}
for line in output.split("\n"):
if line.startswith("* "):
branches["current"] = line[2:]
elif not line == "":
branches["other"].append(line[2:])
return branches
def is_git_repo(project):
return os.path.exists(os.path.join(FOLDER, project, ".git/config"))
class Logged(Fixture):
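    """py4web fixture that aborts the request with 403 unless the dashboard
    session holds a logged-in user."""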
def __init__(self, session):
self.__prerequisites__ = [session]
self.session = session
def on_request(self):
user = self.session.get("user")
if not user or not user.get("id"):
abort(403)
authenticated = ActionFactory(Logged(session))
session_secured = action.uses(Logged(session))
@action("version")
def version():
return __version__
if MODE in ("demo", "readonly", "full"):
@action("index")
@action.uses("index.html", session, T)
def index():
return dict(
languages=dumps(getattr(T.local, 'language', {})),
mode=MODE,
user_id=(session.get("user") or {}).get("id"),
)
@action("login", method="POST")
@action.uses(session)
def login():
if MODE == "demo":
valid = True
else:
valid = False
password = request.json.get("password")
password_file = os.environ.get("PY4WEB_PASSWORD_FILE")
if password and password_file and os.path.exists(password_file):
with open(password_file, "r") as fp:
encrypted_password = fp.read().strip()
valid = CRYPT()(password)[0] == encrypted_password
if valid:
session["user"] = dict(id=1)
return dict(user=valid, mode=MODE)
@action("logout", method="POST")
@action.uses(session)
def logout():
session["user"] = None
return dict()
@action("dbadmin")
@action.uses(Logged(session), "dbadmin.html")
def dbadmin():
return dict(languages=dumps(getattr(T.local, 'language', {})))
@action("info")
@session_secured
def info():
vars = [{"name": "python", "version": sys.version}]
for module in sorted(sys.modules):
if not "." in module:
try:
m = __import__(module)
if "__version__" in dir(m):
vars.append({"name": module, "version": m.__version__})
except ImportError:
pass
return {"status": "success", "payload": vars}
@action("routes")
@session_secured
def routes():
"""Returns current registered routes"""
return {"payload": Reloader.ROUTES, "status": "success"}
@action("apps")
@session_secured
def apps():
"""Returns a list of installed apps"""
apps = os.listdir(FOLDER)
apps = [
{"name": app, "error": Reloader.ERRORS.get(app)}
for app in apps
if os.path.isdir(os.path.join(FOLDER, app))
and not app.startswith("__")
and not app.startswith(".")
]
apps.sort(key=lambda item: item["name"])
return {"payload": apps, "status": "success"}
@action("delete_app/<name:re:\w+>", method="POST")
@session_secured
def delete_app(name):
"""delete the app"""
path = os.path.join(FOLDER, name)
timestamp = datetime.datetime.now().strftime("%Y-%m-%d")
archive = os.path.join(FOLDER, "%s.%s.zip" % (name, timestamp))
if os.path.exists(path) and os.path.isdir(path):
# zip the folder, just in case
shutil.make_archive(archive, "zip", path)
# then remove the app
shutil.rmtree(path)
return {"status": "success", "payload": "Deleted"}
return {"status": "success", "payload": "App does not exist"}
@action("new_file/<name:re:\w+>/<file_name:path>", method="POST")
@session_secured
def new_file(name, file_name):
"""asign an sanitize inputs"""
path = os.path.join(FOLDER, name)
form = request.json
if not os.path.exists(path):
return {"status": "success", "payload": "App does not exist"}
full_path = os.path.join(path, file_name)
if not full_path.startswith(path + os.sep):
return {"status": "success", "payload": "Invalid path"}
if os.path.exists(full_path):
return {"status": "success", "payload": "File already exists"}
parent = os.path.dirname(full_path)
if not os.path.exists(parent):
os.makedirs(parent)
with open(full_path, "w") as fp:
if full_path.endswith(".html"):
fp.write('[[extend "layout.html"]]\nHello World!')
elif full_path.endswith(".py"):
fp.write("# -*- coding: utf-8 -*-")
return {"status": "success"}
@action("walk/<path:path>")
@session_secured
def walk(path):
"""Returns a nested folder structure as a tree"""
top = os.path.join(FOLDER, path)
if not os.path.exists(top) or not os.path.isdir(top):
return {"status": "error", "message": "folder does not exist"}
store = {}
for root, dirs, files in os.walk(top, topdown=False):
store[root] = {
"dirs": list(
sorted(
[
{"name": dir, "content": store[os.path.join(root, dir)]}
for dir in dirs
if dir[0] != "." and dir[:2] != "__"
],
key=lambda item: item["name"],
)
),
"files": list(
sorted(
[
f
for f in files
if f[0] != "." and f[-1] != "~" and f[-4:] != ".pyc"
]
)
),
}
return {"payload": store[top], "status": "success"}
@action("load/<path:path>")
@session_secured
def load(path):
"""Loads a text file"""
path = safe_join(FOLDER, path) or abort()
content = open(path, "rb").read().decode("utf8")
return {"payload": content, "status": "success"}
@action("load_bytes/<path:path>")
@session_secured
def load_bytes(path):
"""Loads a binary file"""
path = safe_join(FOLDER, path) or abort()
return open(path, "rb").read()
@action("packed/<path:path>")
@session_secured
def packed(path):
"""Packs an app"""
appname = path.split(".")[-2]
appname = sanitize(appname)
app_dir = os.path.join(FOLDER, appname)
store = io.BytesIO()
zip = zipfile.ZipFile(store, mode="w", compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(app_dir, topdown=False):
if not root.startswith("."):
for name in files:
if not (
name.endswith("~") or name.endswith(".pyc") or name[:1] in "#."
):
filename = os.path.join(root, name)
short = filename[len(app_dir + os.path.sep) :]
print("added", filename, short)
zip.write(filename, short)
zip.close()
data = store.getvalue()
response.headers["Content-Type"] = "application/zip"
return data
@action("tickets")
@session_secured
def tickets():
"""Returns most recent tickets grouped by path+error"""
tickets = error_storage.get()
return {"payload": tickets}
@action("clear")
@session_secured
def clear_tickets():
error_storage.clear()
@action("ticket/<ticket_uuid>")
@action.uses("ticket.html")
@session_secured
def error_ticket(ticket_uuid):
return dict(ticket=error_storage.get(ticket_uuid=ticket_uuid))
@action("rest/<path:path>", method=["GET", "POST", "PUT", "DELETE"])
@session_secured
def api(path):
# this is not final, requires pydal 19.5
args = path.split("/")
app_name = args[0]
from py4web.core import Reloader, DAL
from pydal.restapi import RestAPI, Policy
if MODE != "full":
raise HTTP(403)
module = Reloader.MODULES[app_name]
def url(*args):
return request.url + "/" + "/".join(args)
databases = [
name for name in dir(module) if isinstance(getattr(module, name), DAL)
]
if len(args) == 1:
def tables(name):
db = getattr(module, name)
return [
{
"name": t._tablename,
"fields": t.fields,
"link": url(name, t._tablename) + "?model=true",
}
for t in getattr(module, name)
]
return {
"databases": [
{"name": name, "tables": tables(name)} for name in databases
]
}
elif len(args) > 2 and args[1] in databases:
db = getattr(module, args[1])
id = args[3] if len(args) == 4 else None
policy = Policy()
for table in db:
policy.set(
table._tablename,
"GET",
authorize=True,
allowed_patterns=["**"],
allow_lookup=True,
fields=table.fields,
)
policy.set(table._tablename, "PUT", authorize=True, fields=table.fields)
policy.set(
table._tablename, "POST", authorize=True, fields=table.fields
)
policy.set(table._tablename, "DELETE", authorize=True)
data = action.uses(db, T)(
lambda: RestAPI(db, policy)(
request.method, args[2], id, request.query, request.json
)
)()
else:
data = {}
if "code" in data:
response.status = data["code"]
return data
if MODE == "full":
@action("reload")
@action("reload/<name>")
@session_secured
def reload(name=None):
"""Reloads installed apps"""
Reloader.import_app(name) if name else Reloader.import_apps()
return "ok"
@action("save/<path:path>", method="POST")
@session_secured
def save(path, reload_app=True):
"""Saves a file"""
app_name = path.split("/")[0]
path = safe_join(FOLDER, path) or abort()
with open(path, "wb") as myfile:
myfile.write(request.body.read())
if reload_app:
Reloader.import_app(app_name)
return {"status": "success"}
@action("delete/<path:path>", method="POST")
@session_secured
def delete(path):
"""Deletes a file"""
fullpath = safe_join(FOLDER, path) or abort()
recursive_unlink(fullpath)
return {"status": "success"}
def install_by_unzip_or_treecopy(source, source_dir, target_dir):
"""Installs an app by either unzipping it (if py4web installed from pip)
or by copying the directory tree (if installed from source)."""
if os.path.exists(source):
zfile = zipfile.ZipFile(source, "r")
zfile.extractall(target_dir)
zfile.close()
else:
shutil.copytree(source_dir, target_dir)
def prepare_target_dir(form, target_dir):
"""Prepares the target directory for the new app.
    In 'new' mode the directory must not already exist; in 'replace' mode any
    existing directory is removed first."""
if form["mode"] == "new":
if os.path.exists(target_dir):
abort(500) # already validated client side
elif form["mode"] == "replace":
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
else:
abort(500) # not a replacement
@action("new_app", method="POST")
@session_secured
def new_app():
form = request.json
# Directory for zipped assets
assets_dir = os.path.join(os.path.dirname(py4web.__file__), "assets")
app_name = form['name']
target_dir = safe_join(FOLDER, app_name)
if form["type"] == "minimal":
source = os.path.join(assets_dir, "py4web.app._minimal.zip")
source_dir = safe_join(FOLDER, "_minimal")
prepare_target_dir(form, target_dir)
install_by_unzip_or_treecopy(source, source_dir, target_dir)
elif form["type"] == "scaffold":
source = os.path.join(assets_dir, "py4web.app._scaffold.zip")
source_dir = safe_join(FOLDER, "_scaffold")
prepare_target_dir(form, target_dir)
install_by_unzip_or_treecopy(source, source_dir, target_dir)
elif form["type"] == "web":
prepare_target_dir(form, target_dir)
source = form["source"]
if source.endswith(".zip"): # install from the web (zip file)
res = requests.get(source)
mem_zip = io.BytesIO(res.content)
zfile = zipfile.ZipFile(mem_zip, "r")
zfile.extractall(target_dir)
zfile.close()
elif source.endswith(".git"): # clone from a git repo
process = subprocess.Popen(
["git", "clone", source, form["name"]], cwd=FOLDER
)
process.communicate()
if process.returncode != 0:
abort(500)
elif form["type"] == "upload":
prepare_target_dir(form, target_dir)
source_stream = io.BytesIO(base64.b64decode(form["file"]))
zfile = zipfile.ZipFile(source_stream, "r")
zfile.extractall(target_dir)
zfile.close()
else:
abort(500)
settings = os.path.join(target_dir, "settings.py")
if os.path.exists(settings):
with open(settings) as fp:
data = fp.read()
data = data.replace("<session-secret-key>", str(uuid.uuid4()))
with open(settings, "w") as fp:
fp.write(data)
Reloader.import_app(app_name)
return {"status": "success"}
#
# Below here work in progress
#
@action("gitlog/<project>")
@action.uses(Logged(session), "gitlog.html")
def gitlog(project):
if not is_git_repo(project):
return "Project is not a GIT repo"
branches = get_branches(project)
commits = get_commits(project)
return dict(
commits=commits, checkout=checkout, project=project, branches=branches
)
@authenticated.callback()
def checkout(project, commit):
if not is_git_repo(project):
raise HTTP(400)
run("git stash", project)
run("git checkout " + commit, project)
Reloader.import_app(project)
@action("swapbranch/<project>", method="POST")
@action.uses(Logged(session))
def swapbranch(project):
if not is_git_repo(project):
raise HTTP(400)
branch = (
request.forms.get("branches") if request.forms.get("branches") else "master"
)
# swap branches then go back to gitlog so new commits load
checkout(project, branch)
redirect(URL("gitlog", project))
@action("gitshow/<project>/<commit>")
@action.uses(Logged(session), "gitshow.html")
def gitshow(project, commit):
if not is_git_repo(project):
raise HTTP(400)
flag = request.params.get("showfull")
opt = ""
if flag == "true":
opt = " -U9999"
patch = run("git show " + commit + opt, project)
return diff2kryten(patch)
| 2.15625 | 2 |
Python/Pandas/loop_Enumerate.py | themohitpapneja/Code_Dump | 0 | 12757939 | <reponame>themohitpapneja/Code_Dump
a=[1,2]
for i, s in enumerate(a):
    print(i, "index contains", s)
| 3.046875 | 3 |
miasmap.py | DarkStarSword/miasmata-fixes | 10 | 12757940 | #!/usr/bin/env python
from PIL import Image
import sys
width = 8238
height = 8193
minz = -1815
maxz = 1833
width = 4096
height = 4096
scale = 2
try:
image = Image.open('Map_FilledIn.jpg').transpose(Image.ROTATE_270).resize((width, height))
except:
import traceback
traceback.print_exc()
image = Image.new('RGB', (width, height), (0,0,0))
image = Image.eval(image, lambda x: x/3)
pix = image.load()
def save_image(filename):
print>>sys.stderr, 'Saving %s...' % filename
image.transpose(Image.ROTATE_90).save(filename)
def plot(x, y, (r, g, b)):
x = max(0, min(x / scale, width-1))
y = max(0, min(y / scale, height-1))
(r1, g1, b1) = pix[x, y]
pix[x, y] = (r1 + r, g1 + g, b1 + b)
def plot_rect(x1, y1, c1, x2, y2, c2):
xr = x2 - x1
yr = y2 - y1
def interpolate(p):
return [ int(p*v1 + (1.0-p)*v2) for (v1,v2) in zip(c1, c2) ]
for x in range(x1, x2+1, scale):
p = float(x - x1) / xr
p1 = p / 2.0
p2 = p1 + 0.5
rgb1 = interpolate(p1)
rgb2 = interpolate(p2)
plot(x, y1, rgb1)
plot(x, y2, rgb2)
for y in range(y1+1, y2, scale):
p = float(y - y1) / yr
p1 = p / 2.0
p2 = p1 + 0.5
rgb1 = interpolate(p1)
rgb2 = interpolate(p2)
plot(x1, y, rgb1)
plot(x2, y, rgb2)
def plot_point(x, y, rgb1 = (255, 255, 255), rgb2 = (192, 192, 192)):
plot(x, y, rgb1)
for (xx, yy) in ((x-1*scale, y), (x+1*scale, y), (x, y-1*scale), (x, y+1*scale)):
plot(xx, yy, rgb2)
def plot_cross(x, y, d = 20, rgb = (255, 255, 255)):
for (x1, y1) in zip(range(x-d, x+d), range(y-d, y+d)):
plot(x1, y1, rgb)
for (x1, y1) in zip(reversed(range(x-d, x+d)), range(y-d, y+d)):
plot(x1, y1, rgb)
def plot_square(x, y, d = 20, rgb = (255, 255, 255)):
for y1 in range(y-d, y+d, scale):
for x1 in range(x-d, x+d, scale):
plot(x1, y1, rgb)
# vi:noexpandtab:sw=8:ts=8
| 3.015625 | 3 |
labs/hello_world.py | MHSRoboticsCode/2015 | 0 | 12757941 | # 2015 lab 1
print('Hello World')
| 1.320313 | 1 |
setup.py | steve/couchapp | 1 | 12757942 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 <NAME> <<EMAIL>>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import os
import sys
from ez_setup import use_setuptools
if 'cygwin' in sys.platform.lower():
min_version='0.6c6'
else:
min_version='0.6a9'
try:
use_setuptools(min_version=min_version)
except TypeError:
# If a non-local ez_setup is already imported, it won't be able to
# use the min_version kwarg and will bail with TypeError
use_setuptools()
from setuptools import setup, find_packages, Extension, Feature
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
data_files = []
for dir, dirs, files in os.walk('app-template'):
data_files.append((os.path.join('couchapp', dir),
[os.path.join(dir, file_) for file_ in files]))
for dir, dirs, files in os.walk('vendor'):
data_files.append((os.path.join('couchapp', dir),
[os.path.join(dir, file_) for file_ in files]))
for dir, dirs, files in os.walk('src/couchapp'):
for i, dirname in enumerate(dirs):
if dirname.startswith('.'): del dirs[i]
data_files.append((dir, [os.path.join(dir, file_) for file_ in files]))
speedups = Feature(
"options C speed-enhancement modules",
standard=True,
ext_modules = [
Extension("couchapp/contrib/simplejson._speedups", ["src/couchapp/contrib/simplejson/_speedups.c"]),
],
)
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError,
IOError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError, x:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors, x:
raise BuildFailed()
def run_setup(with_binary):
if with_binary:
features = {'speedups': speedups}
else:
features = {}
setup(
name = 'Couchapp',
version = '0.3.2',
url = 'http://github.com/couchapp/couchapp/tree/master',
license = 'Apache License 2',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Standalone CouchDB Application Development Made Simple.',
long_description = """CouchApp is a set of helpers and a jQuery plugin
that conspire to get you up and running on CouchDB quickly and
correctly. It brings clarity and order to the freedom of CouchDB's
document-based approach.""",
keywords = 'couchdb couchapp',
platforms = ['any'],
zip_safe = False,
packages=find_packages('src'),
package_dir={
'': 'src'
},
data_files = data_files,
include_package_data = True,
entry_points = {
'console_scripts': [
'couchapp = couchapp.bin.couchapp_cli:main',
]
},
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Database',
'Topic :: Utilities',
],
features=features,
cmdclass={'build_ext': ve_build_ext},
test_suite='tests',
)
try:
run_setup(True)
except BuildFailed:
BUILD_EXT_WARNING = "WARNING: The C extension could not be compiled, speedups are not enabled."
print '*' * 75
print BUILD_EXT_WARNING
print "Failure information, if any, is above."
print "I'm retrying the build without the C extension now."
print '*' * 75
run_setup(False)
print '*' * 75
print BUILD_EXT_WARNING
print "Plain-Python installation succeeded."
print '*' * 75
| 1.875 | 2 |
third_party/shaderc/src/glslc/test/parameter_tests.py | zipated/src | 2,151 | 12757943 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader, StdinShader, TempFileName
@inside_glslc_testsuite('File')
class SimpleFileCompiled(expect.ValidObjectFile):
"""Tests whether or not a simple glsl file compiles."""
shader = FileShader('#version 310 es\nvoid main() {}', '.frag')
glslc_args = ['-c', shader]
@inside_glslc_testsuite('File')
class NotSpecifyingOutputName(expect.SuccessfulReturn,
expect.CorrectObjectFilePreamble):
"""Tests that when there is no -o and -E/-S/-c specified, output as a.spv."""
shader = FileShader('#version 140\nvoid main() {}', '.frag')
glslc_args = [shader]
def check_output_a_spv(self, status):
output_name = os.path.join(status.directory, 'a.spv')
return self.verify_object_file_preamble(output_name)
@inside_glslc_testsuite('Parameters')
class HelpParameters(
expect.ReturnCodeIsZero, expect.StdoutMatch, expect.StderrMatch):
"""Tests the --help flag outputs correctly and does not produce and error."""
glslc_args = ['--help']
expected_stdout = '''glslc - Compile shaders into SPIR-V
Usage: glslc [options] file...
An input file of - represents standard input.
Options:
-c Only run preprocess, compile, and assemble steps.
-Dmacro[=defn] Add an implicit macro definition.
-E Outputs only the results of the preprocessing step.
Output defaults to standard output.
-fshader-stage=<stage>
Treat subsequent input files as having stage <stage>.
Valid stages are vertex, fragment, tesscontrol, tesseval,
geometry, and compute.
-g Generate source-level debug information.
Currently this option has no effect.
--help Display available options.
--version Display compiler version information.
-I <value> Add directory to include search path.
-o <file> Write output to <file>.
A file name of '-' represents standard output.
-std=<value> Version and profile for input files. Possible values
are concatenations of version and profile, e.g. 310es,
450core, etc.
-M Generate make dependencies. Implies -E and -w.
-MM An alias for -M.
-MD Generate make dependencies and compile.
-MF <file> Write dependency output to the given file.
-MT <target> Specify the target of the rule emitted by dependency
generation.
-S Only run preprocess and compilation steps.
--target-env=<environment>
Set the target shader environment, and the semantics
of warnings and errors. Valid values are 'opengl',
'opengl_compat' and 'vulkan'. The default value is 'vulkan'.
-w Suppresses all warning messages.
-Werror Treat all warnings as errors.
-x <language> Treat subsequent input files as having type <language>.
The only supported language is glsl.
'''
expected_stderr = ''
@inside_glslc_testsuite('Parameters')
class HelpIsNotTooWide(expect.StdoutNoWiderThan80Columns):
"""Tests that --help output is not too wide."""
glslc_args = ['--help']
@inside_glslc_testsuite('Parameters')
class UnknownSingleLetterArgument(expect.ErrorMessage):
"""Tests that an unknown argument triggers an error message."""
glslc_args = ['-a']
expected_error = ["glslc: error: unknown argument: '-a'\n"]
@inside_glslc_testsuite('Parameters')
class UnknownMultiLetterArgument(expect.ErrorMessage):
"""Tests that an unknown argument triggers an error message."""
glslc_args = ['-zzz']
expected_error = ["glslc: error: unknown argument: '-zzz'\n"]
@inside_glslc_testsuite('Parameters')
class UnsupportedOption(expect.ErrorMessage):
"""Tests that an unsupported option triggers an error message."""
glslc_args = ['--unsupported-option']
expected_error = [
"glslc: error: unsupported option: '--unsupported-option'\n"]
@inside_glslc_testsuite('File')
class FileNotFound(expect.ErrorMessage):
"""Tests the error message if a file cannot be found."""
blabla_file = TempFileName('blabla.frag')
glslc_args = [blabla_file]
expected_error = [
"glslc: error: cannot open input file: '", blabla_file,
"': No such file or directory\n"]
@inside_glslc_testsuite('Unsupported')
class LinkingNotSupported(expect.ErrorMessage):
"""Tests the error message generated by linking not supported yet."""
shader1 = FileShader('#version 140\nvoid main() {}', '.vert')
shader2 = FileShader('#version 140\nvoid main() {}', '.frag')
glslc_args = [shader1, shader2]
expected_error = [
'glslc: error: linking multiple files is not supported yet. ',
'Use -c to compile files individually.\n']
@inside_glslc_testsuite('Unsupported')
class MultipleStdinUnsupported(expect.ErrorMessage):
"""Tests the error message generated by having more than one - input."""
glslc_args = ['-c', '-fshader-stage=vertex', '-', '-']
expected_error = [
'glslc: error: specifying standard input "-" as input more'
' than once is not allowed.\n']
@inside_glslc_testsuite('Parameters')
class StdinWithoutShaderStage(expect.StdoutMatch, expect.StderrMatch):
"""Tests that you must use -fshader-stage when specifying - as input."""
shader = StdinShader(
"""#version 140
int a() {
}
void main() {
int x = a();
}
""")
glslc_args = [shader]
expected_stdout = ''
expected_stderr = [
"glslc: error: '-': -fshader-stage required when input is from "
'standard input "-"\n']
| 2.171875 | 2 |
training/vae_dcgan.py | RuiLiFeng/LAE | 0 | 12757944 | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Network architectures used in the StyleGAN2 paper."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d, upsample_conv_2d, conv_downsample_2d
from dnnlib.tflib.ops.fused_bias_act import fused_bias_act
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolution or fully-connected layer.
def get_weight(shape, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = gain / np.sqrt(fan_in) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
else:
init_std = he_std / lrmul
runtime_coef = lrmul
# Create variable.
init = tf.initializers.random_normal(0, init_std)
return tf.get_variable(weight_var, shape=shape, initializer=init) * runtime_coef
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense_layer(x, fmaps, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolution layer with optional upsampling or downsampling.
def conv2d_layer(x, fmaps, kernel, up=False, down=False, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
return x
#----------------------------------------------------------------------------
# Apply bias and activation func.
def apply_bias_act(x, act='linear', alpha=None, gain=None, lrmul=1, bias_var='bias'):
b = tf.get_variable(bias_var, shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
return fused_bias_act(x, b=tf.cast(b, x.dtype), act=act, alpha=alpha, gain=gain)
#----------------------------------------------------------------------------
# Naive upsampling (nearest neighbor) and downsampling (average pooling).
def naive_upsample_2d(x, factor=2):
with tf.variable_scope('NaiveUpsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H, 1, W, 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
return tf.reshape(x, [-1, C, H * factor, W * factor])
def naive_downsample_2d(x, factor=2):
with tf.variable_scope('NaiveDownsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H // factor, factor, W // factor, factor])
return tf.reduce_mean(x, axis=[3,5])
#----------------------------------------------------------------------------
# Modulated convolution layer.
def modulated_conv2d_layer(x, y, fmaps, kernel, up=False, down=False, demodulate=True, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, fused_modconv=True, weight_var='weight', mod_weight_var='mod_weight', mod_bias_var='mod_bias'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
# Get weight.
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
ww = w[np.newaxis] # [BkkIO] Introduce minibatch dimension.
# Modulate.
s = dense_layer(y, fmaps=x.shape[1].value, weight_var=mod_weight_var) # [BI] Transform incoming W to style.
s = apply_bias_act(s, bias_var=mod_bias_var) + 1 # [BI] Add bias (initially 1).
ww *= tf.cast(s[:, np.newaxis, np.newaxis, :, np.newaxis], w.dtype) # [BkkIO] Scale input feature maps.
# Demodulate.
if demodulate:
d = tf.rsqrt(tf.reduce_sum(tf.square(ww), axis=[1,2,3]) + 1e-8) # [BO] Scaling factor.
ww *= d[:, np.newaxis, np.newaxis, np.newaxis, :] # [BkkIO] Scale output feature maps.
# Reshape/scale input.
if fused_modconv:
x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]]) # Fused => reshape minibatch to convolution groups.
w = tf.reshape(tf.transpose(ww, [1, 2, 3, 0, 4]), [ww.shape[1], ww.shape[2], ww.shape[3], -1])
else:
x *= tf.cast(s[:, :, np.newaxis, np.newaxis], x.dtype) # [BIhw] Not fused => scale input activations.
# Convolution with optional up/downsampling.
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
# Reshape/scale output.
if fused_modconv:
x = tf.reshape(x, [-1, fmaps, x.shape[2], x.shape[3]]) # Fused => reshape convolution groups back to minibatch.
elif demodulate:
x *= tf.cast(d[:, :, np.newaxis, np.newaxis], x.dtype) # [BOhw] Not fused => scale output activations.
return x
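# Hedged usage sketch for the modulated convolution above: a per-sample style
# vector `y` scales the input feature maps (modulation) and, when demodulate
# is on, each output channel is renormalized to unit expected scale. With
# fused_modconv the whole minibatch runs as one grouped convolution. All
# shapes and the scope name below are illustrative assumptions.
def _example_modulated_conv():
    x = tf.zeros([4, 512, 16, 16])      # [N, C_in, H, W] activations
    y = tf.zeros([4, 512])              # per-sample dlatent / style vector
    with tf.variable_scope('ExampleModConv'):
        out = modulated_conv2d_layer(x, y, fmaps=256, kernel=3, up=True,
                                     resample_kernel=[1, 3, 3, 1])
    return out                          # -> [4, 256, 32, 32]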
#----------------------------------------------------------------------------
# Minibatch standard deviation layer.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
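# Sketch of the minibatch-stddev statistic above: the minibatch is split into
# groups of `group_size`, the per-group stddev is averaged over channels and
# pixels, and the result is appended as `num_new_features` constant feature
# maps. Discriminators use it to penalize low sample variety. Shapes assumed.
def _example_minibatch_stddev():
    x = tf.zeros([8, 512, 4, 4])
    return minibatch_stddev_layer(x, group_size=4, num_new_features=1)  # [8, 513, 4, 4]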
#----------------------------------------------------------------------------
# Main generator network.
# Composed of two sub-networks (mapping and synthesis) that are defined below.
# Used in configs B-F (Table 1).
def Decoder_main(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
is_training = False, # Network is under training? Enables and disables specific features.
return_dlatents = False, # Return dlatents in addition to the images?
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls.
mapping_func = 'Decoder_mapping', # Build func name for the mapping network.
synthesis_func = 'Decoder_synthesis', # Build func name for the synthesis network.
**kwargs
):
# Setup components.
if 'synthesis' not in components:
components.synthesis = tflib.Network('G_synthesis', func_name=globals()[synthesis_func], **kwargs)
if 'mapping' not in components:
components.mapping = tflib.Network('G_mapping', func_name=globals()[mapping_func], **kwargs)
# Evaluate mapping network.
dlatents = components.mapping.get_output_for(latents_in, labels_in, is_training=is_training, **kwargs)
dlatents = tf.cast(dlatents, tf.float32)
images_out = components.synthesis.get_output_for(dlatents, is_training=is_training,
force_clean_graph=is_template_graph, **kwargs)
# Return requested outputs.
images_out = tf.identity(images_out, name='images_out')
if return_dlatents:
return images_out, dlatents
return images_out
def Decoder_mapping(
dlatents_in,
labels_in,
label_size = 0,
dlatent_size = 512,
dtype = 'float32',
act='lrelu',
**kwargs
):
dlatents_in.set_shape([None, dlatent_size])
labels_in.set_shape([None, label_size])
latents_in = tf.cast(dlatents_in, dtype)
labels_in = tf.cast(labels_in, dtype)
    return tf.identity(latents_in, name='dlatents_out')
def Decoder_synthesis(
dlatents_in,
dlatent_size = 512,
num_channels = 3,
resolution = 128,
dtype = 'float32',
num_units = 1024,
resample_kernel = [1,3,3,1],
is_training = True,
act = 'lrelu',
**kwargs
):
# Primary inputs.
dlatents_in.set_shape([None, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
resolution_log2 = int(np.log2(resolution))
assert resolution == 2 ** resolution_log2 and resolution >= 4
num_layers = resolution_log2 - 2
height = resolution // 2 ** (num_layers - 1)
width = resolution // 2 ** (num_layers - 1)
with tf.variable_scope('Dense'):
z = linear(dlatents_in, num_units * height * width)
z = tf.reshape(z, [-1, num_units, height, width])
z = tf.nn.relu(z)
for layer_id in range(num_layers - 1):
with tf.variable_scope('conv%d' % layer_id):
scale = 2 ** (layer_id + 1)
_out_shape = [tf.shape(z)[0], num_units // scale, height * scale,
width * scale]
z = deconv2d(z, _out_shape, stddev=0.0099999, conv_filters_dim=5)
z = tf.layers.batch_normalization(z, training=is_training)
z = tf.nn.relu(z)
with tf.variable_scope('toRGB'):
z = deconv2d(z, [tf.shape(z)[0], num_channels, resolution,
resolution], stddev=0.0099999, d_h=1, d_w=1, conv_filters_dim=5)
images_out = tf.nn.tanh(z)
return tf.identity(images_out, name='images_out')
def Encoder(
images_in,
labels_in,
label_size = 0,
dlatent_size = 512,
num_channels = 3,
resolution = 128,
num_units = 1024,
dtype = 'float32',
is_training = True,
resample_kernel = [1,3,3,1],
act = 'lrelu',
**kwargs
):
images_in.set_shape([None, num_channels, resolution, resolution])
images_in = tf.cast(images_in, dtype)
labels_in.set_shape([None, label_size])
labels_in = tf.cast(labels_in, dtype)
resolution_log2 = int(np.log2(resolution))
assert resolution == 2 ** resolution_log2 and resolution >= 4
num_layers = resolution_log2 - 2
x = images_in
for layer_id in range(num_layers):
with tf.variable_scope('conv%d' % layer_id):
scale = 2 ** (num_layers - layer_id - 1)
x = conv2d(x, num_units // scale, k_w=5, k_h=5, d_h=2, d_w=2, stddev=0.0099999)
x = tf.layers.batch_normalization(x, training=is_training)
x = tf.nn.relu(x)
x = tf.reshape(x, [-1, np.prod(x.shape[1:])])
with tf.variable_scope('mu'):
mu = linear(x, dlatent_size)
with tf.variable_scope('log_sigma'):
log_sigma = linear(x, dlatent_size)
with tf.variable_scope('reparametric'):
dlatents_out = reparametric(mu, log_sigma)
return tf.identity(dlatents_out, name='dlatents_out'), \
tf.identity(mu, name='mu'), tf.identity(log_sigma, name='log_sigma')
def reparametric(mu, log_sigma, distribution='normal', name=None):
sigma = tf.exp(log_sigma * 0.5)
if distribution == 'normal':
epi = tf.random.normal(tf.shape(mu), dtype=mu.dtype)
else:
raise ValueError('Not supported distribution type %s !' % distribution)
if name is not None:
z = tf.add(tf.multiply(epi, sigma), mu, name=name)
else:
z = tf.multiply(epi, sigma) + mu
return z
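# The reparameterization trick above draws z ~ N(mu, sigma^2) as
# z = mu + sigma * eps with eps ~ N(0, I); note sigma = exp(0.5 * log_sigma),
# i.e. `log_sigma` holds the log-variance. Minimal sketch, shapes assumed:
def _example_reparametric():
    mu = tf.zeros([4, 512])
    log_sigma = tf.zeros([4, 512])
    return reparametric(mu, log_sigma, name='z_sample')  # differentiable sample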
def deconv2d(input_, output_shape, d_h=2, d_w=2, stddev=0.02, scope=None, conv_filters_dim=None, padding='SAME'):
"""Transposed convolution (fractional stride convolution) layer.
"""
shape = input_.get_shape().as_list()
k_h = conv_filters_dim
k_w = k_h
assert len(shape) == 4, 'Conv2d_transpose works only with 4d tensors.'
    assert len(output_shape) == 4, 'output_shape should be 4-dimensional'
with tf.variable_scope(scope or "deconv2d"):
w = tf.get_variable(
'filter', [k_h, k_w, output_shape[1], shape[1]],
initializer=tf.random_normal_initializer(stddev=stddev))
deconv = tf.nn.conv2d_transpose(
input_, w, output_shape=output_shape,
strides=[1, 1, d_h, d_w], padding=padding, data_format="NCHW")
deconv = apply_bias_act(deconv)
return deconv
def conv2d(inputs, output_dim, k_h, k_w, d_h, d_w, stddev=0.02, name="conv2d",
use_sn=False, use_bias=True):
"""Performs 2D convolution of the input."""
with tf.variable_scope(name):
w = tf.get_variable(
"kernel", [k_h, k_w, inputs.shape[1].value, output_dim],
initializer=weight_initializer(stddev=stddev))
outputs = tf.nn.conv2d(inputs, w, strides=[1, 1, d_h, d_w], padding="SAME", data_format="NCHW")
if use_bias:
outputs = apply_bias_act(outputs)
return outputs
def weight_initializer(initializer="orthogonal", stddev=0.02):
"""Returns the initializer for the given name.
Args:
    initializer: Name of the initializer. Use one in consts.INITIALIZERS.
    stddev: Standard deviation passed to the initializer.
Returns:
Initializer from `tf.initializers`.
"""
if initializer == "normal":
return tf.initializers.random_normal(stddev=stddev)
if initializer == "truncated":
return tf.initializers.truncated_normal(stddev=stddev)
if initializer == "orthogonal":
return tf.initializers.orthogonal()
raise ValueError("Unknown weight initializer {}.".format(initializer))
def linear(inputs, output_size, scope=None, stddev=0.02, bias_start=0.0,
use_sn=False, use_bias=True):
"""Linear layer without the non-linear activation applied."""
shape = inputs.get_shape().as_list()
with tf.variable_scope(scope or "linear"):
kernel = tf.get_variable(
"kernel",
[shape[1], output_size],
initializer=weight_initializer(stddev=stddev))
outputs = tf.matmul(inputs, kernel)
if use_bias:
bias = tf.get_variable(
"bias",
[output_size],
initializer=tf.constant_initializer(bias_start))
outputs += bias
        return outputs
| 2.25 | 2 |
marshmallow_extended/fields/__init__.py | blackacornlabs/marshmallow_extended | 0 | 12757945 | <filename>marshmallow_extended/fields/__init__.py<gh_stars>0
from marshmallow.fields import *
from .instance import Instance
from .active import Active
from .email import Email
from .enum import Enum
from .icontains import IContains
__all__ = ["Field", "Raw", "Nested", "Mapping", "Dict", "List", "Tuple", "String", "UUID", "Number", "Integer",
"Decimal", "Boolean", "Float", "DateTime", "NaiveDateTime", "AwareDateTime", "Time", "Date", "TimeDelta",
"Url", "URL", "Email", "IP", "IPv4", "IPv6", "Method", "Function", "Str", "Bool", "Int", "Constant",
"Pluck", "Instance", "Active", "Email", "Enum", "IContains"]
| 1.898438 | 2 |
libraries/botframework-streaming/botframework/streaming/payloads/disassemblers/cancel_disassembler.py | andreikop/botbuilder-python | 388 | 12757946 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from uuid import UUID
from botframework.streaming.payload_transport import PayloadSender
from botframework.streaming.payloads.models import Header
class CancelDisassembler:
def __init__(self, *, sender: PayloadSender, identifier: UUID, type: str):
self._sender = sender
self._identifier = identifier
self._type = type
async def disassemble(self):
header = Header(type=self._type, id=self._identifier, end=True)
header.payload_length = 0
self._sender.send_payload(header, None, True, None)
return
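# Hedged usage sketch: cancelling a streaming request amounts to sending a
# zero-length payload whose header carries the request id and the end flag.
# The payload `type` value below is illustrative, not taken from this file.
async def _example_cancel(sender: PayloadSender, request_id: UUID):
    disassembler = CancelDisassembler(sender=sender, identifier=request_id, type="X")
    await disassembler.disassemble()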
| 2.125 | 2 |
odc_wrapper.py | SARScripts/OpenEO_ODC_Driver | 0 | 12757947 | <gh_stars>0
# coding=utf-8
# Author: <NAME> - Eurac Research - michele (dot) claus (at) eurac (dot) edu
# Date: 08/02/2022
import datacube
import numpy as np
import xarray as xr
import copy
from datetime import datetime
from time import time
import shapely
from shapely.geometry import shape
#libraries for polygon and polygon mask
import fiona
import shapely.geometry
import rasterio
import rasterio.features
from datacube.utils import geometry
from datacube.utils.geometry import Geometry, CRS
import dea_tools.datahandling # or some other submodule
from config import *
import logging
import sys
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("odc_openeo_engine.log"),
logging.StreamHandler(sys.stdout)
]
)
class Odc:
def __init__(self,collections=None,timeStart=None,timeEnd=None,south=None,north=None,west=None,east=None,bands=None,resolutions=None,outputCrs=None,polygon=None,resamplingMethod=None,crs=None):
self.dc = datacube.Datacube(config = OPENDATACUBE_CONFIG_FILE)
self.collections = collections
self.timeStart = timeStart
self.timeEnd = self.exclusive_date(timeEnd)
self.south = south
self.north = north
self.west = west
self.east = east
self.bands = bands
self.resolutions = resolutions
self.outputCrs = outputCrs
self.resamplingMethod = resamplingMethod
self.polygon = polygon
self.geoms = None
self.crs = crs
self.data = None
self.query = None
self.build_query()
self.load_collection()
        if self.polygon is not None: # Mask the data with the given polygon, i.e. values outside the polygon are masked out (set to NaN)
self.apply_mask()
def sar2cube_collection(self):
return ('SAR2Cube' in self.collections) # Return True if it's a SAR2Cube collection, where spatial subsetting can't be performed in the usual way
def exclusive_date(self,date):
        return str(np.datetime64(date) - np.timedelta64(1, 'D')).split(' ')[0] # Subtracts one day
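    # e.g. exclusive_date('2020-02-01') -> '2020-01-31': openEO temporal
    # extents use an exclusive end date, while datacube expects an inclusive one.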
def build_query(self):
query = {}
query['product'] = self.collections
if self.bands is not None:
query['measurements'] = self.bands
if self.polygon is not None:
#crs = CRS("epsg:4326")
#geom = Geometry(geom=self.polygon, crs=crs)
#query['geopolygon'] = geom
self.get_bbox()
if (self.south is not None and self.north is not None and self.east is not None and self.west is not None and not self.sar2cube_collection()):
if self.crs is not None:
query['crs'] = 'epsg:' + str(self.crs)
                query['x'] = (self.west,self.east)   # x is easting/longitude
                query['y'] = (self.south,self.north) # y is northing/latitude
query['output_crs'] = 'epsg:' + str(self.crs)
query['resolution'] = [10,10]
else:
query['latitude'] = (self.south,self.north)
                query['longitude'] = (self.west,self.east)
if self.resolutions is not None:
query['resolution'] = self.resolutions
if self.outputCrs is not None:
query['output_crs'] = self.outputCrs
self.query = query
def load_collection(self):
datasets = self.dc.find_datasets(time=(self.timeStart,self.timeEnd),**self.query)
self.query['dask_chunks'] = {"time":1,"x": 1000, "y":1000} # This let us load the data as Dask chunks instead of numpy arrays
if self.resamplingMethod is not None:
if self.resamplingMethod == 'near':
self.query['resampling'] = 'nearest'
else:
##TODO add other method parsing here
self.query['resampling'] = self.resamplingMethod
try:
self.data = self.dc.load(datasets=datasets,**self.query).astype(np.float32)
if self.data.equals(xr.Dataset()):
raise Exception("load_collection returned an empty dataset, please check the requested bands, spatial and temporal extent.")
except Exception as e:
if (str(e)=='Product has no default CRS. Must specify \'output_crs\' and \'resolution\''):
# Identify the most common projection system in the input query
crs_query = copy.deepcopy(self.query)
crs_query.pop('product')
crs_query.pop('dask_chunks')
output_crs = dea_tools.datahandling.mostcommon_crs(dc=self.dc, product=self.collections, query=crs_query)
print(output_crs)
self.query['output_crs'] = output_crs
self.query['resolution'] = [10,10]
self.query['dask_chunks'] = {"time":1,"x": 1000, "y":1000}
self.data = self.dc.load(datasets=datasets,**self.query)
else:
raise e
if (self.sar2cube_collection() and self.south is not None and self.north is not None and self.east is not None and self.west is not None):
attrs = self.data.attrs
start_masking = time()
bbox = [self.west,self.south,self.east,self.north]
grid_lon = self.data.grid_lon[0]
grid_lat = self.data.grid_lat[0]
bbox_mask = np.bitwise_and(np.bitwise_and(grid_lon>bbox[0],grid_lon<bbox[2]),np.bitwise_and(grid_lat>bbox[1],grid_lat<bbox[3]))
# self.data = self.data.where(bbox_mask,drop=True)
bbox_mask = bbox_mask.where(bbox_mask,drop=True)
self.data = self.data * bbox_mask
self.data.attrs = attrs
logging.info("Elapsed time data masking: {}".format(time() - start_masking))
if self.sar2cube_collection():
self.data['grid_lon'] = self.data.grid_lon.where(self.data.grid_lon!=0)
self.data['grid_lat'] = self.data.grid_lat.where(self.data.grid_lat!=0)
def list_measurements(self): # Get all the bands available in the loaded data as a list of strings
measurements = []
content = str(self.data)
meas = []
lines = content.split('Data variables:')[1].split('Attributes:')[0].splitlines()[1:]
for line in lines:
meas.append(line.split(' (time')[0].replace(" ", ""))
measurements.append(meas)
return measurements
def build_geometry_fromshapefile(self):
shapes = fiona.open(self.polygon)
print('Number of shapes in ',self.polygon,' :',len(shapes))
print('crs ',shapes.crs['init'])
#copy attributes from shapefile and define shape_name
geoms = []
for i in range(len(shapes)):
geom_crs = geometry.CRS(shapes.crs['init'])
geo = shapes[i]['geometry']
geom = geometry.Geometry(geo, crs=geom_crs)
geoms.append(geom)
#geom_bs = shapely.geometry.shape(shapes[i]['geometry'])
#shape_name = shape_file.split('/')[-1].split('.')[0]+'_'+str(i)
return geoms
def get_bbox(self):
self.south = np.min([[el[1] for el in self.polygon[0]]])
self.north = np.max([[el[1] for el in self.polygon[0]]])
        self.west = np.min([[el[0] for el in self.polygon[0]]])
        self.east = np.max([[el[0] for el in self.polygon[0]]])
return
def apply_mask(self):
geoms = []
pol = {}
pol['type'] = 'Polygon'
coords = [[(el[0], el[1]) for el in self.polygon[0]]]
pol['coordinates'] = coords
geom = geometry.Geometry(pol, crs='epsg:4326')
geoms.append(geom)
mask = self.geometry_mask(geoms, self.data.geobox, invert=True)
self.data = self.data.where(mask)
return
def geometry_mask(self, geoms, geobox, all_touched=False, invert=False):
"""
Create a mask from shapes.
By default, mask is intended for use as a
numpy mask, where pixels that overlap shapes are False.
:param list[Geometry] geoms: geometries to be rasterized
:param datacube.utils.GeoBox geobox:
:param bool all_touched: If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
:param bool invert: If True, mask will be True for pixels that overlap shapes.
"""
return rasterio.features.geometry_mask([geom.to_crs(geobox.crs) for geom in geoms],
out_shape=geobox.shape,
transform=geobox.affine,
all_touched=all_touched,
invert=invert)
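# Hedged end-to-end sketch of how this wrapper is typically driven. The
# product name, band names and extent below are illustrative assumptions and
# must match whatever is indexed in the target OpenDataCube instance.
def _example_odc():
    odc = Odc(collections='s2_l2a',
              timeStart='2020-01-01', timeEnd='2020-02-01',
              south=46.4, north=46.5, west=11.3, east=11.4,
              bands=['B04', 'B08'])
    return odc.data  # lazily loaded xarray.Dataset backed by Dask chunks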
| 2.125 | 2 |
util/augmentation.py | Andi-Nov/PyTorch | 0 | 12757948 | import math
import random
import warnings
import numpy as np
import scipy.ndimage
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.backends.cudnn as cudnn
from util.logconf import logging
log = logging.getLogger(__name__)
# log.setLevel(logging.WARN)
# log.setLevel(logging.INFO)
log.setLevel(logging.DEBUG)
def cropToShape(image, new_shape, center_list=None, fill=0.0):
# log.debug([image.shape, new_shape, center_list])
# assert len(image.shape) == 3, repr(image.shape)
if center_list is None:
center_list = [int(image.shape[i] / 2) for i in range(3)]
crop_list = []
for i in range(0, 3):
crop_int = center_list[i]
if image.shape[i] > new_shape[i] and crop_int is not None:
# We can't just do crop_int +/- shape/2 since shape might be odd
# and ints round down.
start_int = crop_int - int(new_shape[i]/2)
end_int = start_int + new_shape[i]
crop_list.append(slice(max(0, start_int), end_int))
else:
crop_list.append(slice(0, image.shape[i]))
# log.debug([image.shape, crop_list])
    image = image[tuple(crop_list)]
crop_list = []
for i in range(0, 3):
if image.shape[i] < new_shape[i]:
crop_int = int((new_shape[i] - image.shape[i]) / 2)
crop_list.append(slice(crop_int, crop_int + image.shape[i]))
else:
crop_list.append(slice(0, image.shape[i]))
# log.debug([image.shape, crop_list])
new_image = np.zeros(new_shape, dtype=image.dtype)
new_image[:] = fill
    new_image[tuple(crop_list)] = image
return new_image
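# Sketch of cropToShape's two-phase behavior: axes larger than new_shape are
# cropped around center_list, axes smaller are padded with `fill`.
def _example_crop_to_shape():
    img = np.zeros((64, 64, 3), dtype=np.float32)
    out = cropToShape(img, (48, 80, 3), fill=0.0)
    return out  # rows cropped 64 -> 48, cols padded 64 -> 80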
def zoomToShape(image, new_shape, square=True):
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
if square and image.shape[0] != image.shape[1]:
crop_int = min(image.shape[0], image.shape[1])
new_shape = [crop_int, crop_int, image.shape[2]]
image = cropToShape(image, new_shape)
zoom_shape = [new_shape[i] / image.shape[i] for i in range(3)]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
image = scipy.ndimage.interpolation.zoom(
image, zoom_shape,
output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
return image
def randomOffset(image_list, offset_rows=0.125, offset_cols=0.125):
center_list = [int(image_list[0].shape[i] / 2) for i in range(3)]
center_list[0] += int(offset_rows * (random.random() - 0.5) * 2)
center_list[1] += int(offset_cols * (random.random() - 0.5) * 2)
center_list[2] = None
new_list = []
for image in image_list:
new_image = cropToShape(image, image.shape, center_list)
new_list.append(new_image)
return new_list
def randomZoom(image_list, scale=None, scale_min=0.8, scale_max=1.3):
if scale is None:
scale = scale_min + (scale_max - scale_min) * random.random()
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# log.info([image.shape])
zimage = scipy.ndimage.interpolation.zoom(
image, [scale, scale, 1.0],
output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
image = cropToShape(zimage, image.shape)
new_list.append(image)
return new_list
_randomFlip_transform_list = [
# lambda a: np.rot90(a, axes=(0, 1)),
# lambda a: np.flip(a, 0),
lambda a: np.flip(a, 1),
]
def randomFlip(image_list, transform_bits=None):
if transform_bits is None:
transform_bits = random.randrange(0, 2 ** len(_randomFlip_transform_list))
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
for n in range(len(_randomFlip_transform_list)):
if transform_bits & 2**n:
# prhist(image, 'before')
image = _randomFlip_transform_list[n](image)
# prhist(image, 'after ')
new_list.append(image)
return new_list
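# transform_bits is a bitmask over _randomFlip_transform_list: bit n set means
# transform n is applied to every image in the list. Deterministic sketch:
def _example_random_flip():
    img = np.arange(12, dtype=np.float32).reshape(2, 2, 3)
    flipped, = randomFlip([img], transform_bits=0b1)  # force the lone flip
    return flipped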
def randomSpin(image_list, angle=None, range_tup=None, axes=(0, 1)):
if range_tup is None:
range_tup = (0, 360)
if angle is None:
angle = range_tup[0] + (range_tup[1] - range_tup[0]) * random.random()
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
image = scipy.ndimage.interpolation.rotate(
image, angle, axes=axes, reshape=False,
output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
new_list.append(image)
return new_list
def randomNoise(image_list, noise_min=-0.1, noise_max=0.1):
noise = np.zeros_like(image_list[0])
noise += (noise_max - noise_min) * np.random.random_sample(image_list[0].shape) + noise_min
noise *= 5
noise = scipy.ndimage.filters.gaussian_filter(noise, 3)
# noise += (noise_max - noise_min) * np.random.random_sample(image_hsv.shape) + noise_min
new_list = []
for image_hsv in image_list:
image_hsv = image_hsv + noise
new_list.append(image_hsv)
return new_list
def randomHsvShift(image_list, h=None, s=None, v=None,
h_min=-0.1, h_max=0.1,
s_min=0.5, s_max=2.0,
v_min=0.5, v_max=2.0):
if h is None:
h = h_min + (h_max - h_min) * random.random()
if s is None:
s = s_min + (s_max - s_min) * random.random()
if v is None:
v = v_min + (v_max - v_min) * random.random()
new_list = []
for image_hsv in image_list:
# assert image_hsv.shape[-1] == 3, repr(image_hsv.shape)
image_hsv[:,:,0::3] += h
image_hsv[:,:,1::3] = image_hsv[:,:,1::3] ** s
image_hsv[:,:,2::3] = image_hsv[:,:,2::3] ** v
new_list.append(image_hsv)
return clampHsv(new_list)
def clampHsv(image_list):
new_list = []
for image_hsv in image_list:
image_hsv = image_hsv.clone()
# Hue wraps around
image_hsv[:,:,0][image_hsv[:,:,0] > 1] -= 1
image_hsv[:,:,0][image_hsv[:,:,0] < 0] += 1
# Everything else clamps between 0 and 1
image_hsv[image_hsv > 1] = 1
image_hsv[image_hsv < 0] = 0
new_list.append(image_hsv)
return new_list
# def torch_augment(input):
# theta = random.random() * math.pi * 2
# s = math.sin(theta)
# c = math.cos(theta)
# c1 = 1 - c
# axis_vector = torch.rand(3, device='cpu', dtype=torch.float64)
# axis_vector -= 0.5
# axis_vector /= axis_vector.abs().sum()
# l, m, n = axis_vector
#
# matrix = torch.tensor([
# [l*l*c1 + c, m*l*c1 - n*s, n*l*c1 + m*s, 0],
# [l*m*c1 + n*s, m*m*c1 + c, n*m*c1 - l*s, 0],
# [l*n*c1 - m*s, m*n*c1 + l*s, n*n*c1 + c, 0],
# [0, 0, 0, 1],
# ], device=input.device, dtype=torch.float32)
#
# return th_affine3d(input, matrix)
# following from https://github.com/ncullen93/torchsample/blob/master/torchsample/utils.py
# MIT licensed
# def th_affine3d(input, matrix):
# """
# 3D Affine image transform on torch.Tensor
# """
# A = matrix[:3,:3]
# b = matrix[:3,3]
#
# # make a meshgrid of normal coordinates
# coords = th_iterproduct(input.size(-3), input.size(-2), input.size(-1), dtype=torch.float32)
#
# # shift the coordinates so center is the origin
# coords[:,0] = coords[:,0] - (input.size(-3) / 2. - 0.5)
# coords[:,1] = coords[:,1] - (input.size(-2) / 2. - 0.5)
# coords[:,2] = coords[:,2] - (input.size(-1) / 2. - 0.5)
#
# # apply the coordinate transformation
# new_coords = coords.mm(A.t().contiguous()) + b.expand_as(coords)
#
# # shift the coordinates back so origin is origin
# new_coords[:,0] = new_coords[:,0] + (input.size(-3) / 2. - 0.5)
# new_coords[:,1] = new_coords[:,1] + (input.size(-2) / 2. - 0.5)
# new_coords[:,2] = new_coords[:,2] + (input.size(-1) / 2. - 0.5)
#
# # map new coordinates using bilinear interpolation
# input_transformed = th_trilinear_interp3d(input, new_coords)
#
# return input_transformed
#
#
# def th_trilinear_interp3d(input, coords):
# """
# trilinear interpolation of 3D torch.Tensor image
# """
# # take clamp then floor/ceil of x coords
# x = torch.clamp(coords[:,0], 0, input.size(-3)-2)
# x0 = x.floor()
# x1 = x0 + 1
# # take clamp then floor/ceil of y coords
# y = torch.clamp(coords[:,1], 0, input.size(-2)-2)
# y0 = y.floor()
# y1 = y0 + 1
# # take clamp then floor/ceil of z coords
# z = torch.clamp(coords[:,2], 0, input.size(-1)-2)
# z0 = z.floor()
# z1 = z0 + 1
#
# stride = torch.tensor(input.stride()[-3:], dtype=torch.int64, device=input.device)
# x0_ix = x0.mul(stride[0]).long()
# x1_ix = x1.mul(stride[0]).long()
# y0_ix = y0.mul(stride[1]).long()
# y1_ix = y1.mul(stride[1]).long()
# z0_ix = z0.mul(stride[2]).long()
# z1_ix = z1.mul(stride[2]).long()
#
# # input_flat = th_flatten(input)
# input_flat = x.contiguous().view(x[0], x[1], -1)
#
# vals_000 = input_flat[:, :, x0_ix+y0_ix+z0_ix]
# vals_001 = input_flat[:, :, x0_ix+y0_ix+z1_ix]
# vals_010 = input_flat[:, :, x0_ix+y1_ix+z0_ix]
# vals_011 = input_flat[:, :, x0_ix+y1_ix+z1_ix]
# vals_100 = input_flat[:, :, x1_ix+y0_ix+z0_ix]
# vals_101 = input_flat[:, :, x1_ix+y0_ix+z1_ix]
# vals_110 = input_flat[:, :, x1_ix+y1_ix+z0_ix]
# vals_111 = input_flat[:, :, x1_ix+y1_ix+z1_ix]
#
# xd = x - x0
# yd = y - y0
# zd = z - z0
# xm1 = 1 - xd
# ym1 = 1 - yd
# zm1 = 1 - zd
#
# x_mapped = (
# vals_000.mul(xm1).mul(ym1).mul(zm1) +
# vals_001.mul(xm1).mul(ym1).mul(zd) +
# vals_010.mul(xm1).mul(yd).mul(zm1) +
# vals_011.mul(xm1).mul(yd).mul(zd) +
# vals_100.mul(xd).mul(ym1).mul(zm1) +
# vals_101.mul(xd).mul(ym1).mul(zd) +
# vals_110.mul(xd).mul(yd).mul(zm1) +
# vals_111.mul(xd).mul(yd).mul(zd)
# )
#
# return x_mapped.view_as(input)
#
# def th_iterproduct(*args, dtype=None):
# return torch.from_numpy(np.indices(args).reshape((len(args),-1)).T)
#
# def th_flatten(x):
# """Flatten tensor"""
# return x.contiguous().view(x[0], x[1], -1)
| 2.234375 | 2 |
ucp/continuous_ucx_progress.py | efajardo-nv/ucx-py | 0 | 12757949 | <reponame>efajardo-nv/ucx-py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import asyncio
import socket
import weakref
class ProgressTask(object):
def __init__(self, worker, event_loop):
"""Creates a task that keeps calling worker.progress()
        Notice, the class and the created task are careful not to hold a
        reference to `worker`, so that a dangling progress task will not
        prevent `worker` from being garbage collected.
Parameters
----------
worker: UCXWorker
The UCX worker context to progress
event_loop: asyncio.EventLoop
The event loop to do progress in.
"""
self.weakref_worker = weakref.ref(worker)
self.event_loop = event_loop
self.asyncio_task = None
def __del__(self):
if self.asyncio_task is not None:
self.asyncio_task.cancel()
# Hash and equality is based on the event loop
def __hash__(self):
return hash(self.event_loop)
def __eq__(self, other):
return hash(self) == hash(other)
class NonBlockingMode(ProgressTask):
def __init__(self, worker, event_loop):
super().__init__(worker, event_loop)
self.asyncio_task = event_loop.create_task(self._progress_task())
async def _progress_task(self):
"""This helper function maintains a UCX progress loop."""
while True:
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
worker.progress()
del worker
# Give other co-routines a chance to run.
await asyncio.sleep(0)
class BlockingMode(ProgressTask):
def __init__(self, worker, event_loop, epoll_fd):
super().__init__(worker, event_loop)
# Creating a job that is ready straightaway but with low priority.
# Calling `await event_loop.sock_recv(rsock, 1)` will return when
# all non-IO tasks are finished.
# See <https://stackoverflow.com/a/48491563>.
self.rsock, wsock = socket.socketpair()
self.rsock.setblocking(0)
wsock.setblocking(0)
wsock.close()
        # Bind an asyncio reader to the UCX epoll file descriptor
event_loop.add_reader(epoll_fd, self._fd_reader_callback)
# Remove the reader on finalization
weakref.finalize(self, event_loop.remove_reader, epoll_fd)
def _fd_reader_callback(self):
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
worker.progress()
        # Notice, we can safely overwrite `self.asyncio_task`
        # since the previous arm task is finished by now.
assert self.asyncio_task is None or self.asyncio_task.done()
self.asyncio_task = self.event_loop.create_task(self._arm_worker())
async def _arm_worker(self):
        # When arming the worker, the following must be true:
        #  - No more progress in UCX (see doc of ucp_worker_arm())
        #  - All asyncio tasks that aren't waiting on UCX must have been
        #    executed, so that asyncio's next state is the epoll wait.
        #    See <https://github.com/rapidsai/ucx-py/issues/413>
while True:
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
worker.progress()
del worker
# This IO task returns when all non-IO tasks are finished.
# Notice, we do NOT hold a reference to `ctx` while waiting.
await self.event_loop.sock_recv(self.rsock, 1)
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
if worker.arm():
# At this point we know that asyncio's next state is
# epoll wait.
break
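# Hedged wiring sketch: `worker` must expose .progress(), .initialized and
# .arm() as assumed by the classes above, and `epoll_fd` is the UCX worker's
# epoll file descriptor; both are provided by the core UCX context elsewhere
# in this package (names here are assumptions, not taken from this file).
def _example_attach_progress(worker, epoll_fd, blocking=True):
    loop = asyncio.get_event_loop()
    if blocking:
        return BlockingMode(worker, loop, epoll_fd)
    return NonBlockingMode(worker, loop)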
| 2.4375 | 2 |
searchbot/help.py | cleac/tgsearchbot | 0 | 12757950 | <filename>searchbot/help.py
def gen_help(bot_name):
    return '''
Hello!
I am a Telegram bot that generates DuckDuckGo links directly from Telegram.
I am an inline bot; you can access me via @{bot_name}.
If you have any questions, feel free to open an issue at http://github.com/cleac/tgsearchbot
'''.format(bot_name=bot_name)
| 2.71875 | 3 |