"""Unit tests for the token set index"""
from annif.lexical.tokenset import TokenSet, TokenSetIndex
def test_mllm_tokenset():
tokens = [1, 3, 5]
tset = TokenSet(tokens)
assert tset.subject_id is None
assert not tset.is_pref
assert len(tset) == len(tokens)
assert sorted(list(tset)) == sorted(tokens)
assert tset.contains(TokenSet(tokens))
assert tset.contains(TokenSet([1]))
assert not tset.contains(TokenSet([0]))
assert tset.key in tokens
def test_mllm_tokenset_empty_key():
assert TokenSet([]).key is None
def test_mllm_tokensetindex():
index = TokenSetIndex()
assert len(index) == 0
tset13 = TokenSet([1, 3], subject_id=1)
index.add(tset13)
assert len(index) == 1
index.add(TokenSet([])) # add empty
assert len(index) == 1
tset2 = TokenSet([2])
index.add(tset2)
tset23 = TokenSet([2, 3], subject_id=2)
index.add(tset23)
tset3 = TokenSet([3], subject_id=3, is_pref=True)
index.add(tset3)
tset34 = TokenSet([3, 4], subject_id=3, is_pref=False)
index.add(tset34)
tset5 = TokenSet([5])
index.add(tset5)
result = index.search(TokenSet([1, 2, 3, 4]))
assert len(result) == 4
assert (tset13, 0) in result
assert (tset2, 1) in result
assert (tset23, 0) in result
assert (tset3, 2) in result
assert tset34 not in [r[0] for r in result]
assert tset5 not in [r[0] for r in result]
|
import logging
import traceback
from functools import wraps
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
logger.propagate = True
def log_error(func):
    """Decorator that logs the full traceback of any exception raised by
    ``func`` and suppresses it, so the wrapped call returns None instead of raising."""
    @wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except BaseException:
            logger.error(traceback.format_exc())
    return wrapped
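# A minimal usage sketch (illustrative only; the `divide` function below is a
# hypothetical example, not part of this module). Because the decorator catches
# and logs the exception, the wrapped call returns None rather than raising.
if __name__ == "__main__":
    logging.basicConfig(level=logging.ERROR)
    @log_error
    def divide(a, b):
        return a / b
    print(divide(1, 0))  # logs the ZeroDivisionError traceback, then prints None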
|
# -*- coding: utf-8 -*-
"""
transistor.examples.books_to_scrape.workgroup
~~~~~~~~~~~~
This module implements a working example of a BaseWorker and BaseGroup.
:copyright: Copyright (C) 2018 by BOM Quote Limited
:license: The MIT License, see LICENSE for more details.
~~~~~~~~~~~~
"""
from transistor import BaseWorker
from examples.books_to_scrape.persistence import ndb
from transistor.persistence.newt_db.collections import SpiderList
from transistor.utility.logging import logger
class BooksWorker(BaseWorker):
"""
A Worker wraps the custom Spider object and processes it after returning
data from a scrape or crawl. The Worker can be combined into a Group of
an arbitrary number of Workers, to enable gevent based asynchronous I/O.
First, inherit from BaseWorker and then implement the pre_process_exports
and/or post_process_exports methods, as shown below. Other methods
    that can easily be overridden include get_spider and get_spider_extractor;
    even process_exports can be overridden if needed.
Also, add any extra class attributes as needed here, to support your custom
Spider and Exporters.
"""
def pre_process_exports(self, spider, task):
"""
A hook point for customization before process_exports method is
called.
In this example, we use this method to save our spider data to
postgresql using newt.db.
:param spider: the Scraper or Crawler object (i.e. MouseKeyScraper())
:param task: just passing through the task item for printing.
"""
if self.job_id != 'NONE':
try:
                # create the list with the job name if it doesn't already exist
ndb.root.spiders.add(self.job_id, SpiderList())
logger.info(f'Worker {self.name}-{self.number} created a new spider '
f'list for {self.job_id}')
except KeyError:
# will be raised if there is already a list with the same job_name
pass
# export the scraper data to the items object
items = self.load_items(spider)
# save the items object to newt.db
ndb.root.spiders[self.job_id].add(items)
ndb.commit()
logger.info(f'Worker {self.name}-{self.number} saved {items.__repr__()} to '
f'scrape_list "{self.job_id}" for task {task}.')
else:
# if job_id is NONE then we'll skip saving the objects
            logger.info(f'Worker {self.name}-{self.number}: job_id is {self.job_id}, '
                        f'so the items will not be saved.')
def post_process_exports(self, spider, task):
"""
A hook point for customization after process_exports.
In this example, we append the returned scraper object to a
class attribute called `events`.
"""
self.events.append(spider)
logger.info(f'{self.name} has {spider.stock} inventory status.')
logger.info(f'pricing: {spider.price}')
logger.info(f'Worker {self.name}-{self.number} finished task {task}')
|
class NoMoreQuestionError(BaseException):
pass
|
import os
from typing import List
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import pandas as pd
import rdkit.RDLogger
from rdkit import Chem
from tqdm.auto import tqdm
from loguru import logger
def normalize_inchi(inchi: str):
    """Round-trip an InChI string through an RDKit Mol; return None if it cannot be parsed."""
    try:
        mol = Chem.MolFromInchi(inchi)
        if mol is None:
            return None
        return Chem.MolToInchi(mol)
    except Exception:
        return None
def _normalize_inchi_batch(inchis: List[str], verbose: bool = True):
    results = []
    executor = ProcessPoolExecutor(max_workers=1)
    if verbose:
        logger.info("Start to normalize InChI")
    for inchi in tqdm(inchis, disable=not verbose):
        try:
            results.append(
                executor.submit(normalize_inchi, inchi).result()
            )
        except KeyboardInterrupt:
            raise
        except Exception:
            # The worker process may have died (e.g. a hard crash inside RDKit):
            # record a missing value and restart the executor so the loop continues.
            results.append(None)
            executor.shutdown()
            executor = ProcessPoolExecutor(max_workers=1)
    executor.shutdown()
    return pd.Series(results, name="InChI")
def normalize_inchi_batch(
inchis: List[str],
n_workers: int = os.cpu_count(),
verbose: bool = True,
):
if n_workers <= 1:
return _normalize_inchi_batch(inchis, verbose)
groups = np.array_split(inchis, n_workers)
with ProcessPoolExecutor(max_workers=n_workers) as executor:
futures = [
executor.submit(
_normalize_inchi_batch,
inchis=group,
verbose=verbose
)
for group in groups
]
normed_inchis = pd.concat(
[f.result() for f in futures],
ignore_index=True
)
return normed_inchis
def disable_rdlogger():
rdlogger = rdkit.RDLogger.logger()
rdlogger.setLevel(rdkit.RDLogger.ERROR)
rdkit.rdBase.DisableLog('rdApp.error')
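# A minimal usage sketch (illustrative only; the InChI strings below are just
# example inputs): normalize a small pandas Series with a single worker process.
# Invalid strings come back as None.
if __name__ == "__main__":
    disable_rdlogger()
    example = pd.Series(["InChI=1S/CH4/h1H4", "not-an-inchi"])
    print(normalize_inchi_batch(example, n_workers=1, verbose=False))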
|
#!/usr/bin/env python
# encoding: utf-8
import csv
import nltk.data
count = 0
# Load the Punkt sentence tokenizer once, outside the row loop.
sent_detector = nltk.data.load("tokenizers/punkt/english.pickle")
rf = open("report_ONS.csv", "w", newline="")
csv_writer = csv.writer(rf)
with open("report_ONN.csv", "r") as f:
    csv_reader = csv.reader(f)
    for row in csv_reader:
        text = row[5]
        sents = sent_detector.tokenize(text.strip())
for sent in sents:
csv_writer.writerow([count, sent])
count += 1
rf.close()
|
# coding=utf-8
import pandas as pd
import sys
import optparse
from lxml import etree
from sklearn import metrics
def main(argv=None):
# parse the input
parser = optparse.OptionParser()
parser.add_option('-g')
parser.add_option('-t')
options, args = parser.parse_args()
gold_file_name = options.g
test_file_name = options.t
# process file with gold markup
gold={}
tree = etree.parse(gold_file_name)
doc = tree.getroot()
itemlist = doc.findall("review")
test_ids = []
for itm in itemlist:
review_id = itm.get("id")
test_ids.append(int(review_id))
terms = itm.find("aspects").findall("aspect")
for xml_term in terms:
if xml_term.get("type")=="explicit" and xml_term.get("mark")=="Rel":
term_identifier = xml_term.get("from")+"_"+xml_term.get("to")
sentiment = xml_term.get("sentiment")
gold[review_id + "_" + term_identifier] = sentiment
# process file with participant markup
test = {}
tree = etree.parse(test_file_name)
doc = tree.getroot()
itemlist = doc.findall("review")
for itm in itemlist:
review_id = int(itm.get("id"))
if review_id in test_ids: #it's test review
terms = itm.find("aspects").findall("aspect")
for xml_term in terms:
if xml_term.get("type")=="explicit" and xml_term.get("mark")=="Rel":
term_identifier = xml_term.get("from")+"_"+xml_term.get("to")
sentiment = xml_term.get("sentiment")
key = str(review_id) + "_" + term_identifier
test[key] = sentiment
actual = []
predicted = []
out2write = ["","id\tactual\tpredicted"]
for key in gold:
if gold[key] == "neutral" or key not in test:
continue
actual.append(gold[key])
predicted.append(test[key])
out2write.append(key + "\t" + gold[key] + "\t" + test[key])
p_micro,r_micro,f_micro,_ = metrics.precision_recall_fscore_support(actual, predicted, average="micro")
p_macro,r_macro,f_macro,_ = metrics.precision_recall_fscore_support(actual, predicted, average="macro")
print("%f\t%f\t%f\t%f\t%f\t%f" % (p_micro,r_micro,f_micro, p_macro,r_macro,f_macro))
result_string = "macro_avg_f1=" + str(f_macro) + "\tmicro_avg_f1="+str(f_micro)
data_frame = pd.DataFrame({"col":[result_string] + out2write})
domain = gold_file_name.split("_")[1]
out_file_name = "eval_В_"+domain+".csv"
data_frame.to_csv(out_file_name, index=False, header=False, encoding="utf-8")
print("see "+out_file_name+" for details")
if __name__ == "__main__":
main(sys.argv[1:])
exit()
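# A hypothetical invocation sketch (the script and file names are placeholders):
#   python evaluate_aspects.py -g SentiRuEval_rest_gold.xml -t SentiRuEval_rest_markup.xml
# This prints micro/macro precision, recall and F1 on one line and writes the
# per-aspect comparison to an eval_В_<domain>.csv file.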
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("register", views.register, name="register"),
path("check", views.check, name="check"),
path("login", views.login_view, name="login_view"),
path("logout", views.logout_view, name="logout_view"),
path("add_to_cart", views.add_to_cart, name="add_to_cart"),
path("cart", views.cart, name="cart"),
path("stripe_session", views.stripe_session, name="stripe_session"),
path("checkout", views.checkout, name="checkout"),
path("orders", views.orders, name="orders")
]
|
import requests, base64, json
from flask import request, Response, render_template
def auth_required(method=None, okta=True):
def decorator(f):
def wrapper(*args, **kwargs):
def get_header():
auth_header = request.headers.get("Authorization")
                if auth_header and 'Basic' in auth_header:
                    # keep the full base64 credentials, stripping only the "Basic " prefix
                    auth_header = auth_header[6:]
elif auth_header and 'Bearer' in auth_header:
auth_header = auth_header
elif not auth_header:
auth_header = 'empty'
return auth_header
def basic_auth(username, password):
auth_header = get_header()
creds = username+":"+password
b64_creds = base64.b64encode(creds.encode())
if auth_header != b64_creds.decode():
return False
else:
return True
def oauth2(okta):
jwt = get_header()
if 'empty' not in jwt:
if okta:
is_valid = okta_jwt_remote_validator(jwt)
else:
##
## Validator for any other provider will be configured. For now, returning True as default.
##
is_valid = True
else:
is_valid = False
return is_valid
def okta_jwt_remote_validator(jwt):
'''
These lines are commented out for now.
This will be improved to use ENV Variables.
client_id = "0oa13mjq7j9jacY8M357"
client_secret = "HmQiJTBhJe46Ezk1nzapp138_8NbNI7aZcZpvJUk"
creds = client_id + ":" + client_secret
b64_creds = base64.b64encode(creds.encode()).decode()
auth_header = "Basic " + b64_creds
headers = {"Authorization":auth_header}
token = str(jwt[7:(len(jwt))])
body = {"token":token}
response = requests.post("https://adrian.okta.com/oauth2/ausa8dtz9H5QTLpmC356/v1/introspect", data=body, headers=headers)
response_json = response.json()
active = response_json["active"]
'''
active = True
return active
if method == 'basic_auth':
auth = basic_auth("user", "p@ss")
if auth == True:
return f(*args, **kwargs)
else:
return Response(render_template("403.html"), status=403, mimetype="text/html")
if method == 'oauth2':
auth = oauth2(okta)
if auth == True:
return f(*args, **kwargs)
else:
return Response(render_template("403.html"), status=403, mimetype="text/html")
wrapper.__name__ = f.__name__
return wrapper
return decorator
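# A minimal wiring sketch (illustrative only; the Flask app and route below are
# hypothetical, not part of this module): protect a view with the hard-coded
# "user"/"p@ss" Basic-auth credentials checked above.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    @app.route("/secure")
    @auth_required(method="basic_auth")
    def secure():
        return "authorized"
    app.run(debug=True)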
|
from pgcopy import CopyManager
from . import test_datatypes
class TestPublicSchema(test_datatypes.TypeMixin):
temp = ''
datatypes = ['integer', 'bool', 'varchar(12)']
def temp_schema_name(self):
# This will set self.schema_table correctly, so that
# TypeMixin.test_type will instantiate CopyManager
# with public schema specified explicitly
return "public"
def test_default_public(self):
# Use public schema by default
bincopy = CopyManager(self.conn, self.table, self.cols)
bincopy.copy(self.data)
select_list = ','.join(self.cols)
self.cur.execute("SELECT %s from %s" % (select_list, self.table))
self.checkResults()
def cast(self, v):
if isinstance(v, str):
return v.encode()
return v
|
# -*- coding: utf-8 -*-
"""
Test parsing of 'simple' offsets
"""
from __future__ import unicode_literals
import time
import datetime
import unittest
import parsedatetime as pdt
from . import utils
class test(unittest.TestCase):
@utils.assertEqualWithComparator
def assertExpectedResult(self, result, check, **kwargs):
return utils.compareResultByTimeTuplesAndFlags(result, check, **kwargs)
def setUp(self):
self.cal = pdt.Calendar()
(self.yr, self.mth, self.dy, self.hr,
self.mn, self.sec, self.wd, self.yd, self.isdst) = time.localtime()
def testOffsetAfterNoon(self):
s = datetime.datetime(self.yr, self.mth, self.dy, 10, 0, 0)
t = datetime.datetime(
self.yr, self.mth, self.dy, 12, 0, 0) + datetime.timedelta(hours=5)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('5 hours after 12pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('five hours after 12pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours after 12 pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours after 12:00pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours after 12:00 pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours after noon', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours from noon', start), (target, 2))
def testOffsetBeforeNoon(self):
s = datetime.datetime.now()
t = (datetime.datetime(self.yr, self.mth, self.dy, 12, 0, 0) +
datetime.timedelta(hours=-5))
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('5 hours before noon', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours before 12pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('five hours before 12pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours before 12 pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours before 12:00pm', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('5 hours before 12:00 pm', start), (target, 2))
def testOffsetBeforeModifiedNoon(self):
# A contrived test of two modifiers applied to noon - offset by
# -5 from the following day (-5 + 24)
s = datetime.datetime.now()
t = (datetime.datetime(self.yr, self.mth, self.dy, 12, 0, 0) +
datetime.timedelta(hours=-5 + 24))
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('5 hours before next noon', start), (target, 2))
if __name__ == "__main__":
unittest.main()
|
pi = 3.14
raio = 5
area = pi * raio ** 2  # area of a circle: pi * r^2
print(area)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# copyright [2013] [Vitalii Lebedynskyi]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.request
import urllib.parse
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def do_get(url):
if not url:
raise ValueError("url cannot be empty")
stream = urllib.request.urlopen(url)
return stream.read().decode('utf-8')
def download_file(url, file):
if not url:
raise ValueError("url cannot be empty")
stream = urllib.request.urlopen(url)
with open(file, 'wb') as output:
output.write(stream.read())
return True
def get_random_headers():
return {"Content-Encoding": "UTF-8", "Accept-Charset": "UTF-8"}
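# A minimal usage sketch (illustrative only; the URL and output file are
# placeholders): fetch a page as text and save the same resource to disk.
if __name__ == "__main__":
    print(do_get("https://example.com/")[:80])
    download_file("https://example.com/", "example.html")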
|
from tkinter import *
import sqlite3
root = Tk()
root.title('Using Databases')
root.geometry('300x300')
# Creating or Connecting database
conn = sqlite3.connect('address_book.db')
# Creating cursor
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS address(
first_name text,
last_name text,
address text,
city text,
state text,
zip_code integer
)
""")
# Commit changes
conn.commit()
# Close connection
conn.close()
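# A hypothetical helper (not part of the original script) sketching how a record
# could be inserted later with parameterized SQL; it is defined but never called.
def insert_address(first_name, last_name, address, city, state, zip_code):
    connection = sqlite3.connect('address_book.db')
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO address VALUES (?, ?, ?, ?, ?, ?)",
        (first_name, last_name, address, city, state, zip_code),
    )
    connection.commit()
    connection.close()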
root.mainloop()
|
import logging
from functools import singledispatchmethod
from tinkoff.invest.services import Services
from tinkoff.invest.strategies.base.errors import UnknownSignal
from tinkoff.invest.strategies.base.signal import (
CloseLongMarketOrder,
CloseShortMarketOrder,
OpenLongMarketOrder,
OpenShortMarketOrder,
Signal,
)
from tinkoff.invest.strategies.base.signal_executor_base import SignalExecutor
from tinkoff.invest.strategies.moving_average.strategy_settings import (
MovingAverageStrategySettings,
)
from tinkoff.invest.strategies.moving_average.strategy_state import (
MovingAverageStrategyState,
)
logger = logging.getLogger(__name__)
class MovingAverageSignalExecutor(SignalExecutor):
def __init__(
self,
services: Services,
state: MovingAverageStrategyState,
settings: MovingAverageStrategySettings,
):
super().__init__(services, settings)
self._services = services
self._state = state
@singledispatchmethod
def execute(self, signal: Signal) -> None:
raise UnknownSignal()
@execute.register
def _execute_open_long_market_order(self, signal: OpenLongMarketOrder) -> None:
self.execute_open_long_market_order(signal)
self._state.long_open = True
self._state.position = signal.lots
logger.info("Signal executed %s", signal)
@execute.register
def _execute_close_long_market_order(self, signal: CloseLongMarketOrder) -> None:
self.execute_close_long_market_order(signal)
self._state.long_open = False
self._state.position = 0
logger.info("Signal executed %s", signal)
@execute.register
def _execute_open_short_market_order(self, signal: OpenShortMarketOrder) -> None:
self.execute_open_short_market_order(signal)
self._state.short_open = True
self._state.position = signal.lots
logger.info("Signal executed %s", signal)
@execute.register
def _execute_close_short_market_order(self, signal: CloseShortMarketOrder) -> None:
self.execute_close_short_market_order(signal)
self._state.short_open = False
self._state.position = 0
logger.info("Signal executed %s", signal)
|
import numpy as np
import os.path
import refnx, scipy
# the ReflectDataset object will contain the data
from refnx.dataset import ReflectDataset
# the reflect module contains functionality relevant to reflectometry
from refnx.reflect import ReflectModel
# the analysis module contains the curvefitting engine
from refnx.analysis import Objective, Transform, CurveFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
sns.set(palette="colorblind")
mpl.rcParams["xtick.labelsize"] = 10
mpl.rcParams["ytick.labelsize"] = 10
mpl.rcParams["axes.facecolor"] = "w"
mpl.rcParams["lines.linewidth"] = 2
mpl.rcParams["xtick.top"] = False
mpl.rcParams["xtick.bottom"] = True
mpl.rcParams["ytick.left"] = True
mpl.rcParams["grid.linestyle"] = "--"
mpl.rcParams["legend.fontsize"] = 10
mpl.rcParams["legend.facecolor"] = [1, 1, 1]
mpl.rcParams["legend.framealpha"] = 0.75
mpl.rcParams["axes.labelsize"] = 10
mpl.rcParams["axes.linewidth"] = 1
mpl.rcParams["axes.edgecolor"] = "k"
mpl.rcParams["axes.titlesize"] = 10
import sys
import sim_lengths as sl
import md_simulation as md
forcefield = sys.argv[1]
surface_pressure = sys.argv[2]
lt = float(sys.argv[3])
rough = float(sys.argv[4])
traj_dir = '../data/reflectometry2/dspc_{}/{}'.format(surface_pressure, forcefield)
anal_dir = "../output/reflectometry2/dspc_{}/".format(surface_pressure)
print('{} {}'.format(forcefield, surface_pressure))
head = ['D', 'D', 'H', 'H', 'H', 'D', 'D']
tail = ['H', 'H', 'H', 'D', 'D', 'D', 'D']
sol = ['acmw', 'D', 'D', 'acmw', 'D', 'acmw', 'D']
contrasts = ['d13acmw', 'd13d2o', 'hd2o', 'd70acmw', 'd70d2o', 'd83acmw', 'd83d2o']
fig = plt.figure(figsize=(4.13, 3.51*1.3))
gs = mpl.gridspec.GridSpec(1, 3)
ax1 = plt.subplot(gs[0, 0:2])
ax2 = plt.subplot(gs[0, 2])
all_chi = np.array([])
abc = {'trad': '(a)', 'slipids': '(b)', 'berger': '(c)', 'martini': '(d)'}
for ci, contrast in enumerate(contrasts):
    k = ci  # index of the current contrast
models = []
datasets = []
structures = []
lgts = sl.get_lgts(head[k], tail[k], sol[k], forcefield)
l = np.array([])
timesteps = 0
for i in range(1, 2):
print('frame{}'.format(i))
        try:
            del sim
        except NameError:
            # sim does not exist yet on the first frame
            pass
if forcefield == 'martini':
co = 30
else:
co = 15
sim = md.MDSimulation(traj_dir + '_frame{}.pdb'.format(i), flip=True,
verbose=True, layer_thickness=lt, roughness=rough)
sim.assign_scattering_lengths('neutron', atom_types=lgts[0], scattering_lengths=lgts[1])
sim.run()
layers_to_cut = int(co / lt) + 1
timesteps += sim.layers.shape[0]
l = np.append(l, sim.layers[:, :-layers_to_cut, :])
n = l.reshape(timesteps, sim.layers.shape[1]-layers_to_cut, sim.layers.shape[2])
data_dir = '../data/reflectometry2/dspc_{}/'.format(surface_pressure)
dataset = ReflectDataset(os.path.join(data_dir, '{}{}.dat'.format(contrast, surface_pressure)))
refy = np.zeros((n.shape[0], dataset.x.size))
sldy = []
chi = np.zeros((n.shape[0]))
print(n.shape[0])
for i in range(n.shape[0]):
sim.av_layers = n[i, :, :]
model = ReflectModel(sim)
model.scale.setp(1, vary=True, bounds=(0.00000001, np.inf))
model.bkg.setp(dataset.y[-1], vary=True, bounds=(dataset.y[-1]*0.9, dataset.y[-1]*1.1))
objective = Objective(model, dataset, transform=Transform('YX4'))
fitter = CurveFitter(objective)
res = fitter.fit()
refy[i] = model(dataset.x, x_err=dataset.x_err)*(dataset.x)**4
sldy.append(sim.sld_profile()[1])
chi[i] = objective.chisqr()
all_chi = np.append(all_chi, objective.chisqr())
if i == 0:
ax1.errorbar(dataset.x,
dataset.y*(dataset.x)**4 * 10**(ci-1),
yerr=dataset.y_err*(
dataset.x)**4 * 10**(ci-1),
linestyle='', marker='o',
color=sns.color_palette()[ci])
ax1.plot(dataset.x,
model(dataset.x,
x_err=dataset.x_err)*(
dataset.x)**4 * 10**(ci-1),
color=sns.color_palette()[ci], alpha=0.1)
zs, sld = sim.sld_profile()
if zs.min() > -20:
x2 = np.linspace(-20, zs.min(), 100)
zs = np.append(x2, zs)
y2 = np.zeros_like(x2)
sld = np.append(y2, sld)
if zs.max() < 80:
x3 = np.linspace(zs.max(), 81, 100)
y3 = np.ones_like(x3) * sld[-1]
zs = np.append(zs, x3)
sld = np.append(sld, y3)
ax2.plot(zs, sld + ci*10, color=sns.color_palette()[ci],
alpha=0.1)
ax2.set_xlim([-20, 80])
ax1.plot(dataset.x,
np.average(refy, axis=0) * 10**(ci-1),
color='k', zorder=10)
file_open = open('{}dspc_{}_{}_{}_chi_short.txt'.format(anal_dir, forcefield, surface_pressure, contrast), 'w')
file_open.write('{:.2f}'.format(np.average(chi)))
file_open.close()
print(contrast)
file_open = open('{}dspc_{}_{}_all_chi_short.txt'.format(anal_dir, forcefield, surface_pressure), 'w')
file_open.write('${:.2f}\\pm{:.2f}$'.format(np.average(all_chi), np.std(all_chi)))
file_open.close()
ax1.set_ylabel(r'$Rq^4$/Å$^{-4}$')
ax1.set_yscale('log')
ax1.set_xlabel(r'$q$/Å$^{-1}$')
ax2.set_xlabel(r'$z$/Å')
ax2.set_ylabel(r'SLD/$10^{-6}$Å$^{-2}$')
plt.tight_layout()
fig_dir = "../reports/figures/reflectometry2/"
plt.savefig('{}dspc_{}_{}_ref_sld_short.pdf'.format(fig_dir, forcefield, surface_pressure), bbox_inches='tight', pad_inches=0.1)
plt.close()
|
from pyautogui import press, pixelMatchesColor
flag = 0
while True:
if pixelMatchesColor(889, 393, (161, 116, 56), 1):
flag = 1
elif pixelMatchesColor(1026, 391, (161, 116, 56), 1):
        flag = 0
if flag:
press("right")
else:
press("left")
##You have to install pyautogui.
##You may also need to adjust the pixel coordinates for your screen.
##(889, 393) is the left branch wood.
##(1026, 391) is the right branch wood.
##(211, 247, 255) is the sky colour.
##(161, 116, 56) is the wood colour.
##The score you get depends on your system's speed.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from enum import Enum
from lxml import etree
import sys
import os
import codecs
import re
import spacy
import pickle
import json
import networkx as nx
import TermPreprocessor2 as tprep
import OntologyOps as ontutils
import NlpUtils.NounPhraseMerger as npm
import NlpUtils.ExclusionPhraseMerger as epm
import ExclusionDetector as eh
from networkx.readwrite import json_graph
import time
nlp = spacy.load('/ext/NlpFinalModel')
TRAINING_DATA = [
u'We may give your personal information to advertisers',
u'We may give your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may give advertisers your personal information',
u'We may give advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may give to advertisers your personal information',
u'We may give to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be given to advertisers',
u'your personal information, demographic information, and financial information may be given to advertisers, analytics providers, and our business partners',
u'your personal information may be given by advertisers',
u'your personal information, demographic information, and financial information may be given by advertisers, analytics providers, and our business partners',
u'We may choose to give your personal information to advertisers',
u'We may choose to give your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to give your personal information to advertisers',
u'You may be required by us to give your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to give your personal information to advertisers',
u'We are requiring you to give your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require giving your personal information to advertisers',
u'We require giving your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may give advertisers with your personal information',
u'We may give advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may distribute your personal information',
u'advertisers, analytics providers, and our business partners may distribute your personal information, demographic information, and financial information',
u'We may distribute your personal information to advertisers',
u'We may distribute your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may distribute advertisers your personal information',
u'We may distribute advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may distribute to advertisers your personal information',
u'We may distribute to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be distributed to advertisers',
u'your personal information, demographic information, and financial information may be distributed to advertisers, analytics providers, and our business partners',
u'your personal information may be distributed',
u'your personal information, demographic information, and financial information may be distributed',
u'your personal information may be distributed by advertisers',
u'your personal information, demographic information, and financial information may be distributed by advertisers, analytics providers, and our business partners',
u'We may choose to distribute your personal information',
u'We may choose to distribute your personal information, demographic information, and financial information',
u'We may choose to distribute your personal information to advertisers',
u'We may choose to distribute your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to distribute your personal information',
u'You may be required by us to distribute your personal information, demographic information, and financial information',
u'You may be required by us to distribute your personal information to advertisers',
u'You may be required by us to distribute your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to distribute your personal information',
u'We are requiring you to distribute your personal information, demographic information, and financial information',
u'We are requiring you to distribute your personal information to advertisers',
u'We are requiring you to distribute your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require distributing your personal information',
u'We require distributing your personal information, demographic information, and financial information',
u'We require distributing your personal information to advertisers',
u'We require distributing your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may distribute advertisers with your personal information',
u'We may distribute advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may share your personal information',
u'advertisers, analytics providers, and our business partners may share your personal information, demographic information, and financial information',
u'We may share your personal information with advertisers',
u'We may share your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We may share advertisers your personal information',
u'We may share advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may share with advertisers your personal information',
u'We may share with advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be shared with advertisers',
u'your personal information, demographic information, and financial information may be shared with advertisers, analytics providers, and our business partners',
u'your personal information may be shared',
u'your personal information, demographic information, and financial information may be shared',
u'your personal information may be shared by advertisers',
u'your personal information, demographic information, and financial information may be shared by advertisers, analytics providers, and our business partners',
u'We may choose to share your personal information',
u'We may choose to share your personal information, demographic information, and financial information',
u'We may choose to share your personal information with advertisers',
u'We may choose to share your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'You may be required by us to share your personal information',
u'You may be required by us to share your personal information, demographic information, and financial information',
u'You may be required by us to share your personal information with advertisers',
u'You may be required by us to share your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We are requiring you to share your personal information',
u'We are requiring you to share your personal information, demographic information, and financial information',
u'We are requiring you to share your personal information with advertisers',
u'We are requiring you to share your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We require sharing your personal information',
u'We require sharing your personal information, demographic information, and financial information',
u'We require sharing your personal information with advertisers',
u'We require sharing your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We may share advertisers with your personal information',
u'We may share advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may obtain your personal information',
u'advertisers, analytics providers, and our business partners may obtain your personal information, demographic information, and financial information',
u'We may obtain advertisers your personal information',
u'We may obtain advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be obtained',
u'your personal information, demographic information, and financial information may be obtained',
u'your personal information may be obtained by advertisers',
u'your personal information, demographic information, and financial information may be obtained by advertisers, analytics providers, and our business partners',
u'We may choose to obtain your personal information',
u'We may choose to obtain your personal information, demographic information, and financial information',
u'You may be required by us to obtain your personal information',
u'You may be required by us to obtain your personal information, demographic information, and financial information',
u'We are requiring you to obtain your personal information',
u'We are requiring you to obtain your personal information, demographic information, and financial information',
u'We require obtaining your personal information',
u'We require obtaining your personal information, demographic information, and financial information',
u'We may obtain advertisers with your personal information',
u'We may obtain advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may trade your personal information',
u'advertisers, analytics providers, and our business partners may trade your personal information, demographic information, and financial information',
u'We may trade your personal information with advertisers',
u'We may trade your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We may trade advertisers your personal information',
u'We may trade advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may trade with advertisers your personal information',
u'We may trade with advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be traded with advertisers',
u'your personal information, demographic information, and financial information may be traded with advertisers, analytics providers, and our business partners',
u'your personal information may be traded',
u'your personal information, demographic information, and financial information may be traded',
u'your personal information may be traded by advertisers',
u'your personal information, demographic information, and financial information may be traded by advertisers, analytics providers, and our business partners',
u'We may choose to trade your personal information',
u'We may choose to trade your personal information, demographic information, and financial information',
u'We may choose to trade your personal information with advertisers',
u'We may choose to trade your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'You may be required by us to trade your personal information',
u'You may be required by us to trade your personal information, demographic information, and financial information',
u'You may be required by us to trade your personal information with advertisers',
u'You may be required by us to trade your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We are requiring you to trade your personal information',
u'We are requiring you to trade your personal information, demographic information, and financial information',
u'We are requiring you to trade your personal information with advertisers',
u'We are requiring you to trade your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We require trading your personal information',
u'We require trading your personal information, demographic information, and financial information',
u'We require trading your personal information with advertisers',
u'We require trading your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We may trade advertisers with your personal information',
u'We may trade advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may rent your personal information',
u'advertisers, analytics providers, and our business partners may rent your personal information, demographic information, and financial information',
u'We may rent your personal information to advertisers',
u'We may rent your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may rent advertisers your personal information',
u'We may rent advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may rent to advertisers your personal information',
u'We may rent to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be rented to advertisers',
u'your personal information, demographic information, and financial information may be rented to advertisers, analytics providers, and our business partners',
u'your personal information may be rented',
u'your personal information, demographic information, and financial information may be rented',
u'your personal information may be rented by advertisers',
u'your personal information, demographic information, and financial information may be rented by advertisers, analytics providers, and our business partners',
u'We may choose to rent your personal information',
u'We may choose to rent your personal information, demographic information, and financial information',
u'We may choose to rent your personal information to advertisers',
u'We may choose to rent your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to rent your personal information',
u'You may be required by us to rent your personal information, demographic information, and financial information',
u'You may be required by us to rent your personal information to advertisers',
u'You may be required by us to rent your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to rent your personal information',
u'We are requiring you to rent your personal information, demographic information, and financial information',
u'We are requiring you to rent your personal information to advertisers',
u'We are requiring you to rent your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require renting your personal information',
u'We require renting your personal information, demographic information, and financial information',
u'We require renting your personal information to advertisers',
u'We require renting your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may rent advertisers with your personal information',
u'We may rent advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may check your personal information',
u'advertisers, analytics providers, and our business partners may check your personal information, demographic information, and financial information',
u'We may check advertisers your personal information',
u'We may check advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be checked',
u'your personal information, demographic information, and financial information may be checked',
u'your personal information may be checked by advertisers',
u'your personal information, demographic information, and financial information may be checked by advertisers, analytics providers, and our business partners',
u'We may choose to check your personal information',
u'We may choose to check your personal information, demographic information, and financial information',
u'You may be required by us to check your personal information',
u'You may be required by us to check your personal information, demographic information, and financial information',
u'We are requiring you to check your personal information',
u'We are requiring you to check your personal information, demographic information, and financial information',
u'We require checking your personal information',
u'We require checking your personal information, demographic information, and financial information',
u'We may check advertisers with your personal information',
u'We may check advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may sell your personal information',
u'advertisers, analytics providers, and our business partners may sell your personal information, demographic information, and financial information',
u'We may sell your personal information to advertisers',
u'We may sell your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may sell advertisers your personal information',
u'We may sell advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may sell to advertisers your personal information',
u'We may sell to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be sold to advertisers',
u'your personal information, demographic information, and financial information may be sold to advertisers, analytics providers, and our business partners',
u'your personal information may be sold',
u'your personal information, demographic information, and financial information may be sold',
u'your personal information may be sold by advertisers',
u'your personal information, demographic information, and financial information may be sold by advertisers, analytics providers, and our business partners',
u'We may choose to sell your personal information',
u'We may choose to sell your personal information, demographic information, and financial information',
u'We may choose to sell your personal information to advertisers',
u'We may choose to sell your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to sell your personal information',
u'You may be required by us to sell your personal information, demographic information, and financial information',
u'You may be required by us to sell your personal information to advertisers',
u'You may be required by us to sell your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to sell your personal information',
u'We are requiring you to sell your personal information, demographic information, and financial information',
u'We are requiring you to sell your personal information to advertisers',
u'We are requiring you to sell your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require selling your personal information',
u'We require selling your personal information, demographic information, and financial information',
u'We require selling your personal information to advertisers',
u'We require selling your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may sell advertisers with your personal information',
u'We may sell advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may use your personal information',
u'advertisers, analytics providers, and our business partners may use your personal information, demographic information, and financial information',
u'We may use advertisers your personal information',
u'We may use advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be used',
u'your personal information, demographic information, and financial information may be used',
u'your personal information may be used by advertisers',
u'your personal information, demographic information, and financial information may be used by advertisers, analytics providers, and our business partners',
u'We may choose to use your personal information',
u'We may choose to use your personal information, demographic information, and financial information',
u'You may be required by us to use your personal information',
u'You may be required by us to use your personal information, demographic information, and financial information',
u'We are requiring you to use your personal information',
u'We are requiring you to use your personal information, demographic information, and financial information',
u'We require using your personal information',
u'We require using your personal information, demographic information, and financial information',
u'We may use advertisers with your personal information',
u'We may use advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'We may provide your personal information to advertisers',
u'We may provide your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may provide advertisers your personal information',
u'We may provide advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may provide to advertisers your personal information',
u'We may provide to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be provided to advertisers',
u'your personal information, demographic information, and financial information may be provided to advertisers, analytics providers, and our business partners',
u'your personal information may be provided by advertisers',
u'your personal information, demographic information, and financial information may be provided by advertisers, analytics providers, and our business partners',
u'We may choose to provide your personal information to advertisers',
u'We may choose to provide your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to provide your personal information to advertisers',
u'You may be required by us to provide your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to provide your personal information to advertisers',
u'We are requiring you to provide your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require providing your personal information to advertisers',
u'We require providing your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may provide advertisers with your personal information',
u'We may provide advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may transfer your personal information',
u'advertisers, analytics providers, and our business partners may transfer your personal information, demographic information, and financial information',
u'We may transfer your personal information to advertisers',
u'We may transfer your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may transfer advertisers your personal information',
u'We may transfer advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may transfer to advertisers your personal information',
u'We may transfer to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be transferred to advertisers',
u'your personal information, demographic information, and financial information may be transferred to advertisers, analytics providers, and our business partners',
u'your personal information may be transferred',
u'your personal information, demographic information, and financial information may be transferred',
u'your personal information may be transferred by advertisers',
u'your personal information, demographic information, and financial information may be transferred by advertisers, analytics providers, and our business partners',
u'We may choose to transfer your personal information',
u'We may choose to transfer your personal information, demographic information, and financial information',
u'We may choose to transfer your personal information to advertisers',
u'We may choose to transfer your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to transfer your personal information',
u'You may be required by us to transfer your personal information, demographic information, and financial information',
u'You may be required by us to transfer your personal information to advertisers',
u'You may be required by us to transfer your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to transfer your personal information',
u'We are requiring you to transfer your personal information, demographic information, and financial information',
u'We are requiring you to transfer your personal information to advertisers',
u'We are requiring you to transfer your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require transfering your personal information',
u'We require transfering your personal information, demographic information, and financial information',
u'We require transfering your personal information to advertisers',
u'We require transfering your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may transfer advertisers with your personal information',
u'We may transfer advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'We may send your personal information to advertisers',
u'We may send your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may send advertisers your personal information',
u'We may send advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may send to advertisers your personal information',
u'We may send to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be sent to advertisers',
u'your personal information, demographic information, and financial information may be sent to advertisers, analytics providers, and our business partners',
u'your personal information may be sent by advertisers',
u'your personal information, demographic information, and financial information may be sent by advertisers, analytics providers, and our business partners',
u'We may choose to send your personal information to advertisers',
u'We may choose to send your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to send your personal information to advertisers',
u'You may be required by us to send your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to send your personal information to advertisers',
u'We are requiring you to send your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require sending your personal information to advertisers',
u'We require sending your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may send advertisers with your personal information',
u'We may send advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may access your personal information',
u'advertisers, analytics providers, and our business partners may access your personal information, demographic information, and financial information',
u'We may access advertisers your personal information',
u'We may access advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be accessed',
u'your personal information, demographic information, and financial information may be accessed',
u'your personal information may be accessed by advertisers',
u'your personal information, demographic information, and financial information may be accessed by advertisers, analytics providers, and our business partners',
u'We may choose to access your personal information',
u'We may choose to access your personal information, demographic information, and financial information',
u'You may be required by us to access your personal information',
u'You may be required by us to access your personal information, demographic information, and financial information',
u'We are requiring you to access your personal information',
u'We are requiring you to access your personal information, demographic information, and financial information',
u'We require accessing your personal information',
u'We require accessing your personal information, demographic information, and financial information',
u'We may access advertisers with your personal information',
u'We may access advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may transmit your personal information',
u'advertisers, analytics providers, and our business partners may transmit your personal information, demographic information, and financial information',
u'We may transmit your personal information to advertisers',
u'We may transmit your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may transmit advertisers your personal information',
u'We may transmit advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may transmit to advertisers your personal information',
u'We may transmit to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be transmitted to advertisers',
u'your personal information, demographic information, and financial information may be transmitted to advertisers, analytics providers, and our business partners',
u'your personal information may be transmitted',
u'your personal information, demographic information, and financial information may be transmitted',
u'your personal information may be transmitted by advertisers',
u'your personal information, demographic information, and financial information may be transmitted by advertisers, analytics providers, and our business partners',
u'We may choose to transmit your personal information',
u'We may choose to transmit your personal information, demographic information, and financial information',
u'We may choose to transmit your personal information to advertisers',
u'We may choose to transmit your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to transmit your personal information',
u'You may be required by us to transmit your personal information, demographic information, and financial information',
u'You may be required by us to transmit your personal information to advertisers',
u'You may be required by us to transmit your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to transmit your personal information',
u'We are requiring you to transmit your personal information, demographic information, and financial information',
u'We are requiring you to transmit your personal information to advertisers',
u'We are requiring you to transmit your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require transmitting your personal information',
u'We require transmitting your personal information, demographic information, and financial information',
u'We require transmitting your personal information to advertisers',
u'We require transmitting your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may transmit advertisers with your personal information',
u'We may transmit advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may save your personal information',
u'advertisers, analytics providers, and our business partners may save your personal information, demographic information, and financial information',
u'We may save advertisers your personal information',
u'We may save advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be saved',
u'your personal information, demographic information, and financial information may be saved',
u'your personal information may be saved by advertisers',
u'your personal information, demographic information, and financial information may be saved by advertisers, analytics providers, and our business partners',
u'We may choose to save your personal information',
u'We may choose to save your personal information, demographic information, and financial information',
u'You may be required by us to save your personal information',
u'You may be required by us to save your personal information, demographic information, and financial information',
u'We are requiring you to save your personal information',
u'We are requiring you to save your personal information, demographic information, and financial information',
u'We require saving your personal information',
u'We require saving your personal information, demographic information, and financial information',
u'We may save advertisers with your personal information',
u'We may save advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may store your personal information',
u'advertisers, analytics providers, and our business partners may store your personal information, demographic information, and financial information',
u'We may store advertisers your personal information',
u'We may store advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be stored',
u'your personal information, demographic information, and financial information may be stored',
u'your personal information may be stored by advertisers',
u'your personal information, demographic information, and financial information may be stored by advertisers, analytics providers, and our business partners',
u'We may choose to store your personal information',
u'We may choose to store your personal information, demographic information, and financial information',
u'You may be required by us to store your personal information',
u'You may be required by us to store your personal information, demographic information, and financial information',
u'We are requiring you to store your personal information',
u'We are requiring you to store your personal information, demographic information, and financial information',
u'We require storing your personal information',
u'We require storing your personal information, demographic information, and financial information',
u'We may store advertisers with your personal information',
u'We may store advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may disclose your personal information',
u'advertisers, analytics providers, and our business partners may disclose your personal information, demographic information, and financial information',
u'We may disclose your personal information to advertisers',
u'We may disclose your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may disclose advertisers your personal information',
u'We may disclose advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may disclose to advertisers your personal information',
u'We may disclose to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be disclosed to advertisers',
u'your personal information, demographic information, and financial information may be disclosed to advertisers, analytics providers, and our business partners',
u'your personal information may be disclosed',
u'your personal information, demographic information, and financial information may be disclosed',
u'your personal information may be disclosed by advertisers',
u'your personal information, demographic information, and financial information may be disclosed by advertisers, analytics providers, and our business partners',
u'We may choose to disclose your personal information',
u'We may choose to disclose your personal information, demographic information, and financial information',
u'We may choose to disclose your personal information to advertisers',
u'We may choose to disclose your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to disclose your personal information',
u'You may be required by us to disclose your personal information, demographic information, and financial information',
u'You may be required by us to disclose your personal information to advertisers',
u'You may be required by us to disclose your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to disclose your personal information',
u'We are requiring you to disclose your personal information, demographic information, and financial information',
u'We are requiring you to disclose your personal information to advertisers',
u'We are requiring you to disclose your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require disclosing your personal information',
u'We require disclosing your personal information, demographic information, and financial information',
u'We require disclosing your personal information to advertisers',
u'We require disclosing your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may disclose advertisers with your personal information',
u'We may disclose advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may exchange your personal information',
u'advertisers, analytics providers, and our business partners may exchange your personal information, demographic information, and financial information',
u'We may exchange your personal information with advertisers',
u'We may exchange your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We may exchange advertisers your personal information',
u'We may exchange advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may exchange with advertisers your personal information',
u'We may exchange with advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be exchanged with advertisers',
u'your personal information, demographic information, and financial information may be exchanged with advertisers, analytics providers, and our business partners',
u'your personal information may be exchanged',
u'your personal information, demographic information, and financial information may be exchanged',
u'your personal information may be exchanged by advertisers',
u'your personal information, demographic information, and financial information may be exchanged by advertisers, analytics providers, and our business partners',
u'We may choose to exchange your personal information',
u'We may choose to exchange your personal information, demographic information, and financial information',
u'We may choose to exchange your personal information with advertisers',
u'We may choose to exchange your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'You may be required by us to exchange your personal information',
u'You may be required by us to exchange your personal information, demographic information, and financial information',
u'You may be required by us to exchange your personal information with advertisers',
u'You may be required by us to exchange your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We are requiring you to exchange your personal information',
u'We are requiring you to exchange your personal information, demographic information, and financial information',
u'We are requiring you to exchange your personal information with advertisers',
u'We are requiring you to exchange your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We require exchanging your personal information',
u'We require exchanging your personal information, demographic information, and financial information',
u'We require exchanging your personal information with advertisers',
u'We require exchanging your personal information, demographic information, and financial information with advertisers, analytics providers, and our business partners',
u'We may exchange advertisers with your personal information',
u'We may exchange advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may know your personal information',
u'advertisers, analytics providers, and our business partners may know your personal information, demographic information, and financial information',
u'We may know advertisers your personal information',
u'We may know advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be known',
u'your personal information, demographic information, and financial information may be known',
u'your personal information may be known by advertisers',
u'your personal information, demographic information, and financial information may be known by advertisers, analytics providers, and our business partners',
u'We may choose to know your personal information',
u'We may choose to know your personal information, demographic information, and financial information',
u'You may be required by us to know your personal information',
u'You may be required by us to know your personal information, demographic information, and financial information',
u'We are requiring you to know your personal information',
u'We are requiring you to know your personal information, demographic information, and financial information',
u'We require knowing your personal information',
u'We require knowing your personal information, demographic information, and financial information',
u'We may know advertisers with your personal information',
u'We may know advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may report your personal information',
u'advertisers, analytics providers, and our business partners may report your personal information, demographic information, and financial information',
u'We may report your personal information to advertisers',
u'We may report your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may report advertisers your personal information',
u'We may report advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'We may report to advertisers your personal information',
u'We may report to advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be reported to advertisers',
u'your personal information, demographic information, and financial information may be reported to advertisers, analytics providers, and our business partners',
u'your personal information may be reported',
u'your personal information, demographic information, and financial information may be reported',
u'your personal information may be reported by advertisers',
u'your personal information, demographic information, and financial information may be reported by advertisers, analytics providers, and our business partners',
u'We may choose to report your personal information',
u'We may choose to report your personal information, demographic information, and financial information',
u'We may choose to report your personal information to advertisers',
u'We may choose to report your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'You may be required by us to report your personal information',
u'You may be required by us to report your personal information, demographic information, and financial information',
u'You may be required by us to report your personal information to advertisers',
u'You may be required by us to report your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We are requiring you to report your personal information',
u'We are requiring you to report your personal information, demographic information, and financial information',
u'We are requiring you to report your personal information to advertisers',
u'We are requiring you to report your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We require reporting your personal information',
u'We require reporting your personal information, demographic information, and financial information',
u'We require reporting your personal information to advertisers',
u'We require reporting your personal information, demographic information, and financial information to advertisers, analytics providers, and our business partners',
u'We may report advertisers with your personal information',
u'We may report advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may receive your personal information',
u'advertisers, analytics providers, and our business partners may receive your personal information, demographic information, and financial information',
u'We may receive advertisers your personal information',
u'We may receive advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be received',
u'your personal information, demographic information, and financial information may be received',
u'your personal information may be received by advertisers',
u'your personal information, demographic information, and financial information may be received by advertisers, analytics providers, and our business partners',
u'We may choose to receive your personal information',
u'We may choose to receive your personal information, demographic information, and financial information',
u'You may be required by us to receive your personal information',
u'You may be required by us to receive your personal information, demographic information, and financial information',
u'We are requiring you to receive your personal information',
u'We are requiring you to receive your personal information, demographic information, and financial information',
u'We require receiving your personal information',
u'We require receiving your personal information, demographic information, and financial information',
u'We may receive advertisers with your personal information',
u'We may receive advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may gather your personal information',
u'advertisers, analytics providers, and our business partners may gather your personal information, demographic information, and financial information',
u'We may gather advertisers your personal information',
u'We may gather advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be gathered',
u'your personal information, demographic information, and financial information may be gathered',
u'your personal information may be gathered by advertisers',
u'your personal information, demographic information, and financial information may be gathered by advertisers, analytics providers, and our business partners',
u'We may choose to gather your personal information',
u'We may choose to gather your personal information, demographic information, and financial information',
u'You may be required by us to gather your personal information',
u'You may be required by us to gather your personal information, demographic information, and financial information',
u'We are requiring you to gather your personal information',
u'We are requiring you to gather your personal information, demographic information, and financial information',
u'We require gathering your personal information',
u'We require gathering your personal information, demographic information, and financial information',
u'We may gather advertisers with your personal information',
u'We may gather advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
u'advertisers may collect your personal information',
u'advertisers, analytics providers, and our business partners may collect your personal information, demographic information, and financial information',
u'We may collect advertisers your personal information',
u'We may collect advertisers, analytics providers, and our business partners your personal information, demographic information, and financial information',
u'your personal information may be collected',
u'your personal information, demographic information, and financial information may be collected',
u'your personal information may be collected by advertisers',
u'your personal information, demographic information, and financial information may be collected by advertisers, analytics providers, and our business partners',
u'We may choose to collect your personal information',
u'We may choose to collect your personal information, demographic information, and financial information',
u'You may be required by us to collect your personal information',
u'You may be required by us to collect your personal information, demographic information, and financial information',
u'We are requiring you to collect your personal information',
u'We are requiring you to collect your personal information, demographic information, and financial information',
u'We require collecting your personal information',
u'We require collecting your personal information, demographic information, and financial information',
u'We may collect advertisers with your personal information',
u'We may collect advertisers, analytics providers, and our business partners with your personal information, demographic information, and financial information',
]
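# Normalizes mojibake and stray control/symbol characters (smart quotes, the
# section and bullet symbols, soft hyphens, etc.) left over from the initial
# document parsing, so that the same sentence always maps to the same key in
# the analytics data store.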
def cleanupUnicodeErrors(term):
# Cleanup from mistakes before... this should really be fixed during the initial parsing of the document...
t = re.sub(u'\ufffc', u' ', term)
t = re.sub(u'“', u'', t)
t = re.sub(u'â€\u009d', u'', t)
t = re.sub(u'â\u0080\u0094', u'', t)
t = re.sub(u'â\u0080\u009d', u'', t)
t = re.sub(u'â\u0080\u009c', u'', t)
t = re.sub(u'â\u0080\u0099', u'', t)
t = re.sub(u'â€', u'', t)
t = re.sub(u'äë', u'', t)
t = re.sub(u'ä', u'', t)
t = re.sub(u'\u0093', u'', t)
t = re.sub(u'\u0092', u'', t)
t = re.sub(u'\u0094', u'', t)
t = re.sub(u'\u00a7', u'', t)#Section symbol
t = re.sub(u'\u25cf', u'', t)#bullet point symbol
t = re.sub(u'´', u'\'', t)
t = re.sub(u'\u00ac', u'', t)
t = re.sub(u'\u00ad', u'-', t)
t = re.sub(u'\u2211', u'', t)
t = re.sub(u'\ufb01', u'fi', t)
t = re.sub(u'\uff0c', u', ', t)
t = re.sub(u'\uf0b7', u'', t)
t = re.sub(u'\u037e', u';', t)
return t
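# Per-document bookkeeping while policies are processed: wall-clock timing,
# sentences whose interpretation was flipped by an exception clause
# ('exceptions'), sentences containing a negated verb ('negations'), and a
# count of every recorded sentence ('all'). A minimal usage sketch (the
# filename is hypothetical):
#
#   analytics = Analytics()
#   analytics.startDoc('policy_0001.html')
#   analytics.recordPolicyStatementAnalytics(policyStatement)
#   analytics.endDoc()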
class Analytics:
def __init__(self):
self.dataStore = {}
self.currentDoc = None
def recordPolicyStatementAnalytics(self, policyStatement):
#negation_distance and exceptImpact
sentenceText = cleanupUnicodeErrors(policyStatement['original_sentence'])
if 'exceptImpact' in policyStatement and policyStatement['exceptImpact']:
if sentenceText not in self.dataStore[self.currentDoc]['exceptions']:
self.dataStore[self.currentDoc]['exceptions'][sentenceText] = 0
self.dataStore[self.currentDoc]['exceptions'][sentenceText] += 1
if 'negation_distance' in policyStatement and policyStatement['negation_distance'] >= 0:
if sentenceText not in self.dataStore[self.currentDoc]['negations']:
self.dataStore[self.currentDoc]['negations'][sentenceText] = []
self.dataStore[self.currentDoc]['negations'][sentenceText].append((policyStatement['negation_distance'], policyStatement['action'][1].i))
if sentenceText not in self.dataStore[self.currentDoc]['all']:
self.dataStore[self.currentDoc]['all'][sentenceText] = 0
self.dataStore[self.currentDoc]['all'][sentenceText] += 1
def startDoc(self, filename):
self.currentDoc = filename
self.dataStore[self.currentDoc] = { 'performance' : {
'startTime' : time.time(),
'endTime' : 0,
},
'negations' : {
},
'exceptions' : {
},
'all': {
}
}
def endDoc(self):
if self.currentDoc == None:
print 'Error writing end time. No current document.'
return
self.dataStore[self.currentDoc]['performance']['endTime'] = time.time()
self.currentDoc = None
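# Token-level labels assigned by KeyphraseTagger. SHARE_AND_COLLECT_VERB is
# only produced later, when conjugated share and collect verbs are merged into
# a single node during dependency-graph simplification.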
class AnnotationType(Enum):
NONE = 0
DATA_OBJ = 1
SHARE_VERB = 2
COLLECT_VERB = 3
SHARE_AND_COLLECT_VERB = 4
ENTITY = 5
@property
def isShareOrCollect(self):
return self in [AnnotationType.SHARE_VERB, AnnotationType.COLLECT_VERB, AnnotationType.SHARE_AND_COLLECT_VERB]
@property
def isCollect(self):
return self == AnnotationType.COLLECT_VERB
@property
def isData(self):
return self == AnnotationType.DATA_OBJ
@property
def isEntity(self):
return self == AnnotationType.ENTITY
@property
def isNotNone(self):
return self != AnnotationType.NONE
@property
def isNone(self):
return self == AnnotationType.NONE
#TODO add pass
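# Tags individual tokens as share verbs, collect verbs, data objects (tokens
# carrying the custom 'DATA' entity type), or entities (first/second-person
# pronouns and PERSON/ORG named entities). A minimal sketch, assuming `doc` is
# a spaCy Doc that has already been run through the custom DATA recognizer:
#
#   tagger = KeyphraseTagger()
#   tags = tagger.tagSentence(next(doc.sents))
#   # tags maps (token.i, token) -> AnnotationType for every matched token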
class KeyphraseTagger:
# "use" is a special case... We may use your information in cojunction with advertisers to blah blah blah.
def __init__(self):
self.shareVerbs = [u'share', u'sell', u'provide', u'trade', u'transfer', u'give', u'distribute', u'disclose', u'send', u'rent', u'exchange', u'report', u'transmit']
self.collectVerbs = [u'collect', u'check', u'know', u'use', u'obtain', u'access', u'receive', u'gather', u'store', u'save']
def getTag(self, token):
def isShareVerb(self, token):
return token.pos == spacy.symbols.VERB and token.lemma_ in self.shareVerbs
def isCollectVerb(self, token):
return token.pos == spacy.symbols.VERB and token.lemma_ in self.collectVerbs
#TODO do we really want "service|app|application" here? And not check if it is a subject or how related to the verb?
def isEntity(self, token):
return True if token.text.lower() in [u'we', u'i', u'us', u'me', u'you'] or token.ent_type_ in [u'PERSON', u'ORG'] else False
def isDataObject(self, token): # TODO do we want to allow multi-token matches or just merge?
return token.ent_type_ == u'DATA' and token.pos != spacy.symbols.VERB
#############################
if isShareVerb(self, token):
return AnnotationType.SHARE_VERB
elif isCollectVerb(self, token):
return AnnotationType.COLLECT_VERB
elif isDataObject(self, token):
return AnnotationType.DATA_OBJ
elif isEntity(self, token):
return AnnotationType.ENTITY
return AnnotationType.NONE
def tagSentence(self, sentence):
res = {}
for token in sentence:
tag = self.getTag(token)
if tag.isNotNone:
res[(token.i, token)] = self.getTag(token)
return res
# TODO Refactor -- these should all be instance methods, so you don't need to keep passing common objects (graph, sentence, tokenTags)
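# Converts a spaCy dependency parse into a networkx DiGraph keyed by
# (token.i, token, AnnotationType) and then simplifies it: conjugated verbs
# and conjugated data/entity nodes are collapsed, unattached nodes are pruned,
# and subtrees without a share/collect verb are dropped. Typical entry point:
#
#   g = DependencyGraphConstructor.getSimplifiedDependencyGraph(sentence, tags)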
class DependencyGraphConstructor:
@staticmethod
def getConjugatedVerbs(sentence, targetTok = None):
def isComma(token):
return token.pos_ == u'PUNCT' and token.text == u','
def isCConj(token):
return token.pos == spacy.symbols.CCONJ and token.lemma_ in [u'and', u'or', u'nor']
def isNegation(token):
return token.dep == spacy.symbols.neg
def getConjugatedVerbsInternal(results, token):
if token.pos == spacy.symbols.VERB:
results.append(token)
for tok in token.children:
if tok.i < token.i:#Ensure we only look at children that appear AFTER the token in the sentence
continue
if tok.dep == spacy.symbols.conj and tok.pos == spacy.symbols.VERB:
if not getConjugatedVerbsInternal(results, tok):
return False
elif not (isComma(tok) or isCConj(tok) or isNegation(tok)):
return False
return True
def isTokenContainedIn(token, conjugatedVerbs):
for vbuffer in conjugatedVerbs:
if token in vbuffer:
return True
return False
conjugatedVerbs = []
vbuffer = []
for token in sentence:
if token.pos == spacy.symbols.VERB:
# Make sure we didn't already cover the verb...
if isTokenContainedIn(token, conjugatedVerbs):
continue
vbuffer = []
getConjugatedVerbsInternal(vbuffer, token)
if len(vbuffer) > 1:
conjugatedVerbs.append(vbuffer)
if targetTok != None:
for vbuffer in conjugatedVerbs:
if targetTok in vbuffer:
return vbuffer
return []
return conjugatedVerbs
@staticmethod
def getRootNodes(graph):
def hasNoInEdges(graph, node):
return len([n for n in graph.in_edges(node)]) == 0
root = [ n for n in graph.nodes if hasNoInEdges(graph, n) ]
return root # Could be multiple trees...
@staticmethod
def collapseConjugatedEntities(graph, sentence, tokenTags):
def traverseDownward(graph, node):
outEdges = [ dst for src,dst in graph.out_edges(node) ] # Treat it as a stack instead...
while len(outEdges) > 0:
n = outEdges.pop()
if graph[node][n]['label'] == u'conj' and node[2] == n[2] and node[2] in [AnnotationType.DATA_OBJ, AnnotationType.ENTITY]:
# Remove link from X --> Y
graph.remove_edge(node, n)
#Replace node...
graph.nodes[node]['lemma'] = u'{},{}'.format(graph.nodes[node]['lemma'], graph.nodes[n]['lemma'])
graph.nodes[node]['lemmaList'].extend(graph.nodes[n]['lemmaList'])
graph.nodes[node]['label'] = u'{}({}) - {}'.format(node[2], graph.nodes[node]['lemma'], node[1].i)
outEdges2 = [ e for e in graph.out_edges(n) ]
# Add all out links from Y --> Z to X --> Z (return all nodes, so we can add to outEdges...)
for src,dst in outEdges2:
graph.add_edge(node, dst, label = graph[src][dst]['label'])
graph.remove_edge(src,dst)
outEdges.append(dst)
graph.remove_node(n)
continue
traverseDownward(graph, n)
##################
# Get root node...
roots = DependencyGraphConstructor.getRootNodes(graph)
for r in roots:
traverseDownward(graph, r)
@staticmethod
def getNodeAnnotationTag(node):
return node[2]
@staticmethod
def isVerb(graph, node):
return graph.nodes[node]['pos'] == u'VERB'
@staticmethod
def areAnnotationTagsEqual(node1, node2):
t1 = DependencyGraphConstructor.getNodeAnnotationTag(node1)
t2 = DependencyGraphConstructor.getNodeAnnotationTag(node2)
return t1 == t2 or t1.isShareOrCollect and t2.isShareOrCollect
@staticmethod
def collapseConjugatedVerbs(graph, sentence, tokenTags):
def getNewTag(n1, n2):
if n2[2] != AnnotationType.SHARE_AND_COLLECT_VERB and n1[2].isNotNone and n1[2] != n2[2]:
if n2[2].isNone:
return n1[2]
elif (n1[2] == AnnotationType.SHARE_VERB and n2[2] == AnnotationType.COLLECT_VERB) or (n1[2] == AnnotationType.COLLECT_VERB and n2[2] == AnnotationType.SHARE_VERB) or n1[2] == AnnotationType.SHARE_AND_COLLECT_VERB:
return AnnotationType.SHARE_AND_COLLECT_VERB
return n2[2]
def replaceNode(graph, origNode, newNode):
# Add out edges
for s,t in graph.out_edges(origNode):
graph.add_edge(newNode, t, label = graph[s][t]['label'])
# Add in edges
for s,t in graph.in_edges(origNode):
graph.add_edge(s, newNode, label = graph[s][t]['label'])
# Remove node from graph
graph.remove_node(origNode)
def addNewVerbNode(graph, node1, node2, docStart, docEnd):
newTag = getNewTag(node1, node2) # Get new annotation tag
newKey = (node2[0], node2[1], newTag)#FIXME this doesn't really represent the updated tag...
negation = graph.nodes[node2]['neg'] # CHECKME can node1 ever be negated if node2 is not?
newLemma = u'{},{}'.format(graph.nodes[node1]['lemma'], graph.nodes[node2]['lemma'])
newNodeLabel = u'{}({}{})'.format(newTag, newLemma, u' - NOT' if negation else u'')
newLemmaList = []
newLemmaList.extend(graph.nodes[node1]['lemmaList'])
newLemmaList.extend(graph.nodes[node2]['lemmaList'])
if newKey != node2:
graph.add_node(newKey, label=newNodeLabel, lemma=newLemma, lemmaList=newLemmaList, tag = newTag, dep=node2[1].dep_, pos=node2[1].pos_, neg=negation, docStart=docStart, docEnd=docEnd)
return (newKey, True)
graph.nodes[node2]['lemma'] = newLemma
graph.nodes[node2]['label'] = newNodeLabel
graph.nodes[node2]['neg'] = negation
graph.nodes[node2]['lemmaList'] = newLemmaList
graph.nodes[node2]['tag'] = newTag
graph.nodes[node2]['docStart'] = docStart
graph.nodes[node2]['docEnd'] = docEnd
return (node2, False)
######################################
# Let's just walk the damn graph...
def traverseDownward(graph, node):
outEdges = [ dst for src,dst in graph.out_edges(node) ] # Treat it as a stack instead...
while len(outEdges) > 0:
n = outEdges.pop()
if graph[node][n]['label'] == u'conj' and DependencyGraphConstructor.areAnnotationTagsEqual(node, n) and DependencyGraphConstructor.isVerb(graph, node) and DependencyGraphConstructor.isVerb(graph, n):
#TODO the key changes due to the annotation tag potentially changing...
#TODO ensure separation
nodeTok = node[1]
nodeChildTok = n[1]
if nodeChildTok in DependencyGraphConstructor.getConjugatedVerbs(sentence, targetTok = nodeTok):
#Remove link from X --> Y
graph.remove_edge(node, n)
# Get new Tag
newTag = getNewTag(node, n)
if newTag == node:
graph.nodes[node]['lemma'] = u'{},{}'.format(graph.nodes[node]['lemma'], graph.nodes[n]['lemma'])
graph.nodes[node]['lemmaList'].extend(graph.nodes[n]['lemmaList'])
graph.nodes[node]['label'] = u'{}({}) - {}'.format(node[2], graph.nodes[node]['lemma'], node[1].i)
# Add all out links from Y --> Z to X --> Z (return all nodes, so we can add to outEdges...)
for src,dst in graph.out_edges(n):
graph.add_edge(node, dst, label = graph[src][dst]['label'])
graph.remove_edge(src,dst)
outEdges.append(dst)
graph.remove_node(n)
else:
# Add new tag...
startDoc = nodeTok.i if nodeTok.i < nodeChildTok.i else nodeChildTok.i
endDoc = nodeTok.i if nodeTok.i > nodeChildTok.i else nodeChildTok.i
newNode,addedNewNode = addNewVerbNode(graph, n, node, startDoc, endDoc)
if addedNewNode:
# Add in edges
for s,t in graph.in_edges(node):
graph.add_edge(s, newNode, label = graph[s][t]['label'])
graph.remove_edge(s,t)
# Add out edges
for s,t in graph.out_edges(node):
graph.add_edge(newNode, t, label = graph[s][t]['label'])
graph.remove_edge(s,t)
if not addedNewNode:
newNode = node
# Add all out links from Y --> Z to X --> Z (return all nodes, so we can add to outEdges...)
for src,dst in graph.out_edges(n):
graph.add_edge(newNode, dst, label = graph[src][dst]['label'])
graph.remove_edge(src,dst)
outEdges.append(dst)
# Remove node from graph
if addedNewNode:
graph.remove_node(node)
node = newNode
graph.remove_node(n)
continue
traverseDownward(graph, n)
roots = DependencyGraphConstructor.getRootNodes(graph)
for r in roots:
traverseDownward(graph, r)
@staticmethod
def isVerbNegated(token, sentence):
def isVerbNegatedInternal(token):
return any(t.dep == spacy.symbols.neg for t in token.children)
if isVerbNegatedInternal(token):
return True
# Check if verb is part of conjugated verb phrase, if so, check if any of those are negated
conjugatedVerbs = DependencyGraphConstructor.getConjugatedVerbs(sentence, token)
for tok in conjugatedVerbs:
if isVerbNegatedInternal(tok):
return True
# Check if verb is xcomp, if so check if prior verb is negated?
#TODO should also do advcl
if token.dep == spacy.symbols.xcomp or token.dep == spacy.symbols.advcl:
return DependencyGraphConstructor.isVerbNegated(token.head, sentence)
return False
@staticmethod
def pruneUnattachedNodes(graph):
def pruneChildren(graph, node):
for s,t in graph.out_edges(node):
pruneChildren(graph, t)
if node in graph.nodes:
graph.remove_node(node)
def removeNodes(graph, nodesToRemove):
for node in nodesToRemove:
pruneChildren(graph, node)
def hasNoOutEdges(graph, node):
return len([n for n in graph.out_edges(node)]) == 0
def hasNoInEdges(graph, node):
return len([n for n in graph.in_edges(node)]) == 0
def doesGraphContainVerb(graph, root):
if root[2].isShareOrCollect:
return True
for _,n in graph.out_edges(root):
if doesGraphContainVerb(graph, n):
return True
return False
nodesToRemove = [ node for node in graph.nodes if hasNoOutEdges(graph, node) and hasNoInEdges(graph, node) ]
removeNodes(graph, nodesToRemove)
# Let's prune graphs that have no verbs...
potentialRoots = [ node for node in graph.nodes if hasNoInEdges(graph, node) ]
if len(potentialRoots) > 1:
subGraphsToPrune = [r for r in potentialRoots if not doesGraphContainVerb(graph, r)]
if len(subGraphsToPrune) < len(potentialRoots) and len(subGraphsToPrune) > 0:
removeNodes(graph, subGraphsToPrune)
@staticmethod
def pruneNonSharingVerbs(graph):
def getHead(graph, node):
parents = [ src for src,_ in graph.in_edges(node) ]
return parents[0] if len(parents) > 0 else node
def subTreeContainsLabeledTags(graph, node, checkMatch=False):
if checkMatch and node[2].isNotNone:
return True
for _,dst in graph.out_edges(node):
if subTreeContainsLabeledTags(graph, dst, True):
return True
return False
def childrenContainDataPractice(node): # One of the descendants needs to contain a share or collect verb...
if (node[1].pos == spacy.symbols.VERB and node[2].isShareOrCollect and subTreeContainsLabeledTags(graph, node)):
return True
# elif(node[1].pos == spacy.symbols.VERB and node[1].dep_ == 'relcl'):# Only IF THE HEAD IS A DATA OBJECT OR ENTITY...
# n2 = getHead(graph, node)[2]
# if n2.isData or n2.isEntity:
# return True
for s,child in graph.out_edges(node):
if childrenContainDataPractice(child):
return True
return False
def pruneChildren(graph, node):
for s,t in graph.out_edges(node):
pruneChildren(graph, t)
if node in graph.nodes:
graph.remove_node(node)
def removeNodes(graph, nodesToRemove):
for node in nodesToRemove:
pruneChildren(graph, node)
def hasNoOutEdges(graph, node):
return len([n for n in graph.out_edges(node)]) == 0
def hasNoInEdges(graph, node):
return len([n for n in graph.in_edges(node)]) == 0
#############################
nodesToRemove = [ node for node in graph.nodes if node[1].pos == spacy.symbols.VERB and not childrenContainDataPractice(node) ]
removeNodes(graph, nodesToRemove)
nodesToRemove = [ node for node in graph.nodes if hasNoOutEdges(graph, node) and hasNoInEdges(graph, node) ]
removeNodes(graph, nodesToRemove)
nodesToRemove = [ node for node in graph.nodes if hasNoOutEdges(graph, node) and node[2].isNone and node[1].dep not in [spacy.symbols.nsubj, spacy.symbols.dobj, spacy.symbols.nsubjpass] ]
while len(nodesToRemove) > 0:
removeNodes(graph, nodesToRemove)
nodesToRemove = [ node for node in graph.nodes if hasNoOutEdges(graph, node) and node[2].isNone and node[1].dep not in [spacy.symbols.nsubj, spacy.symbols.dobj, spacy.symbols.nsubjpass] ]
########################
@staticmethod
def convertDTreeToNxGraph(sentence, tokenTags):
def addNode(key, node, graph, sentence):
if key not in graph:
negation = False
if key[2].isShareOrCollect:
negation = DependencyGraphConstructor.isVerbNegated(node, sentence)
graph.add_node(key, label=u'{}({}{}) - {}'.format(key[2], node.lemma_, u' - NOT' if negation else u'', node.i), tag = key[2], lemma = node.lemma_, lemmaList=[node.lemma_ if node.lemma_ != u'-PRON-' else node.text.lower()], dep=node.dep_, pos=node.pos_, neg=negation, docStart=node.i, docEnd=node.i)
else:
graph.add_node(key, label=u'{}({}) - {}'.format(key[2], node.lemma_, node.i), tag = key[2], lemma = node.lemma_, lemmaList=[node.lemma_ if node.lemma_ != u'-PRON-' else node.text.lower()], dep=node.dep_, pos=node.pos_, neg=negation, docStart=node.i, docEnd=node.i)
def convertDTreeToNxGraphInternal(root, graph, tokenTags, sentence):
rkey = DependencyGraphConstructor.getKey(root, tokenTags)
if rkey not in graph:
addNode(rkey, root, graph, sentence)
for c in root.children:
ckey = DependencyGraphConstructor.getKey(c, tokenTags)
if ckey not in graph:
addNode(ckey, c, graph, sentence)
graph.add_edge(rkey, ckey, label = c.dep_)
convertDTreeToNxGraphInternal(c, graph, tokenTags, sentence)
##############
dgraph = nx.DiGraph()
convertDTreeToNxGraphInternal(sentence.root, dgraph, tokenTags, sentence)
return dgraph
@staticmethod
def drawGraph(g, filename):
# try:
# A = nx.drawing.nx_agraph.to_agraph(g)
# A.draw(filename, prog='dot', args='-Granksep=2.0')
# except:# FIXME unicode error here for some reason...
# pass
return
@staticmethod
def getKey(root, tokenTags):
tKey = (root.i, root)
tag = AnnotationType.NONE if tKey not in tokenTags else tokenTags[tKey]
return (root.i, root, tag)
@staticmethod
def getSimplifiedDependencyGraph(sentence, tokenTags):
def getPathBetweenNodes(g, itok, jtok, tokenTags):
pathNodes = nx.shortest_path(g.to_undirected(), DependencyGraphConstructor.getKey(itok, tokenTags), DependencyGraphConstructor.getKey(jtok, tokenTags))
return g.subgraph(pathNodes).copy()
##############################
if len(tokenTags) <= 1: # Need two or more tokens...
return None
g = DependencyGraphConstructor.convertDTreeToNxGraph(sentence, tokenTags)
graphs = []
taggedTokens = [(token, tokenTags[(token.i, token)]) for i,token in tokenTags]
for i,(itok,itag) in enumerate(taggedTokens):
for j,(jtok, jtag) in enumerate(taggedTokens[i+1:]):
graphs.append(getPathBetweenNodes(g, itok, jtok, tokenTags))
#Do not prune subjects and objects...
#TODO is it just share verbs or all?
for i,(itok,itag) in enumerate(taggedTokens):
if itag.isShareOrCollect:
for _, dst in g.out_edges(DependencyGraphConstructor.getKey(itok, tokenTags)):
if dst[1].dep in [spacy.symbols.dobj, spacy.symbols.nsubj, spacy.symbols.nsubjpass] and dst[2].isNone:
graphs.append(getPathBetweenNodes(g, itok, dst[1], tokenTags))
#################################
g = nx.compose_all(graphs)
DependencyGraphConstructor.collapseConjugatedVerbs(g, sentence, tokenTags)
# Prune non-attached nodes...
DependencyGraphConstructor.pruneUnattachedNodes(g)
DependencyGraphConstructor.collapseConjugatedEntities(g, sentence, tokenTags)
DependencyGraphConstructor.pruneNonSharingVerbs(g)
#DependencyGraphConstructor.drawGraph(g, 'simplified_graph.png')
return g
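# Flattens extracted policy statements into (entity, collect/not_collect,
# data object, original sentence, action lemma) tuples and applies the
# exception rules (e.g., "... unless required by law" flips a negated
# statement and attributes it to a government agency). A rough sketch of the
# expected call order (variable names are illustrative):
#
#   statements = PolicyTransformer.handleExceptions(policyStatements, tagger, tags)
#   tuples = PolicyTransformer.applyPolicyTransformationRules(statements, analytics)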
class PolicyTransformer:
# TODO refactor so that these are instance methods
# implicit everyone rule
@staticmethod
def applyPolicyTransformationRules(policyStatements, analyticsObj):
def addPolicies(entity, collect, dataObjects, original_sentence, simplifiedStatements, actionLemma):
#FIXME should not get a token at this point...
if (type(entity) == unicode and entity == u'you') or (type(entity) == spacy.tokens.token.Token and entity.text == u'you'):
return
for d in dataObjects:
simplifiedStatements.append((cleanupUnicodeErrors(entity), cleanupUnicodeErrors(collect), cleanupUnicodeErrors(d), cleanupUnicodeErrors(original_sentence), cleanupUnicodeErrors(actionLemma)))
def addPoliciesByEntities(entities, collect, dataObjects, original_sentence, simplifiedStatements, actionLemma):
if entities is not None and len(entities) > 0:
if type(entities) == list:
for e in entities:
addPolicies(e, collect, dataObjects, original_sentence, simplifiedStatements, actionLemma)
else:
addPolicies(entities, collect, dataObjects, original_sentence, simplifiedStatements, actionLemma)
else:
addPolicies(u'third_party_implicit', collect, dataObjects, original_sentence, simplifiedStatements, actionLemma)
def getAgentText(agent):
if agent is None: #TODO CHECKME: Should we really have an implicit first party
return u'we_implicit'
if type(agent) == unicode:
return agent
if type(agent[1]) == unicode:
return agent[1]
return agent[1].lemma_ if agent[1].lemma_ != '-PRON-' else agent[1].text.lower()
#return agent[1] if type(agent[1]) == unicode else agent[1].text.lower() #This needs to be the lemma unless -PRON-
def handleShareVerb(pstatement, actionLemma, simplifiedStatements):
agents = [ getAgentText(a) for a in pstatement['agent'] ]
original_sentence = pstatement['original_sentence']
# Ensure that we don't create a conflict...
# For example, if we have a sentence that claims "we do not collect or share X.", do not assume first party collect
if pstatement['action'][2] == AnnotationType.SHARE_AND_COLLECT_VERB and pstatement['is_negated']:
pass#FIXME clean up condition check
else:
addPoliciesByEntities(agents, u'collect', pstatement['data_objects'], original_sentence, simplifiedStatements, actionLemma)
# If it is "you share/not share" and entities is nil, do not assume third-party
if len(agents) == 1 and type(agents[0]) == unicode and agents[0] == u'you':
if pstatement['entities'] is None or len(pstatement['entities']) == 0:
pstatement['entities'] = [u'we_implicit']
collect = u'not_collect' if pstatement['is_negated'] else u'collect'
#not sell, (you) not provide, trade, rent, exchange,
# collect = u'collect' if actionLemma in [u'sell', u'rent', u'trade', u'exchange'] else collect
# # you do not provide us does not mean not collect necessarily...
# if len(agents) == 1 and type(agents[0]) == unicode and agents[0] == u'you' and actionLemma in [u'provide', u'give']:
# return
addPoliciesByEntities(pstatement['entities'], collect, pstatement['data_objects'], original_sentence, simplifiedStatements, actionLemma)
def handleCollectVerb(pstatement, actionLemma, simplifiedStatements):
agents = [ getAgentText(a) for a in pstatement['agent'] ]
collect = u'not_collect' if pstatement['is_negated'] else u'collect'
if pstatement['is_negated'] and actionLemma == u'use':
return
#not use, store, save. "Use" is typically conditional, so ignore negation (e.g., not use for...)
collect = u'collect' if actionLemma in [u'store', u'save'] else collect
original_sentence = pstatement['original_sentence']
addPoliciesByEntities(agents, collect, pstatement['data_objects'], original_sentence, simplifiedStatements, actionLemma)
simplifiedStatements = []
#Array of statements
for pstatement in policyStatements:
#TODO analytics... exceptImpact, negation_distance
analyticsObj.recordPolicyStatementAnalytics(pstatement)
#Get the lemmas and do it this way instead...
for actionLemma in pstatement['action_lemmas']:
if actionLemma in [u'share', u'sell', u'provide', u'trade', u'transfer', u'give', u'distribute', u'disclose', u'send', u'rent', u'exchange', u'report', u'transmit']: #TODO refactor
handleShareVerb(pstatement, actionLemma, simplifiedStatements)
elif actionLemma in [u'collect', u'check', u'know', u'use', u'obtain', u'access', u'receive', u'gather', u'store', u'save']:#TODO refactor
handleCollectVerb(pstatement, actionLemma, simplifiedStatements)
# if pstatement['action'][2] in [AnnotationType.SHARE_VERB, AnnotationType.SHARE_AND_COLLECT_VERB]:
# handleShareVerb(pstatement, simplifiedStatements)
# if pstatement['action'][2] in [AnnotationType.COLLECT_VERB, AnnotationType.SHARE_AND_COLLECT_VERB]:
# handleCollectVerb(pstatement, simplifiedStatements)
return list(set(simplifiedStatements))
@staticmethod
def handleExceptions(policyStatements, keyPhraseTagger, tags): #TODO probably don't need the tagger...
def clonePolicyStatement(pol):
return {'data_objects' : pol['data_objects'], 'entities' : pol['entities'], 'agent' : pol['agent'], 'action' : pol['action'], 'action_lemmas' : pol['action_lemmas'], 'is_negated' : pol['is_negated'], 'negation_distance' : pol['negation_distance'], 'original_sentence' : pol['original_sentence'], u'exceptions' : pol['exceptions'] }
def lemmatize(tokens):
return u' '.join(t.lemma_ for t in tokens)
def getRelevantTags(e, tags):
return {(term.i, term):tags[(term.i, term)] for term in e if (term.i, term) in tags}
def isAllData(tags):
return all(tags[k].isData for k in tags)
def isAllEntity(tags):
return all(tags[k].isEntity for k in tags)
newStatements = []
removePolicyStatements = []
for pol in policyStatements:
if pol['exceptions'] is not None and len(pol['exceptions']) > 0:
# Get all exceptions at first that can be resolved with keywords or all data and entity
excepts = [ (v,e) for v,e in pol['exceptions'] ]
for v,e in pol['exceptions']:
#Record how often exceptions affect policy statements...
relTags = getRelevantTags(e, tags)
elemma = lemmatize(e)
if re.search(r'^.*\b(consent|you\sagree|your\s(express\s)?permission|you\sprovide|opt([\s\-](out|in))?|respond\sto\syou(r)?|disclose\sin\sprivacy\spolicy|follow(ing)?\scircumstance|permit\sby\schildren\'s\sonline\sprivacy\sprotection\sact)\b.*$', elemma):
#Only do the exceptions in negative cases...
# For example, we do not want to reverse: "We collect your personal information without your consent"
if not pol['is_negated']:
continue
newPol = clonePolicyStatement(pol)
newPol['is_negated'] = not newPol['is_negated']
newPol['exceptions'] = None#TODO do we ever really need this again?
newPol['exceptImpact'] = True
newStatements.append(newPol)
excepts.remove((v,e))
removePolicyStatements.append(pol)
elif elemma in [u'require by law', 'we receive subpoena', u'law']:
#Only do the exceptions in negative cases...
# For example, we do not want to reverse: "We collect your personal information without your consent"
if not pol['is_negated']:
continue
newPol = clonePolicyStatement(pol)
newPol['entities'] = [u'government agency']
newPol['is_negated'] = not newPol['is_negated']
newPol['exceptions'] = None#TODO do we ever really need this again?
newPol['exceptImpact'] = True
newStatements.append(newPol)
excepts.remove((v,e))
removePolicyStatements.append(pol)
elif len(relTags) == len(e):
newPol = clonePolicyStatement(pol)
newPol['is_negated'] = not newPol['is_negated']
newPol['exceptions'] = None#TODO do we ever really need this again?
newPol['exceptImpact'] = True
# If ALL data items
if isAllData(relTags):
newPol['data_objects'] = [ data.lemma_ for index,data in relTags ]
newStatements.append(newPol)
excepts.remove((v,e))
#removePolicyStatements.append(pol)
# If ALL entities
elif isAllEntity(relTags):
if newPol['action'][2].isCollect:
newPol['agent'] = [ data.lemma_ for index,data in relTags ]
else:
newPol['entities'] = [ data.lemma_ for index,data in relTags ]
newStatements.append(newPol)
excepts.remove((v,e))
#removePolicyStatements.append(pol)
else: #Not sure what it is, let's flip it anyway...
if not pol['is_negated']:
continue
newPol = clonePolicyStatement(pol)
newPol['is_negated'] = not newPol['is_negated']
newPol['exceptImpact'] = True
newPol['exceptions'] = None#TODO do we ever really need this again?
newStatements.append(newPol)
excepts.remove((v,e))
removePolicyStatements.append(pol)
for pol in newStatements:
policyStatements.append(pol)
for pol in removePolicyStatements:
if pol in policyStatements:
policyStatements.remove(pol)
return policyStatements
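# Node and edge match callbacks for networkx isomorphism checks: verb nodes
# match when their lemmas share a coarse verb group, ADP nodes must also agree
# on lemma, and edges must carry the same dependency label. Used as the
# node_match/edge_match arguments to nx.algorithms.isomorphism.is_isomorphic.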
class GraphCompare:
@staticmethod
def nmatchCallback(n1, n2):
def getVerbGroup(lemmaList):
groups = [[u'share', u'trade', u'exchange'],
[u'transmit', u'send', u'give', u'provide'],
[u'sell', u'transfer', u'distribute', u'disclose', u'rent', u'report'],
[u'collect', u'check', u'know', u'use', u'obtain', u'access', u'receive', u'gather', u'store', u'save' ]
]
results = []
for lemma in lemmaList:
for i,g in enumerate(groups):
if lemma in g:
results.append(i)
return set(results) # An empty result should never happen as long as these verb groups stay in sync with KeyphraseTagger's share/collect verb lists
if n1['tag'].isShareOrCollect and n2['tag'].isShareOrCollect:
vg1 = getVerbGroup(n1['lemmaList'])
vg2 = getVerbGroup(n2['lemmaList'])
return len(vg1.intersection(vg2)) > 0
# return getVerbGroup(n1['lemmaList']) == getVerbGroup(n2['lemmaList'])
#return n1['dep'] == n2['dep'] and groupsMatch #TODO should we ensure verb matches?
if n1['tag'].isNone and n2['tag'].isNone and n1['pos'] == u'ADP' and n2['pos'] == u'ADP':
return n1['tag'] == n2['tag'] and n1['dep'] == n2['dep'] and n1['lemma'] == n2['lemma']
if n1['tag'].isNone and n2['tag'].isNone and n1['pos'] == u'VERB' and n2['pos'] == u'VERB':
if n1['dep'] == u'ROOT' or n2['dep'] == u'ROOT':
return n1['tag'] == n2['tag'] and n1['pos'] == n2['pos']
return n1['tag'] == n2['tag'] and n1['dep'] == n2['dep']
@staticmethod
def ematchCallback(n1, n2):
return n1['label'] == n2['label']
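# Learns simplified dependency-graph patterns from training sentences (such as
# the templates above), keeping only patterns that are not isomorphic to one
# already seen. A minimal training sketch, assuming `nlp` is the loaded spaCy
# model extended with the DATA entity recognizer and `analytics` is an
# Analytics instance:
#
#   pd = PatternDiscover(nlp, analytics)
#   for text in trainingSentences:      # trainingSentences is illustrative
#       pd.train(text)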
class PatternDiscover:
def __init__(self, nlpModel, analyticsObj):
self.tagger = KeyphraseTagger()
self.parser = spacy.load(nlpModel) if type(nlpModel) != spacy.lang.en.English else nlpModel
self.patterns = []
self.learnedPatternsCounter = 0
self.analyticsObj = analyticsObj
def parseText(self, paragraph):
paragraph = re.sub(r'\bid\b', u'identifier', paragraph) # Spacy parses "id" as "i would", so fix here...
doc = self.parser(paragraph)
epm.mergeExcludePhrases(doc, self.parser.vocab)
npm.mergeNounPhrasesDoc(doc, self.parser.vocab)
return doc
def containsShareOrCollect(self, tags):
return any(tags[k].isShareOrCollect for k in tags)
def containsDataObject(self, tags):
return any(tags[k].isData for k in tags)
def train(self, paragraph):
doc = self.parseText(paragraph)
dgraphs = []
for sentence in doc.sents:
tags = self.tagger.tagSentence(sentence)
if len(tags) <= 0:
continue
if not self.containsShareOrCollect(tags):
continue
depGraph = DependencyGraphConstructor.getSimplifiedDependencyGraph(sentence, tags)
if depGraph is not None: # We have a problem here, why would it return None?
isIso = False
for p in self.patterns:
if nx.algorithms.isomorphism.is_isomorphic(depGraph, p, node_match=GraphCompare.nmatchCallback, edge_match=GraphCompare.ematchCallback):
isIso = True
break
if isIso:
continue
DependencyGraphConstructor.drawGraph(depGraph, 'TRAINED_PATTERNS/{}.png'.format(self.learnedPatternsCounter))
self.learnedPatternsCounter += 1
self.patterns.append(depGraph)
dgraphs.append(depGraph)
return dgraphs
def extractData(self, depGraph, subgraph, sentence, verbose=False):
def isVerbNearestAncestor(targetVerb, exceptVerb):
if targetVerb == exceptVerb:
return True
if exceptVerb.pos == spacy.symbols.VERB and self.tagger.getTag(exceptVerb).isShareOrCollect:
return False
if exceptVerb.head == exceptVerb: # Hit the root
return False
return isVerbNearestAncestor(targetVerb, exceptVerb.head)
def getReleventExceptions(verb, exceptions):
if exceptions is None or len(exceptions) == 0:
return exceptions
return [ (v,e) for v,e in exceptions if isVerbNearestAncestor(verb, v) ]
def getNearestAnnotVerb(depGraph, node):
for s,_ in depGraph.in_edges(node):
if s[2].isShareOrCollect:
return s
for s,_ in depGraph.in_edges(node):
res = getNearestAnnotVerb(depGraph, s)
if res is not None:
return res
return None
def hasSubjectAndDobj(depGraph, node):
hasSubject = any(n for _,n in depGraph.out_edges(node) if n[1].dep in [spacy.symbols.nsubj, spacy.symbols.nsubjpass])
hasObject = any(n for _,n in depGraph.out_edges(node) if n[1].dep in [spacy.symbols.dobj])
return hasSubject and hasObject
def extractDataObjects(depGraph, baseNode):
def extractDataObjectsInternal(results, depGraph, baseNode):
for _,node in depGraph.out_edges(baseNode):
if node[2].isData :
results.append(node)
elif node[2].isShareOrCollect: # Extract from NEAREST verb only
continue
elif node[1].pos == spacy.symbols.ADP and node[1].lemma_ in [u'except when', u'except where', u'unless when', u'unless where', u'except for', u'except in', u'except under', u'unless for', u'unless in', u'unless under', u'apart from', u'aside from', u'with the exception of', u'other than', u'except to', u'unless to', u'unless as', u'except as']:
continue
extractDataObjectsInternal(results, depGraph, node)
##########################
dataObjects = []
#TODO if relcl, should we check the parent first?
extractDataObjectsInternal(dataObjects, depGraph, baseNode)
#Only do this if we don't have a direct object AND subject...
if len(dataObjects) == 0 and not hasSubjectAndDobj(depGraph,baseNode):
# Get from nearest parent?
v = getNearestAnnotVerb(depGraph, baseNode)
extractDataObjectsInternal(dataObjects, depGraph, v)
return dataObjects
def getAgent(depGraph, baseNode):
def getEntityConjunctions(depGraph, node):
def getEntityConjunctionsInternal(depGraph, node, res):
for _,target in depGraph.out_edges(node):
if depGraph[node][target]['label'] == 'conj':
res.append(target)
getEntityConjunctionsInternal(depGraph, target, res)
return res
res = [node]
res = getEntityConjunctionsInternal(depGraph, node, res)
return res
def getAgentInternal(depGraph, baseNode, skipTraverseUpwards=False, isXcomp=False):
nsubj = None
nsubjpass = None
agentPobj = None
dobj = None
# Check children for the subject or agent if subject is passive...
for _,node in depGraph.out_edges(baseNode):
if depGraph[baseNode][node]['label'] == 'nsubj':
nsubj = node
elif depGraph[baseNode][node]['label'] == 'nsubjpass':
nsubjpass = node
elif depGraph[baseNode][node]['label'] == 'dobj' or depGraph[baseNode][node]['label'] == 'dative':
dobj = node
elif depGraph[baseNode][node]['label'] == 'agent': #"Agent" dependency tag
for _,node2 in depGraph.out_edges(node):
if node2[2].isEntity:
agentPobj = node2
if nsubj is None:
nsubj = nsubjpass
if isXcomp:
#If xcomp prefer dobj over nsubj...
if dobj is not None and dobj[2].isEntity:
return getEntityConjunctions(depGraph, dobj)
if nsubj is not None and nsubj[2].isEntity:
return getEntityConjunctions(depGraph, nsubj)
if nsubjpass is not None and nsubjpass[2].isEntity:
return getEntityConjunctions(depGraph, nsubjpass)
if agentPobj is not None and agentPobj[2].isEntity:
return getEntityConjunctions(depGraph, agentPobj)
else:
if nsubj is not None and nsubj[2].isEntity:
return getEntityConjunctions(depGraph, nsubj)
if nsubjpass is not None and nsubjpass[2].isEntity:
return getEntityConjunctions(depGraph, nsubjpass)
if agentPobj is not None and agentPobj[2].isEntity:
return getEntityConjunctions(depGraph, agentPobj)
if dobj is not None and dobj[2].isEntity:
return getEntityConjunctions(depGraph, dobj)
if not skipTraverseUpwards:
# If we don't find anything, get the parent verb if exists and search there
for node,_ in depGraph.in_edges(baseNode):
res = getAgentInternal(depGraph, node, skipTraverseUpwards=True, isXcomp=baseNode[1].dep in [spacy.symbols.xcomp, spacy.symbols.advcl])
if res is not None:
return res
return None
##################
agent = getAgentInternal(depGraph, baseNode)
if agent is None or len(agent) == 0: #If we haven't found anything return the default (i.e., "we") -- Rationale: "Personal information may be collected." means implicit "we"
return [u'we_implicit'] # Implicit first party
return agent
def ignoreActionObjectPair(verb, dobjects):
if verb.lemma_ == u'send': #Ignore send email or message
for d in dobjects:
if re.search(r'.*\b(email|message)\b.*', d):
return True
return False
def getVerbNegationDistance(token, sentence):
def isVerbNegatedInternal(token):
for t in token.children:
if t.dep == spacy.symbols.neg:
#TODO need to record this somewhere for analytics purposes...
return t.i
return -1
#return any(t.dep == spacy.symbols.neg for t in token.children)
dist = isVerbNegatedInternal(token)
if dist >= 0:
return dist
# Check if verb is part of conjugated verb phrase, if so, check if any of those are negated
conjugatedVerbs = DependencyGraphConstructor.getConjugatedVerbs(sentence, token)
for tok in conjugatedVerbs:
dist = isVerbNegatedInternal(tok)
if dist >= 0:
return dist
            # Check if verb is xcomp; if so, check whether the prior verb is negated
if token.dep == spacy.symbols.xcomp:
return getVerbNegationDistance(token.head, sentence)
return -1
def extractEntities(depGraph, baseNode):
def extractEntitiesInternal(results, depGraph, baseNode):
agent = getAgent(depGraph, baseNode)
for _,node in depGraph.out_edges(baseNode):
if node[2].isEntity and node not in agent:
results.append(node)
elif node[2].isShareOrCollect: # Extract from NEAREST annotated verb only
continue
elif node[1].pos == spacy.symbols.ADP and node[1].lemma_ in [u'except when', u'except where', u'unless when', u'unless where', u'except for', u'except in', u'except under', u'unless for', u'unless in', u'unless under', u'apart from', u'aside from', u'with the exception of', u'other than', u'except to', u'unless to', u'unless as', u'except as']:
continue
extractEntitiesInternal(results, depGraph, node)
##########################
entities = []
extractEntitiesInternal(entities, depGraph, baseNode)
return entities
#########################
def convertAgentToText(depGraph, agent):
if agent is None:
return agent
result = []
for a in agent:
if type(a) == unicode:
result.append(a)
continue
result.extend(depGraph.nodes[a]['lemmaList'])
return result
results = []
if verbose:
print 'Found match.\n\t', sentence
# Start at the verbs...
exceptions = eh.checkException(sentence)#TODO should probably check the verb match here instead of doing it below...
for n in depGraph:
if n[2].isShareOrCollect and n in subgraph: # Only extract from subgraph...
# dataObjects = [ d[1].lemma_ for d in extractDataObjects(depGraph, n) ]
dataObjects = []
for d in extractDataObjects(depGraph, n):
dataObjects.extend(depGraph.nodes[d]['lemmaList'])
# entities = [ e[1].lemma_ for e in extractEntities(depGraph, n) ]
entities = []
for e in extractEntities(depGraph, n):
entities.extend(depGraph.nodes[e]['lemmaList'])
agent = getAgent(depGraph, n)
#Agent to text
agent = convertAgentToText(depGraph, agent)
if len(dataObjects) == 0 or ignoreActionObjectPair(n[1], dataObjects): # skip <VERB, send>, <email>
continue
actionLemmas = depGraph.nodes[n]['lemmaList']
#Get related exceptions rooted under the specific share/collect verb...
relExcepts = getReleventExceptions(n[1], exceptions)
if verbose:
print n, (u'NOT', n[1].i, getVerbNegationDistance(n[1], sentence)) if depGraph.nodes[n]['neg'] else u''
print '\tDATA: ', dataObjects
print '\tAGENT: ', agent
print '\tENTITIES: ', entities
#print '\tTYPE: ', ptype
print '\tEXCEPTIONS: ', exceptions
negDist = getVerbNegationDistance(n[1], sentence) if depGraph.nodes[n]['neg'] else -1
results.append({'data_objects' : dataObjects, 'entities' : entities, 'agent' : agent, 'action' : n, 'action_lemmas' : actionLemmas, 'is_negated' : depGraph.nodes[n]['neg'], 'negation_distance' : negDist, 'original_sentence' : sentence.text, u'exceptions' : relExcepts })
return results
def test(self, paragraph):
def ensureAnnotationTagSetsEqual(tagSet1, tagSet2):
def combineShareCollectTagSets(tagset):
if AnnotationType.SHARE_AND_COLLECT_VERB in tagset:
if AnnotationType.SHARE_VERB in tagset:
tagset.remove(AnnotationType.SHARE_VERB)
if AnnotationType.COLLECT_VERB in tagset:
tagset.remove(AnnotationType.COLLECT_VERB)
#TODO REMOVE ME
# Treat everything as share or collect
removedNodes = False
for t in [AnnotationType.SHARE_VERB, AnnotationType.COLLECT_VERB, AnnotationType.SHARE_AND_COLLECT_VERB]:
if t in tagset:
tagset.remove(t)
removedNodes = True
if removedNodes:
tagset.add(AnnotationType.SHARE_VERB)
return tagset
###################
tagSet1 = combineShareCollectTagSets(tagSet1)
tagSet2 = combineShareCollectTagSets(tagSet2)
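            # Note: despite its name, this helper returns True when the two tag
            # sets DIFFER, so callers treat a True result as "skip this pattern".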
return len(tagSet1) != len(tagSet2) or len(tagSet2 - tagSet1) > 0 or len(tagSet1 - tagSet2) > 0
def getTagsFromGraph(depGraph):
return set([ n[2] for n in depGraph.nodes if n[2].isNotNone ])
def doesSentenceStartWithInterrogitive(sentence):#TODO we may want to be smarter about this...
return any(child.lemma_ in [u'who', u'what', u'when', u'where', u'why', u'how', u'do'] and child.dep == spacy.symbols.advmod for child in sentence.root.children)
##########################
results = []
doc = self.parseText(paragraph)
for sentence in doc.sents:
tags = self.tagger.tagSentence(sentence)
if len(tags) <= 0:
continue
if not self.containsShareOrCollect(tags) or not self.containsDataObject(tags) or doesSentenceStartWithInterrogitive(sentence):
continue
#Prune the tree..
depGraph = DependencyGraphConstructor.getSimplifiedDependencyGraph(sentence, tags)
            # tags were already validated above; only the graph construction can fail here
            if depGraph is None:
                continue
uniqueTags = getTagsFromGraph(depGraph)
subgraphs = []
for p in self.patterns:
ptags = getTagsFromGraph(p)
# Ensure pattern and test sentence have same types of tags present
if ensureAnnotationTagSetsEqual(uniqueTags, ptags):
continue
GM = nx.algorithms.isomorphism.GraphMatcher(depGraph, p, node_match=GraphCompare.nmatchCallback, edge_match=GraphCompare.ematchCallback)
matchFound = False
for subgraph in GM.subgraph_isomorphisms_iter():
# Ensure all of the tags in p are present in subgraph (i.e., avoid single token subgraph matches)
subgraphTags = set([ k[2] for k in subgraph if k[2].isNotNone ])
if ensureAnnotationTagSetsEqual(subgraphTags, ptags):
continue
subgraphs.extend(subgraph.keys())
if len(subgraphs) > 0:
#DependencyGraphConstructor.drawGraph(subgraph, 'TRAINED_PATTERNS/SUBGRAPH.png')
res = self.extractData(depGraph, subgraphs, sentence)
res = PolicyTransformer.handleExceptions(res, self.tagger, tags)
res = PolicyTransformer.applyPolicyTransformationRules(res, self.analyticsObj)
for r in res:
results.append(r)
return results if len(results) > 0 else None
def loadTrainingData(filename):
pass
def loadTestingData(path):
data = []
for root,dirs,files in os.walk(path):
for f in files:
data.append((f, [ line.strip() for line in codecs.open(os.path.join(root, f), 'r', 'utf-8') ]))
return data
def aggregateBySentence(policies):
results = {}
if policies is not None:#We can just do extend instead of append if we're not going to be verbose here...
for actor,collect,data,orig_sentence,actionLemma in policies:
if orig_sentence not in results:
results[orig_sentence] = set()
results[orig_sentence].add((actor, collect, data, actionLemma))
return results
def prettyPrintResults(policies):
res = aggregateBySentence(policies)
for sen in res:
print sen
for pol in res[sen]:
print '\t', pol
def val(v):
res = v if type(v) == unicode else v.lemma_
return res.encode('utf-8')
def valTxt(v):
res = v if type(v) == unicode else v.text
return res.encode('utf-8')
def getOutputFilename(filename, outputDir):
fname,ext = os.path.splitext(os.path.basename(filename))
return os.path.join(outputDir, '{}.pickle'.format(fname))
def dumpData(res, fname, outDir):
outFile = getOutputFilename(fname, outDir)
pickle.dump(res, open(outFile, 'wb'))
def dumpTree(tok, tab=u''):
print tab, tok.lemma_, tok.pos_, tok.dep_, tok.i, tok.ent_type_
for child in tok.children:
dumpTree(child, tab + u'\t')
def drawGraph(graph):
# A = nx.drawing.nx_agraph.to_agraph(graph)
# return A.draw(format='png', prog='dot', args='-Granksep=2.0')
return
analytics = Analytics()
pd = PatternDiscover(nlpModel = nlp, analyticsObj=analytics)
for sentence in TRAINING_DATA:
pd.train(sentence)
# TODO serialize the patterns and load up instead of training again...
print len(pd.patterns)
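# Hypothetical sketch for the TODO above (helper name and cache path are illustrative,
# not from the original): pickle the learned dependency-graph patterns once so later
# runs can reload them instead of re-training.
def loadOrTrainPatterns(pd, trainingData, cachePath='/ext/output/trained_patterns.pickle'):
    if os.path.isfile(cachePath):
        with open(cachePath, 'rb') as fh:
            pd.patterns = pickle.load(fh)
    else:
        for sentence in trainingData:
            pd.train(sentence)
        with open(cachePath, 'wb') as fh:
            pickle.dump(pd.patterns, fh)
    return pd.patterns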
# In[70]:
def loadTestingDataFromFile(path):
data = []
for filepath in codecs.open(path, 'r', 'utf-8'):
filepath = filepath.strip()
data.append((os.path.basename(filepath), [ line.strip() for line in codecs.open(filepath, 'r', 'utf-8') ]))
return data
subsetNum = sys.argv[1]
testing_data = loadTestingDataFromFile('/ext/input/policySubsets/{}.txt'.format(subsetNum))
complete_results = {}
for filename,text in testing_data:
results = []
if os.path.isfile(getOutputFilename(filename, '/ext/output/policy')) and os.path.isfile(getOutputFilename(filename, '/ext/output/analytics')):
print 'Skipping', filename
continue
print '--------------------Parsing {}--------------------'.format(filename)
analytics.startDoc(filename)
for line in text:
try: #FIXME remove this exception block
print line
res = pd.test(line)
if res is not None:#We can just do extend instead of append if we're not going to be verbose here...
res = [ (val(ent), val(col), val(dat), valTxt(sen), val(actionLemma)) for ent,col,dat,sen,actionLemma in res ]
results.extend(res)
prettyPrintResults(res)
except RuntimeError as err:
with codecs.open('/ext/output/log/ERROR_LOG_REC_{}.log'.format(subsetNum), 'a', 'utf-8') as logfile:
logfile.write(u'{} --- {}\n'.format(filename, line))
analytics.endDoc()
dumpData(results, filename, '/ext/output/policy')
dumpData(analytics.dataStore[filename], filename, '/ext/output/analytics')
complete_results[filename] = results
    print '--------------------------------------------------'
# Pickle the results...
#pickle.dump(complete_results, open('/ext/output/policy_results.pickle', 'wb'))
#json.dump(complete_results, open('/ext/output/policy_results.json', 'wb'), indent=4)
#pickle.dump(analytics.dataStore, open('/ext/output/analytics_data.pickle', 'wb'))
#json.dump(analytics.dataStore, codecs.open('/ext/output/analytics_data.json', 'wb', 'utf-8'), indent=4)
|
'''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.tpi import Arjuna
from arjuna.unitee.types.root import *
from arjuna.unitee.types.containers import *
from arjuna.core.reader.hocon import *
from arjuna.core.utils import sys_utils
from arjuna.unitee.test.defs.fixture import *
from .group import *
from xml.etree.ElementTree import Element
from arjuna.core.utils import etree_utils
from arjuna.unitee.utils import run_conf_utils
from arjuna.core.config import ConfigContainer
class StageDef(Root):
def __init__(self, sdef, id, stage_xml):
super().__init__()
self.gdefs = []
self.config = ConfigContainer()
self.tmcount = 0
self.threads = 1
self.sdef = sdef
self.__iter = None
self.__fixtures = FixturesDef()
self.root = stage_xml
self.unitee = Arjuna.get_unitee_instance()
self.id = id
self.name = "stage{:d}".format(id)
if not isinstance(stage_xml, Element):
self.console.display_error("Fatal: [Arjuna Error] Unsuppored input argument supplied for stage creation: {}".format(stage_xml))
sys_utils.fexit()
else:
self.__process(stage_xml)
# self.nodes.append(SessionSubNode(self, len(self.nodes) + 1, input))
@property
def fixture_defs(self):
return self.__fixtures
def __process(self, group_hocon):
def display_err_and_exit(msg):
self.console.display_error((msg + " Fix session template file: {}").format(self.sdef.fpath))
sys_utils.fexit()
stage_attrs = etree_utils.convert_attribs_to_cidict(self.root)
if "name" in stage_attrs:
self.name = stage_attrs['name'].strip()
if not self.name:
display_err_and_exit(">>name<< attribute in stage definition should be a non-empty string.")
threads_err_msg = ">>threads<< attribute in stage definition can be integer >=1."
if "threads" in stage_attrs:
self.threads = stage_attrs['threads'].strip()
try:
self.threads = int(self.threads)
except:
display_err_and_exit(threads_err_msg)
else:
if self.threads <=0:
display_err_and_exit(threads_err_msg)
node_dict = etree_utils.convert_to_cidict(self.root)
if "groups" not in node_dict:
display_err_and_exit(">>stage<< element in session definition must contain >>groups<< element.")
for child_tag, child in node_dict.items():
child_tag = child_tag.lower()
if child_tag == 'config':
config = child
for option in config:
run_conf_utils.validate_config_xml_child("session", self.sdef.fpath, option)
run_conf_utils.add_config_node_to_configuration("session", self.config, option)
elif child_tag == 'fixtures':
fixtures = child
for child in fixtures:
run_conf_utils.validate_fixture_xml_child("session", "stage", self.sdef.fpath, child)
run_conf_utils.add_fixture_node_to_fixdefs(self.fixture_defs, child)
elif child_tag =='groups':
if "group" not in etree_utils.convert_to_cidict(child):
display_err_and_exit(">>groups<< element in stage definition must contain atleast one >>group<< element.")
groups = list(child)
for index, group in enumerate(groups):
run_conf_utils.validate_group_xml_child("session", self.sdef.fpath, group)
node = GroupDef(self.sdef, self, len(self.gdefs) + 1, group)
self.gdefs.append(node)
else:
display_err_and_exit("Unexpected element >>{}<< found in >>stage<< definition in session file.".format(child.tag))
def pick(self):
for gdef in self.gdefs:
self.tmcount += gdef.pick()
self.__iter = iter(self.gdefs)
|
from orbs_client.account import create_account
from orbs_client.client import Client
|
"""
test_passlock.py
Tests for passlock.
"""
import logging
from passlock import __version__
logging.disable(logging.CRITICAL)
def test_version():
assert __version__ == '0.1.4'
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [START documentai_process_quality_document]
# TODO(developer): Uncomment these variables before running the sample.
# project_id= 'YOUR_PROJECT_ID'
# location = 'YOUR_PROJECT_LOCATION' # Format is 'us' or 'eu'
# processor_id = 'YOUR_PROCESSOR_ID' # Create processor in Cloud Console
# file_path = '/path/to/local/pdf'
def process_document_quality_sample(
project_id: str, location: str, processor_id: str, file_path: str
):
from google.cloud import documentai_v1beta3 as documentai
# You must set the api_endpoint if you use a location other than 'us', e.g.:
opts = {}
if location == "eu":
opts = {"api_endpoint": "eu-documentai.googleapis.com"}
client = documentai.DocumentProcessorServiceClient(client_options=opts)
# The full resource name of the processor, e.g.:
# projects/project-id/locations/location/processor/processor-id
# You must create new processors in the Cloud Console first
name = f"projects/{project_id}/locations/{location}/processors/{processor_id}"
with open(file_path, "rb") as image:
image_content = image.read()
# Read the file into memory
document = {"content": image_content, "mime_type": "application/pdf"}
# Configure the process request
request = {"name": name, "raw_document": document}
# Recognizes text entities in the PDF document
result = client.process_document(request=request)
print("Document processing complete.\n")
# Read the quality-specific information from the output from the
# Intelligent Document Quality Processor:
# https://cloud.google.com/document-ai/docs/processors-list#processor_doc-quality-processor
# OCR and other data is also present in the quality processor's response.
# Please see the OCR and other samples for how to parse other data in the
# response.
document = result.document
for entity in document.entities:
conf_percent = "{:.1%}".format(entity.confidence)
page_num = ""
try:
page_num = str(int(entity.page_anchor.page_refs.page) + 1)
except AttributeError:
page_num = "1"
print(f"Page {page_num} has a quality score of {conf_percent}:")
for prop in entity.properties:
conf_percent = "{:.1%}".format(prop.confidence)
print(f" * {prop.type_} score of {conf_percent}")
# [END documentai_process_quality_document]
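# Example invocation (placeholder values, mirroring the commented variables above):
# process_document_quality_sample(
#     project_id="YOUR_PROJECT_ID",
#     location="us",
#     processor_id="YOUR_PROCESSOR_ID",
#     file_path="/path/to/local/pdf",
# )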
|
# Generated by Django 3.1.6 on 2021-03-31 12:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20210330_1640'),
]
operations = [
migrations.AlterField(
model_name='basket',
name='customer',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to='core.customer'),
),
migrations.AlterField(
model_name='basketitem',
name='basket',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='basket', to='core.basket'),
),
migrations.AlterField(
model_name='basketitem',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='core.product'),
),
]
|
# coding: utf-8
import celery
@celery.shared_task()
def sleep(message, seconds=1):
import time
time.sleep(seconds)
print(f'bar: {message}')
return seconds
|
#!/usr/bin/env python3
import os
import glob
import subprocess
from utility.general_repo_tools import get_repo_root
if __name__ == '__main__':
root = get_repo_root()
format_directories = [os.path.join(root, d) for d in ['apps', 'apps_test', 'core_csiro']]
ignored_files = ["*gatt_efr32*", "*gatt_nrf52*", "*rpc_server*", "*rpc_client*"]
for d in format_directories:
c_finder_args = ["find", d, "-iname", "*.c"]
h_finder_args = ["find", d, "-iname", "*.h"]
for f in ignored_files:
c_finder_args += ["-not", "(", "-name", f, ")"]
h_finder_args += ["-not", "(", "-name", f, ")"]
formatter_args = ["xargs", "clang-format", "-i", "-style=file"]
print("Formatting source files in {:s}...".format(d))
c_finder = subprocess.Popen(c_finder_args, stdout=subprocess.PIPE)
result = subprocess.check_output(formatter_args, stdin=c_finder.stdout)
print("Formatting header files in {:s}...".format(d))
h_finder = subprocess.Popen(h_finder_args, stdout=subprocess.PIPE)
result = subprocess.check_output(formatter_args, stdin=h_finder.stdout)
print("Formatting complete")
|
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
if len(A) < 3:
return 0
A = sorted(A)
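    # After sorting, the maximal triplet product is one of two candidates:
    # the two smallest values (possibly large negatives) times the largest value,
    # or the three largest values.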
product_A = A[0] * A[1] * A[-1]
product_B = A[-1] * A[-2] * A[-3]
max_product = max(product_A, product_B)
return max_product
|
from django.contrib.auth import get_user_model
from django.contrib import messages
from django.core.mail import send_mail
from django.conf import settings
from django.db.models import Q
from django.db.models.functions import Concat
from django.db.models import Value
from .serializers import (
UserDetailSerializer,
UserRUDSerializer,
)
from rest_framework.generics import (
CreateAPIView,
RetrieveUpdateDestroyAPIView,
ListAPIView,
)
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from ..models import Profile
from rest_framework.status import (
HTTP_200_OK,
HTTP_400_BAD_REQUEST,
HTTP_201_CREATED
)
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
User=get_user_model()
from rest_framework.exceptions import NotFound
from rest_framework import status
from allauth.account.models import EmailConfirmation, EmailConfirmationHMAC
from django.http import HttpResponse, HttpResponseRedirect
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
from rest_auth.registration.views import SocialConnectView
# from posts.api.serializers import PostDetailSerializer
# from posts.api.pagination import StandardResultPagination
# from posts.models import Post
from django.utils import timezone
from django.contrib.sessions.models import Session
import datetime
class GoogleLogin(SocialConnectView):
adapter_class = GoogleOAuth2Adapter
class ConfirmEmailView(APIView):
permission_classes = [AllowAny]
def get(self, *args, **kwargs):
self.object = confirmation = self.get_object()
try:
confirmation.confirm(self.request)
return Response({"details":"E-mail ID registered successfully!"})
except:
# A React Router Route will handle the failure scenario
return Response({"details":"Failed to register E-mail ID. Invalid Link!"})
def get_object(self, queryset=None):
key = self.kwargs['key']
email_confirmation = EmailConfirmationHMAC.from_key(key)
if not email_confirmation:
if queryset is None:
queryset = self.get_queryset()
try:
email_confirmation = queryset.get(key=key.lower())
except EmailConfirmation.DoesNotExist:
# A React Router Route will handle the failure scenario
return Response({"details":"Failed to register E-mail ID. An error occured!"})
return email_confirmation
def get_queryset(self):
qs = EmailConfirmation.objects.all_valid()
qs = qs.select_related("email_address__user")
return qs
class DeleteAllUnexpiredSessionsForUser(APIView):
def get(self, request):
try:
unexpired_sessions = Session.objects.filter(expire_date__gte=timezone.now())
[
session.delete() for session in unexpired_sessions
if str(request.user.id) == session.get_decoded().get('_auth_user_id')
]
except:
return Response({"detail":"Error!"})
return Response({"detail":"Successfully deleted all existing sessions!"})
class CurrentUserAPIView(APIView):
def get(self, request):
serializer = UserDetailSerializer(request.user,context={'request': request})
newdict={"sessionkey":request.session.session_key}
newdict.update(serializer.data)
        return Response(newdict)  # include the session key alongside the serialized user data
class UserListAPIView(ListAPIView):
serializer_class=UserDetailSerializer
permission_classes = [AllowAny]
def get_queryset(self):
qs=User.objects.all()
query=self.request.GET.get('s')
if query is not None:
qs=qs.filter(
Q(username__icontains=query)|
Q(first_name__icontains=query)|
Q(last_name__icontains=query)
).distinct()
return qs
class UserRUDView(RetrieveUpdateDestroyAPIView):
lookup_field= 'username'
serializer_class=UserRUDSerializer
    queryset = User.objects.all()
def get(self, request, username, *args, **kwargs):
if(username!=request.user.username):
return Response({"detail": "Not found."}, status=400)
else:
serializer = UserRUDSerializer(request.user,context={'request': request})
return Response(serializer.data)
def update(self, request, username, *args, **kwargs):
if(username!=request.user.username):
return Response({"detail": "Not found."}, status=400)
else:
serializer = UserRUDSerializer(request.user,context={'request': request})
return Response(serializer.data)
def destroy(self, request, username, *args, **kwargs):
if(username!=request.user.username):
return Response({"detail": "Not found."}, status=400)
else:
serializer = UserRUDSerializer(request.user,context={'request': request})
return Response(serializer.data)
# def get_queryset(self,*args, **kwargs):
# print(*args, **kwargs)
# return User.objects.all()
class FollowUnfollowAPIView(APIView):
serializer_class = UserDetailSerializer
permission_classes = [IsAuthenticated]
lookup_field = 'username'
queryset = User.objects.all()
def get(self, request, slug, format=None):
message = "ERROR"
toggle_user = get_object_or_404(User, username__iexact=slug)
if request.user.is_authenticated:
# print("Hey", request.user, toggle_user)
is_following = Profile.objects.toggle_follow(request.user, toggle_user)
user_qs = get_object_or_404(User, username=toggle_user)
serializer = UserDetailSerializer(user_qs,context={'request': request})
serializer2 = UserDetailSerializer(request.user,context={'request': request})
new_serializer_data = dict(serializer.data)
new_serializer_data2 = dict(serializer2.data)
new_serializer_data.update({'following': is_following})
new_serializer_data.update({'count': request.user.profile.following.all().count()})
new_serializer_data.update({'count2': toggle_user.followed_by.all().count()})
new_serializer_data.update({'logged': new_serializer_data2})
return Response(new_serializer_data)
return Response({"message": message}, status=400)
class FollowRemoveAPIView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, slug, format=None):
message = "ERROR"
toggle_user = get_object_or_404(User, username__iexact=slug)
if request.user.is_authenticated:
# print("Hey", request.user, toggle_user)
is_following = Profile.objects.toggle_remove_follow(request.user, toggle_user)
user_qs = get_object_or_404(User, username=toggle_user)
serializer = UserDetailSerializer(user_qs,context={'request': request})
new_serializer_data = dict(serializer.data)
new_serializer_data.update({'following': is_following})
new_serializer_data.update({'count': request.user.followed_by.all().count()})
return Response(new_serializer_data)
return Response({"message": message}, status=400)
# class UserPostListAPIView(ListAPIView):
# serializer_class = PostDetailSerializer
# pagination_class = StandardResultPagination
# def get_queryset(self, *args, **kwargs):
# qsuser = Post.objects.filter(user__username=self.kwargs['slug']).order_by("-updated_on")
# print(self.request.GET)
# search = self.request.GET.get("s", None)
# if search:
# qsfn = qsuser.annotate(full_name=Concat('user__first_name', Value(' '), 'user__last_name'))
# qs=qsfn.filter(
# Q(content__icontains=search) |
# Q(user__username__icontains=search) |
# Q(user__first_name__icontains=search) |
# Q(user__last_name__icontains=search) |
# Q(full_name__icontains=search)
# )
# return qs
# else:
# return qsuser
#Useless
# class UserCreateAPIView(CreateAPIView):
# serializer_class = UserCreateSerializer
# queryset = User.objects.all()
# def post(self,request,*args,**kwargs):
# serializer = UserCreateSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# subject="Thank you for signing up!"
# message="Welcome to local host"
# from_mail=settings.EMAIL_HOST_USER
# to_list=[serializer.data['email'],settings.EMAIL_HOST_USER]
# # send_mail(subject,message,from_mail,to_list,fail_silently=True)
# return Response(serializer.data, status=HTTP_201_CREATED)
# return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
# class UserLoginAPIView(APIView):
# permission_classes=[AllowAny]
# serializer_class = UserLoginSerializer
# def post(self,request,*args,**kwargs):
# serializer = UserLoginSerializer(data=request.data)
# if serializer.is_valid(raise_exception=True):
# return Response(serializer.data,status=HTTP_200_OK)
# return Response(serializer.errors,status=HTTP_400_BAD_REQUEST)
|
# coding=utf-8
# -*- python -*-
#
# This file is part of GDSCTools software
#
# Copyright (c) 2015 - Wellcome Trust Sanger Institute
# All rights reserved
#
# File author(s): Thomas Cokelaer <cokelaer@gmail.com>
#
# Distributed under the BSD 3-Clause License.
# See accompanying file LICENSE.txt distributed with this software
#
# website: http://github.com/CancerRxGene/gdsctools
#
##############################################################################
"""Code related to the ANOVA analysis to find associations between drug IC50s
and genomic features"""
from statsmodels.stats import multitest
import easydev
import numpy as np
from gdsctools.qvalue import QValue
__all__ = ['MultipleTesting', 'cohens', "signed_effects"]
def multiple_correction(pvalues, method='fdr'):
mt = MultipleTesting(method=method)
values = mt.get_corrected_pvalues(pvalues, method=None)
return values
class MultipleTesting(object):
"""This class eases the computation of multiple testing corrections
The method implemented so far are based on statsmodels or a local
implementation of **qvalue** method.
    ================ =============================================
    method name      Description
    ================ =============================================
    bonferroni       one-step correction
    sidak            one-step correction
    holm-sidak       step down method using Sidak adjustments
    holm             step down method using Bonferroni adjustments
    simes-hochberg   step up method (independent)
    hommel           closed method based on Simes tests
                     (non-negative)
    fdr_bh           FDR Benjamini-Hochberg (non-negative)
    fdr_by           FDR Benjamini-Yekutieli (negative)
    fdr_tsbky        FDR 2-stage Benjamini-Krieger-Yekutieli
                     (non-negative)
    fdr_tsbh         FDR 2-stage Benjamini-Hochberg
                     (non-negative)
    fdr              same as fdr_bh
    qvalue           see :class:`~gdsctools.qvalue.QValue` class
    ================ =============================================
.. seealso:: :mod:`gdsctools.qvalue`.
.. seealso:: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2907892/
"""
def __init__(self, method=None):
""".. rubric:: Constructor
:param method: default to **fdr** that is the FDR Benjamini-Hochberg
correction.
"""
#: set of valid methods
self.valid_methods = ['bonferroni', 'sidak', 'fdr_by',
'holm-sidak', 'simes-hochberg', 'hommel', 'fdr_bh',
'fdr_tsbh', 'fdr_tsbky', 'fdr', 'qvalue']
self._method = 'fdr'
if method is not None:
self.method = method
# parameter of the multiple test (e.g. used if method is bonferroni
self.alpha = 0.1
def _get_method(self):
return self._method
def _set_method(self, method):
easydev.check_param_in_list(method, self.valid_methods)
if method == 'fdr':
method = 'fdr_bh'
self._method = method
method = property(_get_method, _set_method, doc="get/set method")
def get_corrected_pvalues(self, pvalues, method=None):
"""Return corrected pvalues
:param list pvalues: list or array of pvalues to correct.
:param method: use the one defined in the constructor by default
but can be overwritten here
"""
if method is not None:
self.method = method
pvalues = np.array(pvalues)
if self.method == 'qvalue':
qv = QValue(pvalues)
corrections = qv.qvalue()
return corrections
else:
corrections = multitest.multipletests(pvalues,
alpha=self.alpha, method=self.method)[1]
return corrections
def plot_comparison(self, pvalues, methods=None):
"""Simple plot to compare the pvalues correction methods
.. plot::
:include-source:
:width: 80%
from gdsctools.stats import MultipleTesting
mt = MultipleTesting()
pvalues = [1e-10, 9.5e-2, 2.2e-1, 3.6e-1, 5e-1, 6e-1,8e-1,9.6e-1]
mt.plot_comparison(pvalues,
methods=['fdr_bh', 'qvalue', 'bonferroni', 'fdr_tsbh'])
.. note:: in that example, the qvalue and FDR are identical, but
this is not true in general.
"""
if methods is None:
methods = self.valid_methods
import pylab
pylab.clf()
for method in methods:
pv = self.get_corrected_pvalues(pvalues, method=method)
pylab.plot(pvalues, pv, 'o-', label=method.replace("_","\_"))
pylab.legend(loc='best')
pylab.ylabel('corrected pvalues')
pylab.grid()
pylab.ylim([0, 1.05])
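# Minimal usage sketch (not part of the original module): correct a few p-values
# with the Benjamini-Hochberg FDR method ('fdr' is mapped to 'fdr_bh' internally).
def _multiple_testing_example():
    mt = MultipleTesting(method='fdr')
    return mt.get_corrected_pvalues([1e-4, 0.01, 0.04, 0.2])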
def cohens(x, y):
r"""Effect size metric through Cohen's *d* metric
:param x: first vector
:param y: second vector
:return: absolute effect size value
The Cohen's effect size *d* is defined as the difference
between two means divided by a standard deviation of the data.
.. math::
d = \frac{\bar{x}_1 - \bar{x}_2}{s}
For two independent samples, the *pooled standard deviation* is used
instead, which is defined as:
.. math::
s = \sqrt{ \frac{(n_1-1)s_1^2 + (n_2-1)s_2^2}{n_1+n_2-2} }
A Cohen's *d* is frequently used in estimating sample sizes for
statistical testing: a lower *d* value indicates the necessity of
larger sample sizes, and vice versa.
.. note:: we return the absolute value
:references: https://en.wikipedia.org/wiki/Effect_size
"""
x = np.array(x)
y = np.array(y)
Nx = len(x) - 1. # note the dot to cast to float
Ny = len(y) - 1.
# mean difference:
md = np.abs(x.mean() - y.mean())
# here, we want same as in R that is unbiased variance
# so we use ddof = 1
xv = x.var(ddof=1)
yv = y.var(ddof=1)
csd = Nx * xv + Ny * yv
csd /= Nx + Ny # make sure this is float
csd = np.sqrt(csd)
return md / csd
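# Illustrative check (not part of the original module): two equally spread samples
# whose means differ by one unit give d = 1 / pooled standard deviation.
def _cohens_example():
    x = [2.0, 4.0, 6.0, 8.0]   # mean 5.0, unbiased variance 20/3
    y = [1.0, 3.0, 5.0, 7.0]   # mean 4.0, same spread
    return cohens(x, y)        # |5 - 4| / sqrt(20/3) ~= 0.39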
def glass(x, y):
r"""Return Effect size through Glass :math:`\Delta` estimator
:param x: first sample
:param y: second sample
:return: 2 values (one or each sample)
The Glass effect size is computed as
.. math::
\Delta = \frac{\bar{x}_1-\bar{x}_2}{\sigma_i}
.. note:: the standard deviation is the unbiased one (divided by N-1)
where :math:`\sigma` is the standard deviation of either group
"""
x = np.array(x)
y = np.array(y)
# mean difference:
md = np.abs(x.mean() - y.mean())
# here, we want same as in R that is unbiased variance
# so we use ddof = 1
g1 = md / x.std(ddof=1)
g2 = md / y.std(ddof=1)
return g1, g2
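# Illustrative check (not part of the original module): glass() scales the same
# mean difference by each sample's own unbiased standard deviation.
def _glass_example():
    x = [2.0, 4.0, 6.0, 8.0]   # std (ddof=1) ~= 2.58
    y = [1.0, 2.0, 3.0, 4.0]   # std (ddof=1) ~= 1.29
    return glass(x, y)         # mean difference 2.5 -> (~0.97, ~1.94)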
def signed_effects(df):
import numpy as np
_colname_deltas = 'FEATURE_delta_MEAN_IC50'
_colname_effect_size = 'FEATURE_IC50_effect_size'
deltas = df[_colname_deltas]
effects = df[_colname_effect_size]
signed_effects = list(np.sign(deltas) * effects)
return signed_effects
|
# See
# import this into lldb with a command like
# command script import pmat.py
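# Once loaded, the command takes the name of a vMAT_Array variable in the
# currently selected frame, e.g.:
# (lldb) pmat myMatrix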
import lldb
import shlex
import optparse
def pmat(debugger, command, result, dict):
# Use the Shell Lexer to properly parse up command options just like a
# shell would
command_args = shlex.split(command)
parser = create_pmat_options()
try:
(options, args) = parser.parse_args(command_args)
except:
return
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
frame = process.GetSelectedThread().GetSelectedFrame()
if frame:
var = frame.FindVariable(args[0])
if var:
array = var.GetChildMemberWithName("matA")
if array:
id = array.GetValueAsUnsigned (lldb.LLDB_INVALID_ADDRESS)
if id != lldb.LLDB_INVALID_ADDRESS:
debugger.HandleCommand ('po [0x%x dump]' % id)
def create_pmat_options():
usage = "usage: %prog"
description='''Print a dump of a vMAT_Array instance.'''
parser = optparse.OptionParser(description=description, prog='pmat',usage=usage)
return parser
#
# code that runs when this script is imported into LLDB
#
def __lldb_init_module (debugger, dict):
# This initializer is being run from LLDB in the embedded command interpreter
# Make the options so we can generate the help text for the new LLDB
# command line command prior to registering it with LLDB below
# add pmat
parser = create_pmat_options()
pmat.__doc__ = parser.format_help()
# Add any commands contained in this module to LLDB
debugger.HandleCommand('command script add -f %s.pmat pmat' % __name__)
|
# -*- coding: utf-8 -*-
from .pyver import PY2
__all__ = (
'BufferIO',
'StringIO',
)
if PY2:
from io import BytesIO, StringIO
class BufferIO(BytesIO):
pass
else:
from io import StringIO
class BufferIO(StringIO):
pass
|
from django.apps import AppConfig
class DocMgmtConfig(AppConfig):
name = 'doc_mgmt'
|
from flask import Flask
from flask_cors import CORS
import config as c
app = Flask(__name__)
CORS(app)
@app.route("/")
def helloWorld():
return "Hello, cross-origin-world!"
|
#!/usr/bin/env python
# Constrained EM algorithm for the Shared Response Model
# A Reduced-Dimension fMRI Shared Response Model
# Po-Hsuan Chen, Janice Chen, Yaara Yeshurun-Dishon, Uri Hasson, James Haxby, Peter Ramadge
# Advances in Neural Information Processing Systems (NIPS), 2015. (to appear)
# movie_data is a list of nsubjs matrices; movie_data[m] has size nvoxel[m] x nTR
# and holds the data for subject m, i.e. X_m^T in the standard mathematical notation
# E-step:
# E_s : nfeature x nTR
# E_sst : nfeature x nfeature x nTR
# M-step:
# W_m : nvoxel[m] x nfeature
# sigma_m2 : nsubjs
# Sig_s : nfeature x nfeature
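# Calling sketch inferred from the code below (not part of the original header):
#   options['working_path'] : directory where <align_algo>_*.npz checkpoints are kept
#   args.nfeature, args.align_algo, args.randseed : algorithm parameters
#   new_niter = align(movie_data, options, args)  # each call performs one EM iteration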
import numpy as np, scipy, random, sys, math, os
from scipy import stats
def align(movie_data, options, args):
print 'SRM',
sys.stdout.flush()
nsubjs = len(movie_data)
for m in range(nsubjs):
assert movie_data[0].shape[1] == movie_data[m].shape[1], 'numbers of TRs are different among subjects'
nTR = movie_data[0].shape[1]
nfeature = args.nfeature
align_algo = args.align_algo
current_file = options['working_path']+align_algo+'_current.npz'
# zscore the data
print 'zscoring data'
nvoxel = np.zeros((nsubjs,),dtype=int)
for m in xrange(nsubjs):
nvoxel[m] = movie_data[m].shape[0]
bX = np.zeros((sum(nvoxel),nTR))
voxel_str = 0
for m in range(nsubjs):
bX[voxel_str:(voxel_str+nvoxel[m]),:] = stats.zscore(movie_data[m].T ,axis=0, ddof=1).T
voxel_str = voxel_str + nvoxel[m]
del movie_data
# initialization when first time run the algorithm
if not os.path.exists(current_file):
print 'initialization of parameters'
bSig_s = np.identity(nfeature)
bW = np.zeros((sum(nvoxel),nfeature))
sigma2 = np.zeros(nsubjs)
ES = np.zeros((nfeature,nTR))
bmu = []
for m in xrange(nsubjs):
bmu.append(np.zeros((nvoxel[m],)))
#initialization
voxel_str = 0
if args.randseed is not None:
print 'randinit',
np.random.seed(args.randseed)
for m in xrange(nsubjs):
print m,
A = np.random.random((nvoxel[m],nfeature))
Q, R_qr = np.linalg.qr(A)
bW[voxel_str:(voxel_str+nvoxel[m]),:] = Q
sigma2[m] = 1
bmu[m] = np.mean(bX[voxel_str:(voxel_str+nvoxel[m]),:],1)
voxel_str = voxel_str + nvoxel[m]
else:
for m in xrange(nsubjs):
print m,
Q = np.eye(nvoxel[m],nfeature)
bW[voxel_str:(voxel_str+nvoxel[m]),:] = Q
sigma2[m] = 1
bmu[m] = np.mean(bX[voxel_str:(voxel_str+nvoxel[m]),:],1)
voxel_str = voxel_str + nvoxel[m]
niter = 0
np.savez_compressed(options['working_path']+align_algo+'_'+str(niter)+'.npz',\
bSig_s = bSig_s, bW = bW, bmu=bmu, sigma2=sigma2, ES=ES, nvoxel=nvoxel, niter=niter)
print ''
else:
# more iterations starts from previous results
workspace = np.load(current_file)
niter = workspace['niter']
workspace = np.load(options['working_path']+align_algo+'_'+str(niter)+'.npz')
bSig_s = workspace['bSig_s']
bW = workspace['bW']
bmu = workspace['bmu']
sigma2 = workspace['sigma2']
ES = workspace['ES']
niter = workspace['niter']
nvoxel = workspace['nvoxel']
# remove mean
bX = bX - bX.mean(axis=1)[:,np.newaxis]
print str(niter+1)+'th',
bSig_x = bW.dot(bSig_s).dot(bW.T)
voxel_str = 0
for m in range(nsubjs):
bSig_x[voxel_str:(voxel_str+nvoxel[m]),voxel_str:(voxel_str+nvoxel[m])] += sigma2[m]*np.identity(nvoxel[m])
voxel_str = voxel_str + nvoxel[m]
inv_bSig_x = scipy.linalg.inv(bSig_x)
ES = bSig_s.T.dot(bW.T).dot(inv_bSig_x).dot(bX)
bSig_s = bSig_s - bSig_s.T.dot(bW.T).dot(inv_bSig_x).dot(bW).dot(bSig_s) + ES.dot(ES.T)/float(nTR)
voxel_str = 0
for m in range(nsubjs):
print ('.'),
sys.stdout.flush()
Am = bX[voxel_str:(voxel_str+nvoxel[m]),:].dot(ES.T)
pert = np.zeros((Am.shape))
np.fill_diagonal(pert,1)
Um, sm, Vm = np.linalg.svd(Am+0.001*pert,full_matrices=0)
bW[voxel_str:(voxel_str+nvoxel[m]),:] = Um.dot(Vm)
sigma2[m] = np.trace(bX[voxel_str:(voxel_str+nvoxel[m]),:].T.dot(bX[voxel_str:(voxel_str+nvoxel[m]),:]))\
-2*np.trace(bX[voxel_str:(voxel_str+nvoxel[m]),:].T.dot(bW[voxel_str:(voxel_str+nvoxel[m]),:]).dot(ES))\
+nTR*np.trace(bSig_s)
sigma2[m] = sigma2[m]/float(nTR*nvoxel[m])
voxel_str = voxel_str + nvoxel[m]
new_niter = niter + 1
np.savez_compressed(current_file, niter = new_niter)
np.savez_compressed(options['working_path']+align_algo+'_'+str(new_niter)+'.npz',\
bSig_s = bSig_s, bW = bW, bmu=bmu, sigma2=sigma2, ES=ES, nvoxel=nvoxel, niter=new_niter)
os.remove(options['working_path']+align_algo+'_'+str(new_niter-1)+'.npz')
# calculate log likelihood
sign , logdet = np.linalg.slogdet(bSig_x)
if sign == -1:
print str(new_niter)+'th iteration, log sign negative'
loglike = - 0.5*nTR*logdet - 0.5*np.trace(bX.T.dot(inv_bSig_x).dot(bX)) #-0.5*nTR*nvoxel*nsubjs*math.log(2*math.pi)
np.savez_compressed(options['working_path']+align_algo+'_'+'loglikelihood_'+str(new_niter)+'.npz',\
loglike=loglike)
# print str(-0.5*nTR*logdet)+','+str(-0.5*np.trace(bX.T.dot(inv_bSig_x).dot(bX)))
print str(loglike)
return new_niter
|
# -*- coding: utf-8 -*-
from datetime import datetime
from collections import OrderedDict
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import viewsets, generics
from dashboard.libs.date_tools import parse_date
from dashboard.libs import swagger_tools
from .models import Product, Area, ProductGroup, Person, Department, Skill
from .tasks import sync_float
from .serializers import (
PersonSerializer, PersonProductSerializer, DepartmentSerializer,
SkillSerializer)
from . import spreadsheets
def _product_meta(request, product):
meta = {
'can_edit': product.can_user_change(request.user),
'admin_url': request.build_absolute_uri(product.admin_url)
}
return meta
def product_html(request, id):
if not id:
id = Product.objects.visible().first().id
return redirect(reverse(product_html, kwargs={'id': id}))
try:
Product.objects.visible().get(id=id)
except (ValueError, Product.DoesNotExist):
raise Http404
return render(request, 'common.html')
@api_view(['GET'])
def product_json(request, id):
"""
detail view of a single product
"""
request_data = request.GET
try:
product = Product.objects.visible().get(id=id)
except (ValueError, Product.DoesNotExist):
error = 'cannot find product with id={}'.format(id)
return Response({'error': error}, status=404)
start_date = request_data.get('startDate')
if start_date:
start_date = parse_date(start_date)
end_date = request_data.get('endDate')
if end_date:
end_date = parse_date(end_date)
# get the profile of the product for each month
profile = product.profile(
start_date=start_date,
end_date=end_date,
freq='MS',
calculation_start_date=settings.PEOPLE_COST_CALCATION_STARTING_POINT
)
meta = _product_meta(request, product)
return Response({**profile, 'meta': meta})
def product_group_html(request, id):
if not id:
id = ProductGroup.objects.first().id
return redirect(reverse(product_group_html, kwargs={'id': id}))
try:
ProductGroup.objects.get(id=id)
except (ValueError, ProductGroup.DoesNotExist):
raise Http404
return render(request, 'common.html')
@api_view(['GET'])
def product_group_json(request, id):
"""
detail view of a single product group
"""
# TODO handle errors
try:
product_group = ProductGroup.objects.get(id=id)
except (ValueError, ProductGroup.DoesNotExist):
error = 'cannot find product group with id={}'.format(id)
return Response({'error': error}, status=404)
# get the profile of the product group for each month
profile = product_group.profile(
freq='MS',
calculation_start_date=settings.PEOPLE_COST_CALCATION_STARTING_POINT)
meta = _product_meta(request, product_group)
return Response({**profile, 'meta': meta})
class PersonViewSet(viewsets.ReadOnlyModelViewSet):
"""
View set for person
retrieve:
Detail view of a single person
list:
List view of persons
"""
queryset = Person.objects.all()
serializer_class = PersonSerializer
@swagger_tools.additional_schema(
OrderedDict([
('start_date', {
'name': 'start_date',
'required': False,
'location': 'query',
'type': 'string',
'description': 'start date',
}),
('end_date', {
'name': 'end_date',
'required': False,
'location': 'query',
'type': 'string',
'description': 'end date',
}),
])
)
class PersonProductListView(generics.ListAPIView):
"""
List view of products the person(id={person_id}) spends time on
in the time window defined by start date and end date.
"""
serializer_class = PersonProductSerializer
def get_queryset(self):
person = Person.objects.get(id=self.kwargs.get('person_id'))
return person.products
def get_serializer_context(self):
context = super().get_serializer_context()
start_date = self.request.query_params.get('start_date')
if start_date:
start_date = parse_date(start_date)
end_date = self.request.query_params.get('end_date')
if end_date:
end_date = parse_date(end_date)
return {
'start_date': start_date,
'end_date': end_date,
'person': Person.objects.get(id=self.kwargs.get('person_id')),
**context
}
def service_html(request, id):
if not id:
id = Area.objects.filter(visible=True).first().id
return redirect(reverse(service_html, kwargs={'id': id}))
try:
Area.objects.filter(visible=True).get(id=id)
except (ValueError, Area.DoesNotExist):
raise Http404
return render(request, 'common.html')
@api_view(['GET'])
def service_json(request, id):
"""
detail view of a single service area
"""
try:
area = Area.objects.filter(visible=True).get(id=id)
except (ValueError, Area.DoesNotExist):
error = 'cannot find service area with id={}'.format(id)
return Response({'error': error}, status=404)
# get the profile of the service
profile = area.profile(
calculation_start_date=settings.PEOPLE_COST_CALCATION_STARTING_POINT)
return Response(profile)
def portfolio_html(request):
return render(request, 'common.html', {'body_classes': 'portfolio'})
@api_view(['GET'])
def services_json(request):
"""
list view of all service areas
"""
result = [
area.profile(
calculation_start_date=settings.PEOPLE_COST_CALCATION_STARTING_POINT
)
for area in Area.objects.filter(visible=True)
]
return Response(result)
@login_required
@api_view(['POST'])
def sync_from_float(request):
"""
sync data from Float.com
"""
sync_float.delay()
return Response({
'status': 'STARTED'
})
@require_http_methods(['GET'])
def products_spreadsheet(request, **kwargs):
show = kwargs.get('show', 'visible')
if show == 'visible':
products = Product.objects.visible()
elif show == 'all':
products = Product.objects.all()
else:
products = Product.objects.filter(pk=show)
spreadsheet = spreadsheets.Products(
products, settings.PEOPLE_COST_CALCATION_STARTING_POINT
)
response = HttpResponse(content_type="application/vnd.ms-excel")
response['Content-Disposition'] = 'attachment; filename={}_{}_{}.xlsx'.format(
'ProductData', show, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
spreadsheet.workbook.save(response)
return response
class DepartmentViewSet(viewsets.ReadOnlyModelViewSet):
"""
View set for department
retrieve:
Detail view of a single department
list:
List view of departments
"""
queryset = Department.objects.all()
serializer_class = DepartmentSerializer
class SkillViewSet(viewsets.ReadOnlyModelViewSet):
"""
View set for skills
retrieve:
Detail view of a single skill
list:
List view of skills
"""
queryset = Skill.objects.all()
serializer_class = SkillSerializer
|
import subprocess
from pathlib import Path
if __name__ == '__main__':
script_path = Path(Path.cwd(), 'HjerrildTest.py')
constants_path = Path(Path.cwd(),'constants.ini')
workspace_path = Path(Path.cwd())
for guitar in ['martin', 'firebrand']:
for train_mode in ['1Fret', '2FretA', '2FretB', '3Fret']:
cmd_list = ['python', str(script_path), str(constants_path), str(workspace_path), '--guitar', guitar,'--train_mode', train_mode]
proc = subprocess.Popen(cmd_list, stdin=None, stdout=None, stderr=None)
|
import re
import math
from .constants import *
import lightcrs.utm as utm
import lightcrs.mgrs as mgrs
# WGS84 geoid is assumed
# N .. 0
# E .. 90
# S .. 180
# W .. 270
# latitude south..north [-90..90]
# longitude west..east [-180..180]
class LatLon(object):
def __init__(self,
lat : float,
lon : float) -> None:
self.lat = lat
self.lon = lon
self._hash = hash((self.lat, self.lon))
def __hash__(self) -> int:
return self._hash
def __repr__(self) -> str:
return f"LatLon({self.lat}, {self.lon})"
def __str__(self) -> str:
if self.lat < 0:
ns = "S"
lat = abs(self.lat)
else:
ns = "N"
lat = self.lat
if self.lon < 0:
ew = "W"
lon = abs(self.lon)
else:
ew = "E"
lon = self.lon
return f"{lat}{ns}, {lon}{ew}"
def to_UTM(self) -> utm.UTM:
""" Computes Universal Transverse Mercator coordinates
from WGS84 based latitude longitude coordinates (e.g. GPS).
Grid zones are 8 degrees latitude.
N0 degrees is offset 10 into latitude bands.
Args:
latitude (float): [-90 .. 90] degrees
longitude (float): [-180 .. 180] degrees
Returns:
UTM : namedtuple
"""
if not -80.0 <= self.lat <= 84.0:
raise RuntimeError(f"latitude {self.lat} outside UTM limits")
if self.lon == 180:
self.lon = -180.
zone = math.floor((self.lon + 180) / 6) + 1
lon_central_meridian = math.radians((zone - 1)*6 - 180 + 3)
lat_band_idx = int(math.floor((self.lat/8) + 10))
lat_band = mgrs_lat_bands[lat_band_idx]
# special case Norway
if zone == 31 and lat_band == "V" and self.lon >= 3.0:
zone += 1
lon_central_meridian += math.radians(6)
# special case Svalbard
if zone == 32 and lat_band == "X" and self.lon < 9.0:
zone -= 1
lon_central_meridian -= math.radians(6)
if zone == 32 and lat_band == "X" and self.lon >= 9.0:
zone += 1
lon_central_meridian += math.radians(6)
if zone == 34 and lat_band == "X" and self.lon < 21.0:
zone -= 1
lon_central_meridian -= math.radians(6)
if zone == 34 and lat_band == "X" and self.lon >= 21.0:
zone += 1
lon_central_meridian += math.radians(6)
if zone == 36 and lat_band == "X" and self.lon < 33.0:
zone -= 1
lon_central_meridian -= math.radians(6)
if zone == 36 and lat_band == "X" and self.lon >= 33.0:
zone += 1
lon_central_meridian += math.radians(6)
phi = math.radians(self.lat)
lam = math.radians(self.lon) - lon_central_meridian
cos_lam = math.cos(lam)
sin_lam = math.sin(lam)
tan_lam = math.tan(lam)
tau = math.tan(phi)
sigma = math.sinh(eccentricity * math.atanh(eccentricity * tau / math.sqrt(1 + tau**2)))
tau_prime = tau * math.sqrt(1 + sigma**2) - sigma * math.sqrt(1 + tau**2)
xi_prime = math.atan2(tau_prime, cos_lam)
eta_prime = math.asinh(sin_lam / math.sqrt(tau_prime**2 + cos_lam**2))
xi = xi_prime
eta = eta_prime
for j in range(1, 7):
xi += alpha[j] * math.sin(2* j * xi_prime) * math.cosh(2 * j * eta_prime)
eta += alpha[j] * math.cos(2* j * xi_prime) * math.sinh(2 * j * eta_prime)
x = scale * A * eta
y = scale * A * xi
# convergence: Karney 2011 Eq 23, 24
p_prime = 1
q_prime = 0
for j in range(1, 7):
p_prime += 2 * j * alpha[j] * math.cos(2 * j * xi_prime) * math.cosh(2 * j * eta_prime)
q_prime += 2 * j * alpha[j] * math.sin(2 * j * xi_prime) * math.sinh(2 * j * eta_prime)
gamma_prime = math.atan(tau_prime / math.sqrt(1 + tau_prime**2) * tan_lam)
gamma_pprime = math.atan2(q_prime, p_prime)
gamma = gamma_prime + gamma_pprime
# scale: Karney 2011 Eq 25
sin_phi = math.sin(phi)
k_prime = math.sqrt(1 - eccentricity**2 * sin_phi**2) * math.sqrt(1 + tau**2) / \
math.sqrt(tau_prime**2 + cos_lam**2)
k_pprime = (A / semimajor_axis) * math.sqrt(p_prime**2 + q_prime**2)
k = scale * k_prime * k_pprime
# shift origin
x += 500000.0 # false easting
if y < 0:
y += 10000000.0 # false northing
convergence = math.degrees(gamma)
hemisphere = "N" if self.lat >= 0.0 else "S"
# "zone", "band", "hemisphere", "easting", "northing"
return utm.UTM(zone, hemisphere, x, y)
def to_MGRS(self, precision=5) -> mgrs.MGRS:
ucoords = self.to_UTM()
lat_band_idx = int(math.floor((self.lat/8) + 10))
band = mgrs_lat_bands[lat_band_idx]
column = math.floor(ucoords.easting / 100e3)
square_e = easting_100k_letters[(ucoords.zone-1) % 3][column - 1]
row = math.floor(ucoords.northing / 100e3) % 20
square_n = northing_100k_Letters[(ucoords.zone-1) % 2][row]
easting = int(ucoords.easting % 100e3)
northing = int(ucoords.northing % 100e3)
gzd = f"{ucoords.zone:0>2}{band}"
square_id = square_e + square_n
return mgrs.MGRS(gzd, square_id, easting, northing, precision)
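# Illustrative usage (not part of the original module; Greenwich as an example point):
#   p = LatLon(51.4778, -0.0014)
#   p.to_UTM()   # UTM zone 30, hemisphere "N", easting/northing in metres
#   p.to_MGRS()  # grid zone designator plus 100 km square id and in-square offsets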
|
# Copyright 2018 GCP Lab Keeper authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module gets a list of instances running in a given GCP project and shuts them down.
"""
from pprint import pprint
import re
from googleapiclient import discovery
from lib.lab_keeper.config import Config
from lib.lab_keeper.authenticator import Authenticator
class LabKeeper:
"""This class manages GCP Compute Engine resources."""
def __init__(self):
"""Initialize Lab Keeper parameters."""
self.config = Config()
self.authenticator = Authenticator()
self.credentials = self.authenticator.make_credentials()
self.service = discovery.build(
'compute', 'v1', credentials=self.credentials)
# List of Project IDs for this request.
self.project_list = self.config.project_list
# The list of the zone prefixes for this request.
self.wanted_zone_prefixes = self.config.zone_prefixes
# VM Label and value for shutdown flagging.
self.label_key = self.config.label_key
self.label_value = self.config.label_value
def _request_instances(self, project, target_zones, label_key, label_value):
"""Make an API call to return all existing instances for a zone in a project."""
instance_list = []
pprint(f"Looking for instances in project {project}.")
pprint(f"Looking for instances with label '{label_key}'='{label_value}'")
for zone in target_zones:
instance_request = self.service.instances().list(
project=project, zone=zone, filter=f"labels.{label_key} = {label_value}")
while instance_request is not None:
response = instance_request.execute()
if 'items' in response:
for instance in response['items']:
instance_list.append(instance)
instance_request = self.service.instances().list_next(
previous_request=instance_request, previous_response=response)
pprint(
f"{len(response['items'])} instances found in {zone}.")
else:
pprint(f"No instances found in zone {zone}. Continuing...")
break
pprint(f"{len(instance_list)} instances found in project {project}.")
return instance_list
def _request_zones(self, project):
"""Make an API call to return all available zones for a project."""
        zone_request = self.service.zones().list(project=project)
        # Collect zone names across all pages (initialise once, outside the loop)
        zone_list = []
        while zone_request is not None:
            response = zone_request.execute()
for zone in response['items']:
# Puts all zone names inside a list.
zone_list.append(zone.get('name', None))
zone_request = self.service.zones().list_next(previous_request=zone_request,
previous_response=response)
return zone_list
def _filter_zones(self, wanted_zones_prefix, zone_list):
"""Filter unwanted zones from a zone list using a prefix."""
target_zones = []
for zone in zone_list:
for prefix in wanted_zones_prefix:
pattern = re.compile(f'^{prefix}')
if pattern.match(zone):
target_zones.append(zone)
return target_zones
def _get_instances_bystatus(self, instance_list):
"""Take an instance object list and return a dictionary with
statuses as keys and a list of instances as its values."""
instances_bystatus = {}
pprint(f"Checking status for {len(instance_list)} instances.")
# Create a list of returned statuses
status_list = []
for instance in instance_list:
if instance.get("status", None) not in status_list:
status_list.append(instance.get("status", None))
# Create a dictionary containing instances and zone by status
for key in status_list:
value = []
for instance in instance_list:
if instance.get("status", None) == key:
instance_data = {}
instance_name = instance.get("name", None)
zone_name = instance.get("zone", None).split('/')[-1]
instance_data["name"] = instance_name
instance_data["zone"] = zone_name
value.append(instance_data)
instances_bystatus[key] = value
# Print formatted contents of dictionary
for status in instances_bystatus:
pprint("######################")
pprint(
f"{status} instances:")
pprint("######################")
for instance in instances_bystatus[status]:
pprint(f"{instance['name']} in {instance['zone']}")
return instances_bystatus
def stop_running_instances(self, instance_status_list, project):
"""Stop compute engine instances in RUNNING state."""
# TODO: Check if instances in PROVISIONING, STAGING and REPAIRING
# states can and need to be stopped.
if "RUNNING" in instance_status_list.keys():
for running_instance in instance_status_list["RUNNING"]:
instance = running_instance.get("name", None)
zone = running_instance.get("zone", None)
request = self.service.instances().stop(
project=project, zone=zone, instance=instance)
pprint(f"Stopping instance {instance} in zone {zone}...")
response = request.execute()
else:
pprint(f"There are no running instances in project {project}.")
return
def main(self):
"""Main"""
# Get a list of zone names
zone_list = self._request_zones(self.project_list[0])
# Get only wanted zones
target_zones = self._filter_zones(self.wanted_zone_prefixes, zone_list)
# Get a list of instances for each target by project
instances_list_projects = {}
for project in self.project_list:
instances = self._request_instances(
project, target_zones, self.label_key, self.label_value)
instances_list_projects[project] = instances
# Create a list of instances by status by project
instance_status_list_projects = {}
for project in instances_list_projects:
instance_status_list = self._get_instances_bystatus(instances_list_projects[project])
instance_status_list_projects[project] = instance_status_list
# Stop running VMs
for project in instance_status_list_projects:
self.stop_running_instances(instance_status_list_projects[project], project)
return
if __name__ == '__main__':
# Create a Lab Keeper instance, and run it.
lk = LabKeeper()
lk.main()
|
from flask_babel import _
from flask_wtf.form import FlaskForm
from flask_wtf.recaptcha import RecaptchaField
from wtforms.fields.core import StringField
from wtforms.fields.html5 import EmailField
from wtforms.fields.simple import SubmitField, TextAreaField
from wtforms.validators import DataRequired, Email, Length
class SupportForm(FlaskForm):
"""Support form."""
name = StringField(label=_('Name'), validators=[Length(max=35), DataRequired()])
email = EmailField(label=_('Email Address'), validators=[Length(min=6, max=120), Email()])
message = TextAreaField(label=_('Message'), validators=[Length(max=1000), DataRequired()])
recaptcha = RecaptchaField()
submit = SubmitField(label=_('Send'))
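# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of wiring SupportForm into a Flask view.
# The blueprint name, endpoint, template path and the mail helper below are
# assumptions made for illustration only.
from flask import Blueprint, flash, redirect, render_template, url_for
support_bp = Blueprint('support', __name__)
def _send_support_mail(name, email, message):
    # Placeholder: a real application would hand this off to Flask-Mail,
    # a task queue, or an external ticketing API.
    pass
@support_bp.route('/support', methods=['GET', 'POST'])
def support():
    form = SupportForm()
    if form.validate_on_submit():  # runs CSRF, field validators and reCAPTCHA
        _send_support_mail(form.name.data, form.email.data, form.message.data)
        flash(_('Your message has been sent.'))
        return redirect(url_for('support.support'))
    return render_template('support.html', form=form)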
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
from blocks import simple_block, Down_sample, Up_sample
from torchsummary import summary
# simplest U-Net
class init_U_Net(nn.Module):
def __init__(
self,
input_modalites,
output_channels,
base_channel,
pad_method='pad',
softmax=True,
):
super(init_U_Net, self).__init__()
self.softmax = softmax
self.pad_method = pad_method
self.min_channel = base_channel
self.down_conv1 = simple_block(input_modalites, self.min_channel*2, 3)
self.down_sample_1 = Down_sample(2)
self.down_conv2 = simple_block(self.min_channel*2, self.min_channel*4, 3)
self.down_sample_2 = Down_sample(2)
self.down_conv3 = simple_block(self.min_channel*4, self.min_channel*8, 3)
self.down_sample_3 = Down_sample(2)
self.bridge = simple_block(self.min_channel*8, self.min_channel*16, 3)
self.up_sample_1 = Up_sample(self.min_channel*16, self.min_channel*16, 2)
self.up_conv1 = simple_block(self.min_channel*24, self.min_channel*8, 3, is_down=False)
self.up_sample_2 = Up_sample(self.min_channel*8, self.min_channel*8, 2)
self.up_conv2 = simple_block(self.min_channel*12, self.min_channel*4, 3, is_down=False)
self.up_sample_3 = Up_sample(self.min_channel*4, self.min_channel*4, 2)
self.up_conv3 = simple_block(self.min_channel*6, self.min_channel*2, 3, is_down=False)
self.out = nn.Conv3d(self.min_channel*2, output_channels, kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
elif isinstance(m, nn.InstanceNorm3d) or isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# encoder path
self.block_1 = self.down_conv1(x)
self.block_1_pool = self.down_sample_1(self.block_1)
self.block_2 = self.down_conv2(self.block_1_pool)
self.block_2_pool = self.down_sample_2(self.block_2)
self.block_3 = self.down_conv3(self.block_2_pool)
self.block_3_pool = self.down_sample_3(self.block_3)
# bridge
self.block_4 = self.bridge(self.block_3_pool)
# decoder path
self.block_5_upsample = self.up_sample_1(self.block_4)
self.block_5_upsample = self.pad(self.block_3, self.block_5_upsample, self.pad_method)
self.concat_1 = torch.cat([self.block_5_upsample, self.block_3], dim=1)
self.block_5 = self.up_conv1(self.concat_1)
self.block_6_upsample = self.up_sample_2(self.block_5)
self.block_6_upsample = self.pad(self.block_2, self.block_6_upsample, self.pad_method)
self.concat_2 = torch.cat([self.block_6_upsample, self.block_2], dim=1)
self.block_6 = self.up_conv2(self.concat_2)
self.block_7_upsample = self.up_sample_3(self.block_6)
self.block_7_upsample = self.pad(self.block_1, self.block_7_upsample, self.pad_method)
self.concat_3 = torch.cat([self.block_7_upsample, self.block_1], dim=1)
self.block_7 = self.up_conv3(self.concat_3)
res = self.out(self.block_7)
if self.softmax:
res = F.softmax(res, dim=1)
return res
def pad(self, encoder, decoder, method='pad'):
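        """Match the decoder feature map to the encoder's spatial size.
        Depending on `method`, the decoder tensor is either zero-padded
        ('pad') or resized with nearest-neighbour interpolation
        ('interpolate') so that it can be concatenated with the
        corresponding encoder feature map along the channel dimension.
        """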
encoder_z, encoder_y, encoder_x = encoder.shape[-3], encoder.shape[-2], encoder.shape[-1]
decoder_z, decoder_y, decoder_x = decoder.shape[-3], decoder.shape[-2], decoder.shape[-1]
diff_z, diff_y, diff_x = encoder_z - decoder_z, encoder_y - decoder_y, encoder_x - decoder_x
if method == 'pad':
x = F.pad(decoder, (diff_x//2, diff_x - diff_x//2,
diff_y//2, diff_y - diff_y//2,
diff_z//2, diff_z - diff_z//2),
mode='constant', value=0)
elif method == 'interpolate':
x = F.interpolate(decoder, size=(encoder_z, encoder_y, encoder_x), mode='nearest')
else:
raise NotImplementedError()
return x
if __name__ == '__main__':
from utils import load_config
config_file = 'config.yaml'
config = load_config(config_file)
input_modalites = int(config['PARAMETERS']['input_modalites'])
output_channels = int(config['PARAMETERS']['output_channels'])
base_channel = 4
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
net = init_U_Net(input_modalites, output_channels, base_channel)
net.to(device)
import tensorwatch as tw
from tensorboardX import SummaryWriter
# print(net)
# params = list(net.parameters())
# for i in range(len(params)):
# layer_shape = params[i].size()
# print(len(layer_shape))
    # print parameter information
# count_params(net)
input = torch.randn(1, 4, 64, 64, 64).to(device)
# tw.draw_model(net, input)
# input = torch.randn(1, 4, 130, 130, 130).to(device)
# print(y.shape)
# summary(net, input_size=(4, 64, 64, 64))
# print(net)
# print(net._modules.keys())
# net.out = nn.Conv3d(16, 8, 3, padding=1)
# net.to(device)
# y = net(input)
# print(y.data.shape)
def count_params(model):
        '''Print the number of trainable parameters of the model and their approximate size in MB.'''
num_of_param = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Model {} : params number {}, params size: {:4f}M'.format(model._get_name(), num_of_param, num_of_param*4/1000/1000))
count_params(model=net)
|
import pandas as pd
import requests
from io import StringIO
from os import path
import os
from csv import writer as csv_writer
import hydrostats.data as hd
import hydrostats.visual as hv
import hydrostats as hs
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
stations_pd = pd.read_csv('/Users/student/Dropbox/PhD/2021_Fall/Dissertation_v12/Middle_East/Israel/Israel_Selected_Stations.csv')
IDs = stations_pd['statid'].tolist()
COMIDs = stations_pd['COMID'].tolist()
Names = stations_pd['Name'].tolist()
obsFiles = []
simFiles = []
#COD = []
for id, name, comid in zip(IDs, Names, COMIDs):
obsFiles.append('/Users/student/Dropbox/PhD/2021_Fall/Dissertation_v12/Middle_East/Israel/Historical/Observed_Data/{}.csv'.format(id))
#simFiles.append('/Users/student/Dropbox/PhD/2021_Fall/Dissertation_v12/Middle_East/Israel/Historical/Simulated_Data/{}.csv'.format(comid))
simFiles.append('/Users/student/Dropbox/PhD/2021_Fall/Dissertation_v12/Middle_East/Israel/Historical/Corrected_Data/{}.csv'.format(comid))
#User Input
country = 'Israel'
#output_dir = '/Users/student/Dropbox/PhD/2021_Fall/Dissertation_v12/Middle_East/Israel/Historical/validationResults_Original/'
output_dir = '/Users/student/Dropbox/PhD/2021_Fall/Dissertation_v12/Middle_East/Israel/Historical/validationResults_Corrected/'
'''Initializing Variables to Append to'''
#Creating blank dataframe for Tables
all_station_table = pd.DataFrame()
station_array = []
comid_array = []
all_lag_table = pd.DataFrame()
#Creating an empty list for volumes
volume_list = []
#Creating a table template for the lag time table
#lag_table = 'Station, COMID, Metric, Max, Max Lag Number, Min, Min LagNumber\n'
#Making directories for all the Desired Plots
table_out_dir = path.join(output_dir, 'Tables')
if not path.isdir(table_out_dir):
os.makedirs(table_out_dir)
'''
plot_obs_hyd_dir = path.join(output_dir, 'Observed_Hydrographs')
if not path.isdir(plot_obs_hyd_dir):
os.makedirs(plot_obs_hyd_dir)
plot_sim_hyd_dir = path.join(output_dir, 'Simulated_Hydrographs')
if not path.isdir(plot_sim_hyd_dir):
os.makedirs(plot_sim_hyd_dir)
plot_out_dir = path.join(output_dir, 'Hydrographs')
if not path.isdir(plot_out_dir):
os.makedirs(plot_out_dir)
scatter_out_dir = path.join(output_dir, 'Scatter_Plots')
if not path.isdir(scatter_out_dir):
os.makedirs(scatter_out_dir)
scatter_ls_out_dir = path.join(output_dir, 'Scatter_Plots-Log_Scale')
if not path.isdir(scatter_ls_out_dir):
os.makedirs(scatter_ls_out_dir)
hist_out_dir = path.join(output_dir, 'Histograms')
if not path.isdir(hist_out_dir):
os.makedirs(hist_out_dir)
qqplot_out_dir = path.join(output_dir, 'QQ_Plot')
if not path.isdir(qqplot_out_dir):
os.makedirs(qqplot_out_dir)
daily_average_out_dir = path.join(output_dir, 'Daily_Averages')
if not path.isdir(daily_average_out_dir):
os.makedirs(daily_average_out_dir)
monthly_average_out_dir = path.join(output_dir, 'Monthly_Averages')
if not path.isdir(monthly_average_out_dir):
os.makedirs(monthly_average_out_dir)
volume_analysis_out_dir = path.join(output_dir, 'Volume_Analysis')
if not path.isdir(volume_analysis_out_dir):
os.makedirs(volume_analysis_out_dir)
lag_out_dir = path.join(output_dir, 'Lag_Analysis')
if not path.isdir(lag_out_dir):
os.makedirs(lag_out_dir)
'''
for id, comid, name, obsFile, simFile in zip(IDs, COMIDs, Names, obsFiles, simFiles):
print(id, comid, name)
obs_df = pd.read_csv(obsFile, index_col=0)
obs_df[obs_df < 0] = 0
obs_df.index = pd.to_datetime(obs_df.index)
observed_df = obs_df.groupby(obs_df.index.strftime("%Y-%m-%d")).mean()
observed_df.index = pd.to_datetime(observed_df.index)
dates_obs = observed_df.index.tolist()
'''
plt.figure(1)
plt.figure(figsize=(15, 9))
plt.plot(dates_obs, observed_df.iloc[:, 0].values, 'k', color='red', label='Observed Streamflow')
plt.title('Observed Hydrograph for ' + str(id) + ' - ' + name + '\n COMID: ' + str(comid))
plt.xlabel('Date')
plt.ylabel('Streamflow (m$^3$/s)')
plt.legend()
plt.grid()
plt.xlim(dates_obs[0], dates_obs[len(dates_obs)-1])
t = pd.date_range(dates_obs[0], dates_obs[len(dates_obs)-1], periods=10).to_pydatetime()
plt.xticks(t)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.tight_layout()
plt.savefig(
plot_obs_hyd_dir + '/Observed Hydrograph for ' + str(id) + ' - ' + name + '. COMID - ' + str(comid) + '.png')
sim_df = pd.read_csv(simFile, index_col=0)
dates_sim = sim_df.index.tolist()
dates=[]
for date in dates_sim:
dates.append(dt.datetime.strptime(date, "%Y-%m-%d"))
dates_sim = dates
plt.figure(2)
plt.figure(figsize=(15, 9))
plt.plot(dates_sim, sim_df.iloc[:, 0].values, 'k', color='blue', label='Simulated Streamflow')
plt.title('Simulated Hydrograph for ' + str(id) + ' - ' + name + '\n COMID - ' + str(comid))
plt.xlabel('Date')
plt.ylabel('Streamflow (m$^3$/s)')
plt.legend()
plt.grid()
plt.xlim(dates_sim[0], dates_sim[len(dates_sim)-1])
t = pd.date_range(dates_sim[0], dates_sim[len(dates_sim)-1], periods=10).to_pydatetime()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.tight_layout()
plt.savefig(
plot_sim_hyd_dir + '/Simulated Hydrograph for ' + str(id) + ' - ' + name + '. COMID - ' + str(comid) + '.png')
'''
#Merging the Data
merged_df = hd.merge_data(simFile, obsFile)
'''Tables and Plots'''
# Appending the table to the final table
table = hs.make_table(merged_df,
metrics=['ME', 'MAE', 'MAPE', 'RMSE', 'NRMSE (Mean)', 'NSE', 'KGE (2009)', 'KGE (2012)', 'R (Pearson)',
'R (Spearman)', 'r2'], location=id, remove_neg=False, remove_zero=False)
all_station_table = all_station_table.append(table)
#Making plots for all the stations
sim_array = merged_df.iloc[:, 0].values
obs_array = merged_df.iloc[:, 1].values
'''Calculating the Volume of the Streams'''
sim_volume_dt = sim_array * 0.0864
obs_volume_dt = obs_array * 0.0864
sim_volume_cum = []
obs_volume_cum = []
sum_sim = 0
sum_obs = 0
for i in sim_volume_dt:
sum_sim = sum_sim + i
sim_volume_cum.append(sum_sim)
for j in obs_volume_dt:
sum_obs = sum_obs + j
obs_volume_cum.append(sum_obs)
volume_percent_diff = (max(sim_volume_cum)-max(obs_volume_cum))/max(sim_volume_cum)
volume_list.append([id, max(obs_volume_cum), max(sim_volume_cum), volume_percent_diff])
'''
plt.figure(3)
plt.figure(figsize=(15, 9))
plt.plot(merged_df.index, sim_volume_cum, 'k', color='blue', label='Simulated Volume')
plt.plot(merged_df.index, obs_volume_cum, 'k', color='red', label='Observed Volume')
plt.title('Volume Analysis for ' + str(id) + ' - ' + name + '\n COMID: ' + str(comid))
plt.xlabel('Date')
plt.ylabel('Volume (Mm^3)')
plt.legend()
plt.grid()
plt.savefig(
volume_analysis_out_dir + '/Volume Analysis for ' + str(id) + ' - ' + name + '. COMID - ' + str(comid) + '.png')
hv.plot(merged_df, legend=('Simulated', 'Observed'), grid=True,
title='Hydrograph for ' + str(id) + ' - ' + name + '\n COMID: ' + str(comid),
labels=['Datetime', 'Streamflow (m$^3$/s)'], linestyles=['b-', 'r-'], fig_size=(15, 9))
plt.savefig(path.join(plot_out_dir, '{0}_{1}_hydrographs.png'.format(str(id), name)))
daily_avg = hd.daily_average(merged_df)
daily_std_error = hd.daily_std_error(merged_data=merged_df)
hv.plot(merged_data_df=daily_avg, legend=('Simulated', 'Observed'), grid=True, x_season=True,
title='Daily Average Streamflow (Standard Error) for ' + str(
id) + ' - ' + name + '\n COMID: ' + str(comid),
labels=['Datetime', 'Streamflow (m$^3$/s)'], linestyles=['b-', 'r-'], fig_size=(15, 9), ebars=daily_std_error,
ecolor=('b', 'r'), tight_xlim=False)
plt.savefig(path.join(daily_average_out_dir, '{0}_{1}_daily_average.png'.format(str(id), name)))
hv.plot(merged_data_df=daily_avg, legend=('Simulated', 'Observed'), grid=True, x_season=True,
title='Daily Average Streamflow for ' + str(
id) + ' - ' + name + '\n COMID: ' + str(comid),
labels=['Datetime', 'Streamflow (m$^3$/s)'], linestyles=['b-', 'r-'], fig_size=(15, 9))
plt.savefig(path.join(daily_average_out_dir, '{0}_{1}_daily_average_1.png'.format(str(id), name)))
monthly_avg = hd.monthly_average(merged_df)
monthly_std_error = hd.monthly_std_error(merged_data=merged_df)
hv.plot(merged_data_df=monthly_avg, legend=('Simulated', 'Observed'), grid=True, x_season=True,
title='Monthly Average Streamflow (Standard Error) for ' + str(
id) + ' - ' + name + '\n COMID: ' + str(comid),
labels=['Datetime', 'Streamflow (m$^3$/s)'], linestyles=['b-', 'r-'], fig_size=(15, 9),
ebars=monthly_std_error, ecolor=('b', 'r'), tight_xlim=False)
plt.savefig(path.join(monthly_average_out_dir, '{0}_{1}_monthly_average.png'.format(str(id), name)))
hv.scatter(merged_data_df=merged_df, grid=True,
title='Scatter Plot for ' + str(id) + ' - ' + name + '\n COMID: ' + str(comid),
labels=('Simulated', 'Observed'), line45=True, best_fit=True, figsize=(15, 9))
plt.savefig(path.join(scatter_out_dir, '{0}_{1}_scatter_plot.png'.format(str(id), name)))
hv.scatter(sim_array=sim_array, obs_array=obs_array, grid=True,
title='Scatter Plot (Log Scale) for ' + str(id) + ' - ' + name + '\n COMID: ' + str(
comid),
labels=('Simulated', 'Observed'), line45=True, best_fit=True, log_scale=True, figsize=(15, 9))
plt.savefig(path.join(scatter_ls_out_dir, '{0}_{1}_scatter_plot-log_scale.png'.format(str(id), name)))
hv.hist(merged_data_df=merged_df, num_bins=100, legend=('Simulated', 'Observed'), grid=True,
title='Histogram of Streamflows for ' + str(id) + ' - ' + name + '\n COMID: ' + str(
comid),
labels=('Bins', 'Frequency'), figsize=(15, 9))
plt.savefig(path.join(hist_out_dir, '{0}_{1}_histograms.png'.format(str(id), name)))
hv.qqplot(merged_data_df=merged_df,
title='Quantile-Quantile Plot of Data for ' + str(
id) + ' - ' + name + '\n COMID: ' + str(comid),
xlabel='Simulated', ylabel='Observed', legend=True, figsize=(15, 9))
plt.savefig(path.join(qqplot_out_dir, '{0}_{1}_qq-plot.png'.format(str(id), name)))
'''
'''Time Lag Analysis'''
time_lag_metrics = ['ME', 'MAE', 'MAPE', 'RMSE', 'NRMSE (Mean)', 'NSE', 'KGE (2009)', 'KGE (2012)', 'SA', 'R (Pearson)',
'R (Spearman)', 'r2']
'''
station_out_dir = path.join(lag_out_dir, str(id))
if not path.isdir(station_out_dir):
os.makedirs(station_out_dir)
for metric in time_lag_metrics:
print(metric)
_, time_table = hs.time_lag(merged_dataframe=merged_df, metrics=[metric], interp_freq='1D', interp_type='pchip',
shift_range=(-10, 10), remove_neg=False, remove_zero=False,
plot_title=metric + ' at Different Lags for ' + str(
id) + ' - ' + name + '\n COMID: ' + str(comid), plot=True,
ylabel=metric + ' Values', xlabel='Number of Lagas', figsize=(15, 9),
save_fig=path.join(station_out_dir,
'{0}_timelag_plot_for{1}_{2}.png'.format(metric, str(id), name)))
plt.grid()
all_lag_table = all_lag_table.append(time_table)
for i in range(0, len (time_lag_metrics)):
station_array.append(id)
comid_array.append(comid)
plt.close('all')
'''
#Writing the lag table to excel
#table_IO = StringIO(all_lag_table)
#table_IO.seek(0)
#time_lag_df = pd.read_csv(table_IO, sep=",")
'''
all_lag_table = all_lag_table.assign(Station=station_array)
all_lag_table = all_lag_table.assign(COMID=comid_array)
all_lag_table.to_excel(path.join(lag_out_dir, 'Summary_of_all_Stations.xlsx'))
'''
#Writing the Volume Dataframe to a csv
volume_df = pd.DataFrame(volume_list, columns=['Station', 'Observed Volume', 'Simulated Volume', 'Percent Difference'])
volume_df.to_excel(path.join(table_out_dir, 'Volume_Table.xlsx'))
#Stations for the Country to an Excel Spreadsheet
all_station_table.to_excel(path.join(table_out_dir, 'Table_of_all_stations.xlsx'))
|
r"""
This file implements the documentation and default values of the
``vice.yields.agb.settings`` global yield settings dataframe.
.. note:: While the code in this file is seemingly useless in that the
implemented class does nothing other than call its parent class, the
purpose is for this instance to have its own documentation separate from
other yield setting dataframes.
"""
from ..._globals import _RECOGNIZED_ELEMENTS_
from ...core.dataframe import agb_yield_settings
class settings(agb_yield_settings):
r"""
The VICE dataframe: global yield settings for AGB stars
For each chemical element, this object stores the current asymptotic giant
branch (AGB) star nucleosynthetic yield setting. See `Notes`_ below for
mathematical details.
.. versionadded:: 1.2.0
In earlier versions, functions and classes within VICE accepted keyword
arguments or attributes which encoded which model table of yields to
adopt. This same functionality can be achieved by assigning a string as
the yield setting for specific elements.
.. note:: Modifying yield settings through this dataframe is equivalent
to going through the ``vice.elements`` module.
Indexing
--------
- ``str`` [case-insensitive] : elemental symbols
This dataframe must be indexed by the symbol of an element recognized
by VICE as it appears on the periodic table.
Item Assignment
---------------
For each chemical element, the AGB star yield can be assigned either:
- ``str`` [case-insensitive] : Adopt values published by a given study
Keywords correspond to yields calculated on a table of progenitor
masses and metallicities which can be adopted directly.
- "cristallo11" : Cristallo et al. (2011, 2015) [1]_ [2]_
- "karakas10" : Karakas (2010) [3]_
- "ventura13" : Ventura et al. (2013) [4]_
- "karakas16" : Karakas & Lugaro (2016) [5]_ ; Karakas et al. (2018)
[6]_
.. versionadded:: 1.3.0
The "ventura13" and "karakas16" yields models were introduced
in version 1.3.0.
- <function> : Mathematical function describing the yield
Must accept progenitor zero age main sequence mass in
:math:`M_\odot` as the first parameter and the metallicity by
mass :math:`Z` as the second.
Functions
---------
- keys
- todict
- restore_defaults
- factory_settings
- save_defaults
Notes
-----
VICE defines the yield from AGB stars as the fraction of a star's initial
mass which is processed into some element. As with all other yields in
VICE, these are *net* rather than *gross* yields in that they quantify only
the mass of a given element which is newly produced. For a star
of mass :math:`M_\star`, the mass of the element ejected to the ISM, not
counting previously produced nucleosynthetic material, is given by:
.. math:: M = y_\text{AGB}(M_\star, Z_\star) M_\star
where :math:`y_\text{AGB}` is the yield and :math:`Z_\star` is the initial
metallicity of the star.
This definition is retained in one- and multi-zone
chemical evolution models as well. For further details, see VICE's science
documentation: https://vice-astro.readthedocs.io/en/latest/science_documentation/index.html.
Example Code
------------
>>> import vice
>>> vice.yields.agb.settings["n"] = "cristallo11"
>>> vice.yields.agb.settings["N"]
"cristallo11"
>>> vice.yields.agb.settings["N"] = "karakas10"
>>> vice.yields.agb.settings["n"]
"karakas10"
>>> def f(m, z):
return 0.001 * m * (z / 0.014)
>>> vice.yields.agb.settings["n"] = f
>>> vice.yields.agb.settings["N"]
    <function __main__.f(m, z)>
.. [1] Cristallo et al. (2011), ApJS, 197, 17
.. [2] Cristallo et al. (2015), ApJS, 219, 40
.. [3] Karakas (2010), MNRAS, 403, 1413
.. [4] Ventura et al. (2013), MNRAS, 431, 3642
    .. [5] Karakas & Lugaro (2016), ApJ, 825, 26
.. [6] Karakas et al. (2018), MNRAS, 477, 421
"""
def __init__(self):
super().__init__(dict(zip(_RECOGNIZED_ELEMENTS_,
len(_RECOGNIZED_ELEMENTS_) * ["cristallo11"])),
"AGB yield", True, "agb")
def keys(self):
r"""
Returns the keys of the AGB star yield settings dataframe.
**Signature**: vice.yields.agb.settings.keys()
.. note:: By nature, this function will simply return a list of all
elements that are built into VICE - the same thing as
``vice.elements.recognized``.
Example Code
------------
>>> import vice
>>> elements = vice.yields.agb.settings.keys()
>>> tuple(elements) == vice.elements.recognized
True
"""
return super().keys()
def todict(self):
r"""
Returns the AGB star yield settings dataframe as a dictionary.
**Signature**: vice.yields.agb.settings.todict()
.. note:: Modifications to the dictionary returned by this function
will *not* affect the global yield settings.
.. note:: Python dictionaries are case-sensitive, and are thus less
flexible than this class.
Example Code
------------
>>> import vice
>>> example = vice.yields.agb.settings.todict()
>>> example["c"]
"cristallo11"
>>> example["C"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
KeyError: 'C'
>>> example["c"] = "not a yield setting"
>>> example["c"]
"not a yield setting"
>>> vice.yields.agb.settings["c"]
"cristallo11"
"""
return super().todict()
def restore_defaults(self):
r"""
Restores the AGB star yield settings to their default values (i.e.
undoes any changes since VICE was imported).
**Signature**: vice.yields.agb.settings.restore_defaults()
Example Code
------------
>>> import vice
>>> vice.yields.agb.settings["c"] = "karakas10"
>>> vice.yields.agb.settings["n"] = "karakas10"
>>> vice.yields.agb.settings["o"] = "karakas10"
>>> vice.yields.agb.settings.restore_defaults()
>>> vice.yields.agb.settings["c"]
"cristallo11"
>>> vice.yields.agb.settings["n"]
"cristallo11"
>>> vice.yields.agb.settings["o"]
"cristallo11"
"""
super().restore_defaults()
def factory_settings(self):
r"""
Restores the AGB star yield settings to their factory defaults. This
differs from ``vice.yields.agb.settings.restore_defaults`` in that
users may modify their default values from those that VICE is
distributed with.
**Signature**: vice.yields.agb.settings.factory_settings()
.. tip:: To revert your nucleosynthetic yield settings back to their
production defaults *permanently*, simply call
``vice.yields.agb.settings.save_defaults`` immediately following
this function.
Example Code
------------
>>> import vice
>>> vice.yields.agb.settings["c"]
"karakas10" # the user has modified their default yield for carbon
>>> vice.yields.agb.settings.factory_settings()
>>> vice.yields.agb.settings["c"]
"cristallo11"
"""
super().factory_settings()
def save_defaults(self):
r"""
Saves the current AGB star yield settings as the default values.
**Signature**: vice.yields.agb.settings.save_defaults()
.. note:: Saving functional yields requires the package dill_, an
extension to ``pickle`` in the python standard library. It is
recommended that VICE users install dill_ >= 0.2.0.
.. _dill: https://pypi.org/project/dill
Example Code
------------
>>> import vice
>>> vice.yields.agb.settings["c"]
"cristallo11"
>>> vice.yields.agb.settings["c"] = "karakas10"
>>> vice.yields.agb.settings.save_defaults()
After re-launching the python interpreter:
>>> import vice
>>> vice.yields.agb.settings["c"]
"karakas10"
"""
super().save_defaults()
settings = settings()
|
import unittest
import nlpaug.augmenter.char as nac
import nlpaug.util.text.tokenizer as text_tokenizer
class TestCharacter(unittest.TestCase):
def test_empty(self):
texts = ['', None]
augs = [
nac.OcrAug(),
nac.KeyboardAug(),
]
for text in texts:
for aug in augs:
augmented_text = aug.augment(text)
self.assertEqual(text, augmented_text)
def test_tokenizer(self):
augs = [
nac.OcrAug(tokenizer=text_tokenizer.split_sentence),
nac.KeyboardAug(tokenizer=text_tokenizer.split_sentence),
nac.RandomCharAug(tokenizer=text_tokenizer.split_sentence),
]
text = 'The quick brown fox, jumps over lazy dog.'
expected_tokens = ['The', ' quick', ' brown', ' fox', ', ', 'jumps', ' over', ' lazy', ' dog', '.']
for aug in augs:
tokens = aug.tokenizer(text)
self.assertEqual(tokens, expected_tokens)
text = 'The quick !brown fox, jumps # over lazy dog .'
expected_tokens = ['The', ' quick', ' !', 'brown', ' fox', ', ', 'jumps', ' # ', 'over', ' lazy', ' dog', ' .']
for aug in augs:
tokens = aug.tokenizer(text)
self.assertEqual(tokens, expected_tokens)
def test_multi_thread(self):
text = 'The quick brown fox jumps over the lazy dog.'
n = 3
augs = [
nac.KeyboardAug(tokenizer=text_tokenizer.split_sentence),
nac.RandomCharAug(tokenizer=text_tokenizer.split_sentence),
]
for num_thread in [1, 3]:
for aug in augs:
augmented_data = aug.augment(text, n=n, num_thread=num_thread)
self.assertEqual(len(augmented_data), n)
def test_stopwords(self):
text = 'The quick brown fox jumps over the lazy dog.'
stopwords = ['The', 'brown', 'fox', 'jumps', 'the', 'dog']
augs = [
nac.RandomCharAug(stopwords=stopwords),
nac.KeyboardAug(stopwords=stopwords),
nac.OcrAug(stopwords=stopwords)
]
for aug in augs:
for i in range(10):
augmented_text = aug.augment(text)
self.assertTrue(
'quick' not in augmented_text or 'over' not in augmented_text or 'lazy' not in augmented_text)
def test_stopwords_regex(self):
text = 'The quick brown fox jumps over the lazy dog.'
stopwords_regex = "( [a-zA-Z]{1}ox | [a-z]{1}og|(brown)|[a-zA-z]{1}he)|[a-z]{2}mps "
augs = [
nac.RandomCharAug(action="delete", stopwords_regex=stopwords_regex),
nac.KeyboardAug(stopwords_regex=stopwords_regex),
nac.OcrAug(stopwords_regex=stopwords_regex)
]
for aug in augs:
for i in range(10):
augmented_text = aug.augment(text)
self.assertTrue(
'quick' not in augmented_text or 'over' not in augmented_text or 'lazy' not in augmented_text)
|
n1 = int(input('Digite o primeiro número: '))
n2 = int(input('Digite o segundo número: '))
if n1 > n2:
print('O primeiro valor {} é maior'.format(n1))
elif n2 > n1:
print('O segundo valor {} é maior'.format(n2))
elif n1 == n2:
print('Não existe valor maior, os dois são iguais')
|
"""
This program translates an image into a Minecraft block map
@author: Tang142857
@project: workspace
@file: img2block.py
@date: 2021-06-30
Copyright(c): DFSA Software Develop Center
"""
import cv2
import numpy
import translator
import entry
def read_image(img_path: str, **kwargs):
"""
    Return the block map of the image.
    kwargs:
        ch: r, g, b, a, o
        resize: not used right now
"""
origin_img = cv2.imread(img_path)
    # note: cv2.imread returns the image in BGR order, not RGB
rgb_img = cv2.cvtColor(origin_img, cv2.COLOR_BGR2RGB)
block_map = translator.translate_img(rgb_img)
map = numpy.array(block_map, dtype=numpy.uint8)
map = cv2.cvtColor(map, cv2.COLOR_RGB2BGR)
block_map = translator.translate_img(rgb_img, False)
cv2.imshow('block_map', map)
# cv2.imshow('origin_map', origin_img)
# cv2.waitKey()
return block_map
if __name__ == '__main__':
bm = read_image('/home/tang/file/download/pages/cpc.jpeg')
# bm = read_image('/home/tang/file/pictures/non-human/non-cover.jpg')
commands = translator.build_command(bm)
entry.exe_lines(commands)
|
# -*- coding: utf-8 -*-
from sympy import Rational as r
from .BetaFunction import BetaFunction
from Definitions import tensorContract
import itertools
class ScalarMassBetaFunction(BetaFunction):
def compute(self, a,b, nLoops):
perm = list(itertools.permutations([a, b], 2))
permSet = set(perm)
coeff = r(len(perm),len(permSet))
ret = 0
for s1,s2 in permSet:
ret += coeff * self.Beta(s1,s2, nLoops=nLoops)
return r(1,2)*ret
def fDefinitions(self):
""" Functions definition """
for i in range(self.nLoops):
self.functions.append([])
count = 1
while True:
try:
self.functions[i].append(eval(f"self.m{i+1}_{count}"))
count += 1
                except AttributeError:
break
def cDefinitions(self):
""" Coefficients definition """
## 1-loop
self.coefficients.append( [r(-6), r(1), r(1), r(1), r(-4), r(-2)] )
## 2-loop
# self.coefficients.append( [r(2), r(10), r(0), r(3), r(-143,6), r(11,6),
# r(10,6), r(-3), r(8), r(8), r(-3), r(-3),
# r(1,6), r(-1), r(-1,2), r(-2), r(0), r(0),
# r(-12), r(5), r(0), r(-1), r(-1), r(0),
# r(2), r(-4), r(2), r(4), r(1), r(0),
# r(0), r(0), r(-1), r(-3,2), r(4), r(4),
# r(2)] )
self.coefficients.append( [r(2), r(10), r(0), r(3), r(-143,6), r(11,6),
r(10,6), r(-3), r(8), r(8), r(-3), r(-3),
r(1,6), r(-1), r(-1,2), r(-2), r(0), r(0),
r(-12), r(5), r(0), r(-1), r(-1), r(0),
r(0), r(0), r(2), r(4), r(-8), r(-8),
r(-4), r(-4), r(2), r(4), r(1), r(0),
r(0), r(0), r(-1), r(-3,2), r(4), r(8),
r(8), r(4), r(4), r(4), r(4), r(4),
r(4), r(2), r(2)] )
######################
# 1-loop functions #
######################
def m1_1(self, a,b):
return tensorContract(self.C2S(a,e_),
self.mu(e_,b))
def m1_2(self, a,b):
return tensorContract(self.l(a,b,e_,f_),
self.mu(e_,f_))
def m1_3(self, a,b):
return tensorContract(self.h(a,e_,f_),
self.h(b,e_,f_))
def m1_4(self, a,b):
return tensorContract(self.Y2S(a,e_),
self.mu(e_,b))
def m1_5(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(b,j_,k_),
self.M(k_,l_),
self.Mt(l_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m1_6(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.y(b,k_,l_),
self.Mt(l_,i_),
doTrace=True, yukSorting=self.model.YukPos)
######################
# 2-loop functions #
######################
def m2_1(self, a,b):
return tensorContract(self.Ts(A_,a,i_),
self.Ts(C_,i_,e_),
self.G(A_,B_),
self.G(C_,D_),
self.Ts(B_,b,j_),
self.Ts(D_,j_,f_),
self.mu(e_,f_))
def m2_2(self, a,b):
return tensorContract(self.Ts(A_,a,i_),
self.Ts(C_,i_,b),
self.G(A_,B_),
self.G(C_,D_),
self.Ts(B_,e_,j_),
self.Ts(D_,j_,f_),
self.mu(e_,f_))
def m2_3(self, a,b):
return tensorContract(self.C2S(a,e_),
self.C2S(b,f_),
self.mu(e_,f_))
def m2_4(self, a,b):
return tensorContract(self.C2S(a,e_),
self.C2S(e_,f_),
self.mu(f_,b))
def m2_5(self, a,b):
return tensorContract(self.C2SG(a,e_),
self.mu(e_,b))
def m2_6(self, a,b):
return tensorContract(self.C2SS(a,e_),
self.mu(e_,b))
def m2_7(self, a,b):
return tensorContract(self.C2SF(a,e_),
self.mu(e_,b))
def m2_8(self, a,b):
return tensorContract(self.Ts(A_,a,e_),
self.Ts(B_,b,f_),
self.G(A_,B_),
self.l(e_,f_,g_,h_),
self.mu(g_,h_))
def m2_9(self, a,b):
return tensorContract(self.l(a,b,e_,f_),
self.C2S(f_,g_),
self.mu(e_,g_))
def m2_10(self, a,b):
return tensorContract(self.h(a,e_,f_),
self.C2S(f_,g_),
self.h(e_,g_,b))
def m2_11(self, a,b):
return tensorContract(self.C2S(a,e_),
self.l(e_,b,f_,g_),
self.mu(f_,g_))
def m2_12(self, a,b):
return tensorContract(self.C2S(a,e_),
self.h(e_,f_,g_),
self.h(f_,g_,b))
def m2_13(self, a,b):
return tensorContract(self.l(a,e_,f_,g_),
self.l(e_,f_,g_,h_),
self.mu(h_,b))
def m2_14(self, a,b):
return tensorContract(self.l(a,e_,g_,h_),
self.l(b,f_,g_,h_),
self.mu(e_,f_))
def m2_15(self, a,b):
return tensorContract(self.l(a,b,e_,f_),
self.h(e_,g_,h_),
self.h(f_,g_,h_))
def m2_16(self, a,b):
return tensorContract(self.h(a,e_,f_),
self.h(e_,g_,h_),
self.l(f_,g_,h_,b))
def m2_17(self, a,b):
return tensorContract(self.l(a,b,e_,f_),
self.l(e_,f_,g_,h_),
self.mu(g_,h_))
def m2_18(self, a,b):
return tensorContract(self.h(a,e_,f_),
self.l(e_,f_,g_,h_),
self.h(g_,h_,b))
def m2_19(self, a,b):
return tensorContract(self.Ts(A_,a,e_),
self.Ts(C_,e_,b),
self.G(A_,B_),
self.G(C_,D_),
self.T(D_,i_,j_),
self.T(B_,j_,k_),
self.Mt(k_,l_),
self.M(l_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_20(self, a,b):
return tensorContract(self.Y2SCF(a,e_),
self.mu(e_,b))
def m2_21(self, a,b):
return tensorContract(self.C2S(a,e_),
self.Y2S(e_,f_),
self.mu(f_,b))
def m2_22(self, a,b):
return tensorContract(self.l(a,b,e_,f_),
self.Y2S(f_,g_),
self.mu(e_,g_))
def m2_23(self, a,b):
return tensorContract(self.h(a,e_,f_),
self.Y2S(f_,g_),
self.h(e_,g_,b))
def m2_24(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.T(A_,j_,k_),
self.Mt(k_,l_),
self.M(l_,m_),
self.G(A_,B_),
self.T(B_,m_,n_),
self.yt(b,n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_25(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.T(A_,j_,k_),
self.yt(b,k_,l_),
self.M(l_,m_),
self.G(A_,B_),
self.T(B_,m_,n_),
self.Mt(n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_26(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.T(A_,j_,k_),
self.Mt(k_,l_),
self.y(b,l_,m_),
self.G(A_,B_),
self.T(B_,m_,n_),
self.Mt(n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_27(self, a,b):
return tensorContract(self.C2S(a,e_),
self.y(e_,i_,j_),
self.Mt(j_,k_),
self.y(b,k_,l_),
self.Mt(l_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_28(self, a,b):
return tensorContract(self.C2S(a,e_),
self.y(e_,i_,j_),
self.yt(b,j_,k_),
self.M(k_,l_),
self.Mt(l_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_29(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(b,j_,k_),
self.M(k_,l_),
self.Mt(l_,m_),
self.C2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_30(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.y(b,k_,l_),
self.Mt(l_,m_),
self.C2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_31(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.M(k_,l_),
self.yt(b,l_,m_),
self.C2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_32(self, a,b):
return tensorContract(self.M(i_,j_),
self.yt(a,j_,k_),
self.y(b,k_,l_),
self.Mt(l_,m_),
self.C2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_33(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(e_,j_,k_),
self.y(b,k_,l_),
self.yt(f_,l_,i_),
self.mu(e_,f_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_34(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(e_,j_,k_),
self.M(k_,l_),
self.yt(f_,l_,i_),
self.h(e_,f_,b),
doTrace=True, yukSorting=self.model.YukPos)
def m2_35(self, a,b):
return tensorContract(self.M(i_,j_),
self.yt(e_,j_,k_),
self.M(k_,l_),
self.yt(f_,l_,i_),
self.l(e_,f_,a,b),
doTrace=True, yukSorting=self.model.YukPos)
def m2_36(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(b,j_,k_),
self.y(e_,k_,l_),
self.yt(f_,l_,i_),
self.mu(e_,f_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_37(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.y(e_,k_,l_),
self.yt(f_,l_,i_),
self.h(e_,f_,b),
doTrace=True, yukSorting=self.model.YukPos)
def m2_38(self, a,b):
return tensorContract(self.M(i_,j_),
self.Mt(j_,k_),
self.y(e_,k_,l_),
self.yt(f_,l_,i_),
self.l(e_,f_,a,b),
doTrace=True, yukSorting=self.model.YukPos)
def m2_39(self, a,b):
return tensorContract(self.Y4S(a,e_),
self.mu(e_,b))
def m2_40(self, a,b):
return tensorContract(self.Y2SYF(a,e_),
self.mu(e_,b))
def m2_41(self, a,b):
return tensorContract(self.M(i_,j_),
self.yt(a,j_,k_),
self.M(k_,l_),
self.yt(e_,l_,m_),
self.y(b,m_,n_),
self.yt(e_,n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_42(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(b,j_,k_),
self.M(k_,l_),
self.yt(e_,l_,m_),
self.M(m_,n_),
self.yt(e_,n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_43(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.M(k_,l_),
self.yt(e_, l_, m_),
self.y(b, m_, n_),
self.yt(e_, n_, i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_44(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.y(b,k_,l_),
self.yt(e_,l_,m_),
self.M(m_,n_),
self.yt(e_,n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_45(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(b,j_,k_),
self.y(e_,k_,l_),
self.Mt(l_,m_),
self.M(m_,n_),
self.yt(e_,n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_46(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.y(e_,k_,l_),
self.Mt(l_,m_),
self.y(b,m_,n_),
self.yt(e_,n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_47(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.y(e_,k_,l_),
self.yt(b,l_,m_),
self.M(m_,n_),
self.yt(e_,n_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_48(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.yt(b,j_,k_),
self.M(k_,l_),
self.Mt(l_,m_),
self.Y2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_49(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.y(b,k_,l_),
self.Mt(l_,m_),
self.Y2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_50(self, a,b):
return tensorContract(self.y(a,i_,j_),
self.Mt(j_,k_),
self.M(k_,l_),
self.yt(b,l_,m_),
self.Y2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
def m2_51(self, a,b):
return tensorContract(self.M(i_,j_),
self.yt(a,j_,k_),
self.y(b,k_,l_),
self.Mt(l_,m_),
self.Y2F(m_,i_),
doTrace=True, yukSorting=self.model.YukPos)
|
from bs4 import BeautifulSoup, NavigableString
from pathlib import Path
def extract_references_page(html_string):
"""Extract the references in report282b, 283b, or 284b.html.
Keeps any double spaces present in the original reference, as well as <i>
and other minor HTML tags.
"""
# Add in extra <p> tags to make parsing easier
html_string = html_string.replace("<blockquote>", "<blockquote><p>")
# Fix the two broken refs of McCollough et al. and MacCord
html_string = html_string.replace("Lenhardt<p>", "Lenhardt<blockquote><p>")
soup = BeautifulSoup(html_string, 'html5lib')
references = {}
hrefs_to_refs = {}
i = 0
contents = soup.body.contents
while i < len(contents):
author = None
author_refs = None
if i < len(contents) - 1 and contents[i+1].name == "blockquote":
content = contents[i]
if content.name == "a":
if i == len(contents) - 1:
raise Exception("Found an <a> tag in references, but it was "
"the very last element in the page.")
author = content.text.strip()
author_refs = contents[i+1]
hrefs_to_refs[content['name']] = {"author": author, "refNum": 0}
elif isinstance(content, NavigableString):
author = str(content).strip()
author_refs = contents[i+1]
else:
raise Exception("Found an element right before a blockquote"
+ "that is not a NavigableString or an <a> "
+ "tag, " + str(content))
if author:
extracted_refs = []
for ref in author_refs: # ref here refers to a <p> tag in the soup
# Remove empty <p> tags
if ref.text and ref.text.strip() == '':
continue
a_name = None
if ref.a:
a_name = ref.a['name']
ref.a.replace_with(ref.a.string)
hrefs_to_refs[a_name] = {
"author": author,
"refNum": len(extracted_refs)
}
extracted_refs.append(
str(ref).replace('<p>', '')
.replace('</p>', '')
.replace('\n', ' ')
.strip()
)
references[author] = extracted_refs
i += 1
return {"refs": references, "hrefsToRefs": hrefs_to_refs}
def extract_all_references(dig_parent_dir, readfile):
"""Extract all references A-Z from the site."""
dig_parent_path_obj = Path(dig_parent_dir)
extracted = {"refs": {}, "hrefsToRefs": {}}
for split_page_num in [282, 283, 284]:
split_page_dir = dig_parent_path_obj / "dig/html/split"
refs_html = readfile(
"report" + str(split_page_num) + "b.html", split_page_dir
)
data = extract_references_page(refs_html)
extracted['refs'].update(data['refs'])
extracted['hrefsToRefs'].update(data['hrefsToRefs'])
return extracted
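# --- Usage sketch (not part of the original module) ---
# extract_all_references expects a `readfile(filename, directory)` callable;
# a minimal file-system-backed implementation might look like this (the site
# path below is a hypothetical placeholder).
def _readfile(filename, directory):
    return (Path(directory) / filename).read_text(encoding="utf-8")
# refs = extract_all_references("/path/to/site", _readfile)
# refs["refs"] maps author names to lists of reference strings, and
# refs["hrefsToRefs"] maps anchor names to {"author", "refNum"} entries.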
|
import json
import argparse
from src.model_handler.TrainHandler import start_training
def parse_args():
parser = argparse.ArgumentParser(description="Adipocyte Fluorescence Predictor CLI Tool")
parser.add_argument("-s", "--setting_file", type=str,
help="JSON filepath that contains settings.")
args = parser.parse_args()
print(args)
return args
def get_settings(json_path):
with open(json_path, "r") as json_file:
settings = json.load(json_file)
print(settings)
return settings
def main():
args = parse_args()
settings = get_settings(args.setting_file)
start_training(**settings)
if __name__ == "__main__":
main()
|
import enum
import numpy as np
import string
import random
from collections import namedtuple
class DIRECTION(enum.Enum):
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class TURN(enum.Enum):
NONE = 0
LEFT = -1
RIGHT = 1
class BOARD_OBJECT(enum.Enum):
EMPTY = 0
BODY = 1
TAIL = -1
APPLE = 2
ALL_DIRECTIONS = [DIRECTION.UP, DIRECTION.RIGHT, DIRECTION.DOWN, DIRECTION.LEFT]
ALL_TURNS = [TURN.NONE, TURN.LEFT, TURN.RIGHT]
DIRECTION_UNIT_VECTORS = dict(
[
(DIRECTION.UP, [0, -1]),
(DIRECTION.DOWN, [0, 1]),
(DIRECTION.LEFT, [-1, 0]),
(DIRECTION.RIGHT, [1, 0]),
]
)
DIRECTION_MARKERS = dict(
[
(DIRECTION.UP, '^'),
(DIRECTION.DOWN, 'v'),
(DIRECTION.LEFT, '<'),
(DIRECTION.RIGHT, '>'),
]
)
LOG_LEVEL_THRESHOLD = 1
chars = string.ascii_letters + string.digits
def generate_id():
return ''.join([random.choice(chars) for n in range(6)])
def generate_chromosome(length):
return np.random.uniform(-1, 1, length)
def Log(message: str, level = 1, end='\n'):
if(level >= LOG_LEVEL_THRESHOLD):
print(message, end=end)
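# --- Illustrative helper (not part of the original module) ---
# Shows one way the DIRECTION and TURN enums can be combined: the directions
# are numbered clockwise (UP=0, RIGHT=1, DOWN=2, LEFT=3) and the turns are
# -1/0/+1, so a relative turn is just modular arithmetic on the values.
def apply_turn(direction: DIRECTION, turn: TURN) -> DIRECTION:
    return ALL_DIRECTIONS[(direction.value + turn.value) % 4]
# Example: apply_turn(DIRECTION.UP, TURN.RIGHT) -> DIRECTION.RIGHT
#          apply_turn(DIRECTION.UP, TURN.LEFT)  -> DIRECTION.LEFT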
|
import json
import logging
from django.conf import settings
from django.shortcuts import render
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from identity.keystone import Keystone
from swift_cloud_tools.client import SCTClient
log = logging.getLogger(__name__)
@login_required
def swift_cloud_report(request):
keystone = Keystone(request)
projects = []
environ = settings.ENVIRON
if not environ and "localhost" in request.get_host():
environ = "local"
try:
for project in keystone.project_list():
projects.append({
"id": project.id,
"name": project.name,
"description": project.description,
"environment": environ,
"status": "",
})
except Exception as e:
log.exception(f"Keystone error: {e}")
context = {"projects": json.dumps(projects)}
return render(request, "vault/swift_cloud/report.html", context)
@login_required
def swift_cloud_status(request):
project_id = request.GET.get('project_id')
if not project_id:
return JsonResponse({"error": "Missing project_id parameter"}, status=400)
sct_client = SCTClient(
settings.SWIFT_CLOUD_TOOLS_URL,
settings.SWIFT_CLOUD_TOOLS_API_KEY
)
content = {"status": None}
response = sct_client.transfer_get(project_id)
data = response.json()
if response.status_code == 404:
content["status"] = "Not initialized"
else:
content["status"] = "Waiting"
if data.get("initial_date") and not data.get("final_date"):
content["status"] = "Migrating"
if data.get("final_date"):
content["status"] = "Done"
return JsonResponse(content, status=200)
@login_required
def swift_cloud_migrate(request):
if request.method != 'POST':
return JsonResponse({"error": "Method not allowed"}, status=405)
sct_client = SCTClient(
settings.SWIFT_CLOUD_TOOLS_URL,
settings.SWIFT_CLOUD_TOOLS_API_KEY
)
params = json.loads(request.body)
content = {"message": "Migration job created"}
status = 201
response = sct_client.transfer_create(
params.get('project_id'),
params.get('project_name'),
params.get('environment')
)
status = response.status_code
if status != 201:
content = {"error": response.text}
return JsonResponse(content, status=status)
|
import abc
__all__ = ['PowerDNSDatabaseMixIn']
class PowerDNSDatabaseMixIn(object):
"""
PowerDNSDatabaseMixIn class contains PowerDNS related queries
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def _execute(self, operation, params=()):
pass
def gslb_checks(self):
operation = """
SELECT `contents_monitors`.`id`,
`contents`.`content`,
`monitors`.`monitor_json`
FROM `contents_monitors`
JOIN `contents` ON `contents_monitors`.`content_id` = `contents`.`id`
JOIN `monitors` ON `contents_monitors`.`monitor_id` = `monitors`.`id`
"""
return self._execute(operation)
def gslb_records(self, qname, qtype):
operation = """
SELECT `names`.`name` AS `qname`,
`types`.`type` AS `qtype`,
`names_types`.`ttl`,
`names_types`.`persistence`,
`records`.`fallback`,
`records`.`weight`,
`contents_monitors`.`id`,
`contents`.`content`,
`views`.`rule`
FROM `names`
JOIN `names_types` ON `names`.`id` = `names_types`.`name_id`
JOIN `types` ON `names_types`.`type_value` = `types`.`value`
JOIN `records` ON `names_types`.`id` = `records`.`name_type_id`
JOIN `contents_monitors` ON `records`.`content_monitor_id` = `contents_monitors`.`id`
JOIN `contents` ON `contents_monitors`.`content_id` = `contents`.`id`
JOIN `views` ON `records`.`view_id` = `views`.`id`
"""
if qtype == 'ANY':
operation += """
WHERE `names`.`name` = %s
AND `records`.`disabled` = 0
"""
params = (qname,)
else:
operation += """
WHERE `names`.`name` = %s
AND `types`.`type` = %s
AND `records`.`disabled` = 0
"""
params = (qname, qtype)
return self._execute(operation, params)
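# --- Usage sketch (not part of the original module) ---
# The mixin only requires a concrete `_execute`; a minimal MySQL-backed
# implementation might look like this. pymysql and the connection handling
# are illustrative assumptions, not something the mixin mandates.
import pymysql
class PowerDNSDatabase(PowerDNSDatabaseMixIn):
    def __init__(self, **connect_kwargs):
        self._connection = pymysql.connect(**connect_kwargs)
    def _execute(self, operation, params=()):
        # Run the query and return all rows; the mixin's SQL uses %s
        # placeholders, which pymysql expects.
        with self._connection.cursor() as cursor:
            cursor.execute(operation, params)
            return cursor.fetchall()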
|
from typing import List, Any
from copy import deepcopy
class Message:
"""
    A message class for transferring data between server and client
"""
def __init__(
self,
sender: str = "SERVER",
house: str = "",
room: str = "",
text: str = "",
action: str = "",
reciepents: List[str] = [],
data: dict[str, Any] = {},
):
self.action = action
self.sender = sender
self.house = house
self.room = room
self.text = text
self.reciepents = reciepents
self.data = data
def clone(self) -> "Message":
return deepcopy(self)
def take_recipients(self) -> List[str]:
"""
Takes the ownership of reciepents and replace it with
a empty list to reduce message load
"""
reciepents = self.reciepents
self.reciepents = []
return reciepents
def convert(
self,
sender: str = "SERVER",
action: str = "push_text",
text: str = "",
house: str = "",
room: str = "",
reciepents: list[str] = [],
data: dict[str, str] = {},
) -> "Message":
"""
Converts some parts of the message for different actions
"""
message = self.clone()
message.reciepents = reciepents if reciepents else [message.sender]
message.action = action
if room:
message.room = room
if house:
message.house = house
if text:
message.text = text
if sender == "SERVER":
message.sender = "SERVER"
message.data = data
return message
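# --- Usage sketch (not part of the original module) ---
# A hypothetical round-trip: the server receives a message from a client and
# uses convert() to build a reply addressed back to the original sender.
if __name__ == "__main__":
    incoming = Message(sender="alice", house="python", room="general",
                       text="hello", action="push_text")
    reply = incoming.convert(action="push_text", text="welcome to general!")
    # convert() defaults the recipients to the original sender and stamps
    # the reply as coming from the server.
    assert reply.reciepents == ["alice"]
    assert reply.sender == "SERVER"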
|
"""denne filen tar kun inn viewsets og legger dem inn i urlpatterns """
from rest_framework import routers
from .views import BrukerViewSet, NamesViewSet, UserViewSet, BorstViewSet, registrationView
router = routers.DefaultRouter()
router.register('api/kalas', BrukerViewSet, 'kalas')
router.register('api/names', NamesViewSet, 'name')
router.register('api/users', UserViewSet, 'users')
router.register('api/borst', BorstViewSet, 'borst')
urlpatterns = router.urls
|
import collector
from server import APP
# R0201 = Method could be a function Used when a method doesn't use its bound
# instance, and so could be written as a function.
# pylint: disable=R0201
class TestRoot:
"""Test various use cases for the index route."""
def test_route_with_no_worker(self, mocker):
"""Test index route when there is no worker set."""
client = APP.test_client(mocker)
url = '/'
redis = mocker.MagicMock()
mocker.patch.object(collector.utils, 'REDIS', redis)
response = client.get(url)
assert response.get_data() == \
b'{"message":"No worker set","status":"Error","version":"1.0"}\n'
assert response.status_code == 500
def test_route_with_redis_present(self, mocker):
"""Test index route when redis is present."""
client = APP.test_client(mocker)
worker = mocker.MagicMock()
mocker.patch.object(collector, 'WORKER', worker)
url = '/'
redis = mocker.MagicMock()
mocker.patch.object(collector.utils, 'REDIS', redis)
response = client.get(url)
assert response.get_data() == \
b'{"message":"Up and Running","status":"OK","version":"1.0"}\n'
assert response.status_code == 200
def test_route_with_redis_absent(self, mocker):
"""Test index route when there is no redis."""
client = APP.test_client(mocker)
worker = mocker.MagicMock()
mocker.patch.object(collector, 'WORKER', worker)
url = '/'
response = client.get(url)
assert response.get_data() == \
b'{"message":"Required service not operational",' \
b'"status":"Error","version":"1.0"}\n'
assert response.status_code == 500
|
from ..utils import get_by_key
import datetime
from ..models import meetups
from ..models.meetups import MEETUPS_LIST
from ..models import users
from ..models.users import USERS_LIST
QUESTIONS_LIST = []
class Questions():
    def put(self, question_id, created_on, created_by, meetup, title, body, votes):
self.single_question = {}
question = get_by_key('question_id', question_id, QUESTIONS_LIST)
if "message" not in question:
return {"message": "the question id you entered is being used for another question"}
created_on = datetime.datetime.now()
self.single_question['question_id'] = question_id
self.single_question['created_on'] = created_on
        self.single_question['created_by'] = int(created_by)
self.single_question['meetup'] = int(meetup)
self.single_question['title'] = title
self.single_question['body'] = body
self.single_question['votes'] = int(votes)
QUESTIONS_LIST.append(self.single_question)
return {"message": "Question has been added successfully"}
    def get_all_questions(self, meetup_id):
question = [questions for questions in QUESTIONS_LIST if questions['meetup'] == meetup_id]
if not question:
return {"message": "question for this meetup does not exist"}
return question
def get_single_question(self, question_id):
question = get_by_key('question_id', question_id, QUESTIONS_LIST)
if not question:
return {"message": "question does not exist"}
return question
    def patch1(self, question_id):
        question = [questions for questions in QUESTIONS_LIST if questions['question_id'] == question_id]
        if not question:
            return {"message": "question is not available"}
        question[0]['votes'] += 1
        return {"message": "you upvoted this question"}
    def patch2(self, question_id):
        question = [questions for questions in QUESTIONS_LIST if questions['question_id'] == question_id]
        if not question:
            return {"message": "question is not available"}
        question[0]['votes'] -= 1
        return {"message": "you downvoted this question"}
|
#!/usr/bin/env python3
"""
Usage: check_ntp.py <host> <min_stratum>
$ check_ntp.py pool.ntp.org 3
"""
import sys
import ntplib
def check_health(host, min_stratum=3, port=123, ntp_version=3, timeout=5):
try:
c = ntplib.NTPClient()
resp = c.request(host, port=port, version=ntp_version, timeout=timeout)
    except Exception:
print("Connection Fail")
exit(1)
    if int(resp.stratum) > int(min_stratum):
        print("Current Stratum: %s, Minimum Stratum: %s" % (resp.stratum, min_stratum))
exit(2)
exit(0)
if __name__ == "__main__":
try:
host = sys.argv[1]
min_stratum = sys.argv[2]
    except IndexError:
        print("WARNING - healthcheck could not run, please supply host and min_stratum")
exit(1)
check_health(host, min_stratum)
|
# Python variants
vards = "Māris"
uzvards = "Danne"
# The old standard way
pilnsVards = vards + " " + uzvards
# A better variant
pilnsVards = "{} {}".format(vards, uzvards)
# The new way (f-string)
pilnsVards = f"{vards} {uzvards}"
print(pilnsVards)
|
import logging
import os
from re import template
from typing import Dict, List, Tuple, cast
from zipfile import ZipFile
import requests
import typer
from .TemplateLoader import TemplateLoader
from .TemplateOptions import TemplateOptions
from .TemplateRenderer import TemplateRenderer
from .utils import download
CURVENOTE_API_URL = os.getenv("CURVENOTE_API_URL")
API_URL = (
CURVENOTE_API_URL if CURVENOTE_API_URL is not None else "https://api.curvenote.com"
)
TEMPLATE_DOWNLOAD_URL = "{api_url}/templates/tex/{template_name}/download"
OLD_TEMPLATE_DOWNLOAD_URL = "{api_url}/templates/{template_name}/download"
def do_download(URL: str, template_name: str):
url = URL.format(api_url=API_URL, template_name=template_name)
logging.info(f"DOWNLOAD: {url}")
try:
download_info = requests.get(url).json()
if "status" in download_info and download_info["status"] != 200:
raise ValueError(f'{template_name} not found - {download_info["status"]}')
except requests.exceptions.RequestException as e:
raise ValueError(f"Requests error - {url} - {e}")
return download_info
class PublicTemplateLoader(TemplateLoader):
def __init__(self, template_location: str):
super().__init__(template_location)
def initialise_from_template_api(
self, template_name: str
) -> Tuple[TemplateOptions, TemplateRenderer]:
logging.info("Writing to target folder: %s", self._target_folder)
logging.info("Looking up template %s", template_name)
logging.info("latest code")
try:
download_info = {}
try:
name = (
template_name
if template_name.startswith("public/")
else f"public/{template_name}"
)
download_info = do_download(TEMPLATE_DOWNLOAD_URL, name)
            except ValueError:
name = (
template_name
if not template_name.startswith("public/")
else template_name[7:]
)
download_info = do_download(OLD_TEMPLATE_DOWNLOAD_URL, name)
if "link" not in download_info:
typer.echo(f"Template '{template_name}' not found")
raise typer.Exit(-1)
except ValueError as err:
logging.error("could not download template %s", template_name)
raise ValueError(f"could not download template: {template_name}") from err
# fetch template to local folder
logging.info(f"Found template, download url {download_info['link']}")
logging.info("downloading...")
zip_filename = os.path.join(
self._target_folder, f"{template_name.replace('/','_')}.template.zip"
)
download(download_info["link"], zip_filename)
# unzip
logging.info("Download complete, unzipping...")
with ZipFile(zip_filename, "r") as zip_file:
zip_file.extractall(self._target_folder)
logging.info("Unzipped to %s", self._target_folder)
os.remove(zip_filename)
logging.info("Removed %s", zip_filename)
# success -- update members
self._template_name = template_name
renderer = TemplateRenderer()
renderer.use_from_folder(self._target_folder)
return TemplateOptions(self._target_folder), renderer
|
# -*- coding: utf-8 -*-
"""
Module for the structural design of steel members.
"""
import numpy as np
class Geometry:
"""
Structural element geometry.
Class for the geometric properties of a structural element.
Parameters
----------
cs_sketch : CsSketch object
Cross-section sketch.
length : float
Member's length.
"""
def __init__(self, cs_sketch, length, thickness):
self.cs_sketch = cs_sketch
self.length = length
self.thickness = thickness
class CsSketch:
"""
Cross-section geometry.
Parameters
----------
nodes : list
List of points.
elem : list
Element connectivity.
"""
def __init__(self, nodes, elem):
self.nodes = nodes
self.elem = elem
class CsProps:
"""
Cross-section properties.
Class for the mass properties of cross-sections. The properties can be calculated using the from_cs_sketch() method.
Parameters
----------
area : float
Cross-sectional area.
xc : float
`x` coordinate of the gravity center.
yc : float
`y` coordinate of the gravity center.
moi_xx : float
Moment of inertia around `x` axis.
moi_yy : float
Moment of inertia around `y` axis.
moi_xy : float
Polar moment of inertia.
theta_principal : float
Rotation of the principal axes.
moi_1 : float
Moment of inertia around the major axis.
moi_2 : float
Moment of inertia around the minor axis.
"""
def __init__(self,
area=None,
xc=None,
yc=None,
moi_xx=None,
moi_yy=None,
moi_xy=None,
theta_principal=None,
moi_1=None,
moi_2=None
):
self.area = area
self.xc = xc
self.yc = yc
self.moi_xx = moi_xx
self.moi_yy = moi_yy
self.moi_xy = moi_xy
self.theta_principal = theta_principal
self.moi_1 = moi_1
self.moi_2 = moi_2
@classmethod
def from_cs_sketch(cls, cs_sketch):
"""
Cross-section calculator.
Alternative constructor, calculates the mass properties of a given cs sketch and returns a CsProps object.
Parameters
----------
cs_sketch : CsSketch object
Notes
-----
"""
nele = len(cs_sketch.elem[0])
node = cs_sketch.elem[0] + cs_sketch.elem[1]
nnode = 0
j = 0
while node:
i = [ii for ii, x in enumerate(node) if x == node[0]]
for ii in sorted(i, reverse=True):
del node[ii]
if len(i) == 2:
j += 1
nnode += 1
# classify the section type (currently not used)
# if j == nele:
# section = 'close' # single cell
# elif j == nele - 1:
# section = 'open' # singly-branched
# else:
# section = 'open' # multi-branched
# Calculate the cs-properties
tt = []
xm = []
ym = []
xd = []
yd = []
side_length = []
for i in range(nele):
sn = cs_sketch.elem[0][i]
fn = cs_sketch.elem[1][i]
# thickness of the element
tt = tt + [cs_sketch.elem[2][i]]
# compute the coordinate of the mid point of the element
xm = xm + [mean_list([cs_sketch.nodes[0][sn], cs_sketch.nodes[0][fn]])]
ym = ym + [mean_list([cs_sketch.nodes[1][sn], cs_sketch.nodes[1][fn]])]
# compute the dimension of the element
xd = xd + [(cs_sketch.nodes[0][fn] - cs_sketch.nodes[0][sn])]
yd = yd + [(cs_sketch.nodes[1][fn] - cs_sketch.nodes[1][sn])]
# compute the length of the element
side_length = side_length + [np.sqrt(xd[i] ** 2 + yd[i] ** 2)]
# calculate cross sectional area
area = sum([a * b for a, b in zip(side_length, tt)])
# compute the centroid
xc = sum([a * b * c for a, b, c in zip(side_length, tt, xm)]) / area
yc = sum([a * b * c for a, b, c in zip(side_length, tt, ym)]) / area
if abs(xc / np.sqrt(area)) < 1e-12:
xc = 0
if abs(yc / np.sqrt(area)) < 1e-12:
yc = 0
# Calculate MOI
moi_xx = sum([sum(a) for a in zip([a ** 2 * b * c / 12 for a, b, c in zip(yd, side_length, tt)],
[(a - yc) ** 2 * b * c for a, b, c in
zip(ym, side_length, tt)])])
moi_yy = sum([sum(a) for a in zip([a ** 2 * b * c / 12 for a, b, c in zip(xd, side_length, tt)],
[(a - xc) ** 2 * b * c for a, b, c in
zip(xm, side_length, tt)])])
moi_xy = sum(
[sum(a) for a in zip([a * b * c * d / 12 for a, b, c, d in zip(xd, yd, side_length, tt)],
[(a - xc) * (b - yc) * c * d for a, b, c, d in
zip(xm, ym, side_length, tt)])])
if abs(moi_xy / area ** 2) < 1e-12:
moi_xy = 0
# Calculate angle of principal axes
if moi_xx == moi_yy:
theta_principal = np.pi / 2
else:
theta_principal = np.arctan(
(-2 * moi_xy) / (moi_xx - moi_yy)) / 2
# Change to centroid principal coordinates
# coord12 = [[a - xc for a in cs_sketch.nodes[0]],
# [a - yc for a in cs_sketch.nodes[1]]]
coord12 = np.array([[np.cos(theta_principal), np.sin(theta_principal)],
[-np.sin(theta_principal), np.cos(theta_principal)]]).dot(cs_sketch.nodes)
# re-calculate cross sectional properties for the centroid
for i in range(nele):
sn = cs_sketch.elem[0][i]
fn = cs_sketch.elem[1][i]
# calculate the coordinate of the mid point of the element
xm = xm + [mean_list([coord12[0][sn], coord12[0][fn]])]
ym = ym + [mean_list([coord12[1][sn], coord12[1][fn]])]
# calculate the dimension of the element
xd = xd + [(coord12[0][fn] - coord12[0][sn])]
yd = yd + [(coord12[1][fn] - coord12[1][sn])]
# calculate the principal moment of inertia
moi_1 = sum([sum(a) for a in zip([a ** 2 * b * c / 12 for a, b, c in zip(yd, side_length, tt)],
[(a - yc) ** 2 * b * c for a, b, c in
zip(ym, side_length, tt)])])
moi_2 = sum([sum(a) for a in zip([a ** 2 * b * c / 12 for a, b, c in zip(xd, side_length, tt)],
[(a - xc) ** 2 * b * c for a, b, c in
zip(xm, side_length, tt)])])
return cls(
area=area,
xc=xc,
yc=yc,
moi_xx=moi_xx,
moi_yy=moi_yy,
moi_xy=moi_xy,
theta_principal=theta_principal,
moi_1=moi_1,
moi_2=moi_2
)
class Material:
"""
Material properties.
Parameters
----------
e_modulus : float
Modulus of elasticity.
poisson : float
Poisson's ratio.
f_yield : float
Yield stress
plasticity : tuple
Plasticity table (tuple of stress-plastic strain pairs).
By default, no plasticity is considered.
"""
def __init__(self, e_modulus, poisson, f_yield, plasticity=None):
self.e_modulus = e_modulus
self.poisson = poisson
self.f_yield = f_yield
self.plasticity = plasticity
@staticmethod
def plastic_table(nominal=None):
"""
Plasticity tables.
Tables with plastic stress-strain curve values for different steels
given a steel name, e.g 'S355'
Parameters
----------
nominal : string [optional]
Steel name. Default value, 'S355'
Attributes
----------
Notes
-----
References
----------
"""
if nominal is None:
nominal = 'S355'
if nominal == 'S355':
table = (
(381.1, 0.0),
(391.2, 0.0053),
(404.8, 0.0197),
(418.0, 0.0228),
(444.2, 0.0310),
(499.8, 0.0503),
(539.1, 0.0764),
(562.1, 0.1009),
(584.6, 0.1221),
(594.4, 0.1394),
(596.1, 1.)
)
if nominal == 'S650':
table = (
(760., 0.0),
(770., 0.022),
(850., 0.075),
(900., 0.1),
(901., 1.)
)
return table
@classmethod
def from_nominal(cls, nominal_strength=None):
"""
Alternative constructor creating a steel material from a given nominal strength.
Parameters
----------
nominal_strength : str
Steel quality, given in the form of e.g. "S355"
"""
if nominal_strength is None:
f_yield = 235.
else:
f_yield = float(nominal_strength.replace('S', ''))
plasticity = cls.plastic_table(nominal=nominal_strength)
return cls(210000., 0.3, f_yield, plasticity=plasticity)
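# Illustrative usage (added): a nominal grade string sets the yield stress and
# attaches the matching plasticity table, e.g.
#   steel = Material.from_nominal("S355")   # f_yield = 355.0, e_modulus = 210000., poisson = 0.3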
class BCs:
def __init__(self, bcs):
self.bcs = bcs
@classmethod
def from_hinged(cls):
return cls([[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0]])
class StructProps:
"""
Structural properties of a member.
Parameters
----------
t_classification : float, optional
Classification of a tube, d/(t^2*e)
p_classification : float, optional
Classification of a plate, c/(t*e)
lmbda_y : float, optional
Flexural slenderness on the strong axis.
lmbda_z : float, optional
Flexural slenderness on the weak axis.
n_pl_rd : float, optional
Plastic axial compression resistance.
n_b_rd_shell : float, optional
Shell buckling resistance
"""
def __init__(self,
t_classification=None,
p_classification=None,
lmbda_y=None,
lmbda_z=None,
n_pl_rd=None,
n_b_rd_shell=None
):
self.t_classification = t_classification
self.p_classification = p_classification
self.lmbda_y = lmbda_y
self.lmbda_z = lmbda_z
self.n_pl_rd = n_pl_rd
self.n_b_rd_shell = n_b_rd_shell
class Part:
"""
Structural part.
Class describing a structural part, including geometry, boundary conditions, loads and resistance.
Parameters
----------
geometry : Geometry object, optional
cs_props : CsProps object, optional
material : Material object, optional
struct_props : StructProps object, optional
bc_loads: BCs object, optional
"""
def __init__(self,
geometry=None,
cs_props=None,
material=None,
struct_props=None,
bc_loads=None
):
self.geometry = geometry
self.cs_props = cs_props
self.material = material
self.bc_loads = bc_loads
self.struct_props = struct_props
# SIMPLY SUPPORTED PLATE
#TODO: Implement EN50341. Currently the resistance is calculated only for pure compression elements. Add interaction.
def n_pl_rd(
thickness,
width,
f_yield,
psi=None
):
# Docstring
"""
Plastic design resistance of a plate.
Calculates the resistance of a plate according to EN1993-1-1 and
EN1993-1-5. The plate is assumed simply supported.
Parameters
----------
thickness : float
[mm] Plate thickness
width : float
[mm] Plate width
f_yield : float
[MPa] Yield stress
psi : float, optional
[_] Ratio of the min over max stress for a linear distribution,
(sigma_min / sigma_max)
Default = 1, which implies a uniform distribution
Returns
-------
float
[N] Plastic design resistance
Notes
-----
To be extended to include cantilever plate (outstand members)
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-1: General rules and rules for buildings.
Brussels: CEN, 2005.
.. [2] Eurocode 3: Design of steel structures - Part 1-5: Plated structural elements. Brussels: CEN, 2005.
"""
# Convert inputs to floats
thickness, width, f_yield = float(thickness), float(width), float(f_yield)
# Default value for psi
if psi is None:
psi = 1.
else:
psi = float(psi)
# Calculate kapa_sigma
k_sigma = 8.2 / (1.05 + psi)
# Aeff calculation.
# Reduction factor for the effective area of the profile acc. to EC3-1-5
classification = width / (thickness * np.sqrt(235 / f_yield))
lambda_p = classification / (28.4 * np.sqrt(k_sigma))
if lambda_p > 0.673 and plate_class(thickness, width, f_yield) == 4:
rho = (lambda_p - 0.055 * (3 + psi)) / lambda_p ** 2
else:
rho = 1.
# Effective area
a_eff = rho * thickness * width
# Axial compression resistance , Npl
nn_pl_rd = a_eff * f_yield
# Return value
return nn_pl_rd
def plate_class(
thickness,
width,
f_yield
):
# Docstring
"""
Plate classification.
Returns the class for a given plate, according to EN1993-1-1.
Currently works for simply supported plates under pure compression.
Parameters
----------
thickness : float
[mm] Plate thickness
width : float
[mm] Plate width
f_yield : float
[MPa] Yield stress
Returns
-------
int
[_] Class number
Notes
-----
To be extended to include the rest of the cases of Table 5.3 [1].
Members under combined axial and bending and outstand members.
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-1: General rules and rules for buildings. Brussels: CEN, 2005
"""
# Convert inputs to floats
width, thickness, f_yield = float(width), float(thickness), float(f_yield)
# Calculate classification
classification = width / (thickness * np.sqrt(235. / f_yield))
if classification <= 33.:
p_class = 1
elif classification <= 38.:
p_class = 2
elif classification <= 42.:
p_class = 3
else:
p_class = 4
# Return value
return p_class
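# Worked examples (added; the plate dimensions are assumed):
#   plate_class(thickness=4., width=100., f_yield=235.)  -> 1   (c/(t*e) = 25 <= 33)
#   plate_class(thickness=4., width=200., f_yield=235.)  -> 4   (c/(t*e) = 50 > 42)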
def sigma_cr_plate(
thickness,
width,
psi=None
):
# Docstring
"""
Critical stress of a plate.
Calculates the critical stress for a simply supported plate.
Parameters
----------
thickness : float
[mm] Plate thickness
width : float
[mm] Plate width
psi : float, optional
[_] Ratio of the min over max stress for a linear distribution,
(sigma_min / sigma_max)
Default = 1, which implies a uniform distribution
Returns
-------
float
[MPa] Plate critical stress
Notes
-----
To be extended to include cantilever plate (outstand members)
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-5: Plated structural elements. Brussels: CEN, 2005.
"""
# Convert inputs to floats
thickness, width = float(thickness), float(width)
# Default value for psi
if psi is None:
psi = 1.
else:
psi = float(psi)
# Calculate kapa_sigma
k_sigma = 8.2 / (1.05 + psi)
# Elastic critical stress acc. to EN3-1-5 Annex A
sigma_e = 190000 * (thickness / width) ** 2
sigma_cr = sigma_e * k_sigma
# Return value
return sigma_cr
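# Worked example (added; the plate dimensions are assumed):
#   sigma_cr_plate(thickness=10., width=500.)  -> 304.0 MPa
#   (sigma_e = 190000 * (10 / 500) ** 2 = 76 MPa, k_sigma = 8.2 / 2.05 = 4.0)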
# CYLINDRICAL SHELLS
def sigma_x_rd(
thickness,
radius,
length,
f_y_k,
fab_quality=None,
gamma_m1=None
):
# Docstring
"""
Meridional design buckling stress.
Calculates the meridional buckling stress for a cylindrical shell
according to EN1993-1-6 [1].
Parameters
----------
thickness : float
[mm] Shell thickness
radius : float
[mm] Cylinder radius
length : float
[mm] Cylinder length
f_y_k : float
[MPa] Characteristic yield strength
fab_quality : str, optional
[_] Fabrication quality class. Accepts: 'fcA', 'fcB', 'fcC'
The three classes correspond to .006, .010 and .016 times the
width of a dimple on the shell.
Default = 'fcA', which implies excellent fabrication
gamma_m1 : int, optional
[_] Partial safety factor
Default = 1.1
Returns
-------
float
[MPa] Meridional buckling stress
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-6: Strength and stability of shell structures.
Brussels: CEN, 2006._
"""
# Default values
if fab_quality is None:
fab_quality = 'fcA'
if gamma_m1 is None:
gamma_m1 = 1.1
else:
gamma_m1 = float(gamma_m1)
# Fabrication quality class acc. to table D2
if fab_quality == 'fcA':
q_factor = 40.
elif fab_quality == 'fcB':
q_factor = 25.
elif fab_quality == 'fcC':
q_factor = 16.
else:
print('Invalid fabrication class input. Choose between \'fcA\', \'fcB\' and \'fcC\' ')
return
# Critical meridional stress, calculated in a separate function
sigma_cr, category = sigma_x_rcr(thickness, radius, length)
# Shell slenderness
lmda = np.sqrt(f_y_k / sigma_cr)
delta_w_k = (1. / q_factor) * np.sqrt(radius / thickness) * thickness
alpha = 0.62 / (1 + 1.91 * (delta_w_k / thickness) ** 1.44)
beta = 0.6
eta = 1.
if category == 'long':
# For long cylinders, a formula is suggested for lambda, EC3-1-6 D1.2.2(4)
# Currently, the general form is used. to be fixed.
lmda_0 = 0.2
# lmda_0 = 0.2 + 0.1 * (sigma_e_M / sigma_e)
else:
lmda_0 = 0.2
lmda_p = np.sqrt(alpha / (1. - beta))
# Buckling reduction factor, chi
if lmda <= lmda_0:
chi = 1.
elif lmda < lmda_p:
chi = 1. - beta * ((lmda - lmda_0) / (lmda_p - lmda_0)) ** eta
else:
chi = alpha / (lmda ** 2)
# Buckling stress
sigma_rk = chi * f_y_k
sigma_rd = sigma_rk / gamma_m1
# Return value
return sigma_rd
def n_cr_shell(
thickness,
radius,
length
):
# Docstring
"""
Critical compressive load for cylindrical shell.
Calculates the critical load for a cylindrical shell under pure
compression and assumes uniform stress distribution. Calculation
according to EN1993-1-6 [1], Annex D.
Parameters
----------
thickness : float
[mm] Shell thickness
radius : float
[mm] Cylinder radius
length : float
[mm] Cylinder length
Returns
-------
float
[N] Critical load
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-6: Strength and stability of shell structures.
Brussels: CEN, 2006.
"""
# Convert inputs to floats
thickness, radius, length = float(thickness), float(radius), float(length)
# Elastic critical load acc to EN3-1-6 Annex D
nn_cr_shell = 2 * np.pi * radius * thickness * sigma_x_rcr(thickness, radius, length)[0]
# Return value
return nn_cr_shell
def sigma_x_rcr(
thickness,
radius,
length
):
# Docstring
"""
Critical meridional stress for cylindrical shell.
Calculates the critical load for a cylindrical shell under pure
compression and assumes uniform stress distribution. Calculation
according to EN1993-1-6 [1], Annex D.
Parameters
----------
thickness : float
[mm] Shell thickness
radius : float
[mm] Cylinder radius
length : float
[mm] Cylinder length
Returns
-------
list
List of 2 elements:
a) float, Critical load [N]
b) string, length category
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-6: Strength and stability of shell structures.
Brussels: CEN, 2006.
"""
# Convert inputs to floats
thickness, radius, length = float(thickness), float(radius), float(length)
# Elastic critical load acc. to EN3-1-6 Annex D
omega = length / np.sqrt(radius * thickness)
if 1.7 <= omega <= 0.5 * (radius / thickness):
c_x = 1.
length_category = 'medium'
elif omega < 1.7:
c_x = 1.36 - (1.83 / omega) + (2.07 / omega ** 2)
length_category = 'short'
else:
# c_x_b is read on table D.1 of EN3-1-5 Annex D acc. to BCs
# BC1 - BC1 is used on the Abaqus models (both ends clamped, see EN3-1-5 table 5.1)
c_x_b = 6.
c_x_n = max((1 + 0.2 * (1 - 2 * omega * thickness / radius) / c_x_b), 0.6)
c_x = c_x_n
length_category = 'long'
# Calculate critical stress, eq. D.2 on EN3-1-5 D.1.2.1-5
sigma_cr = 0.605 * 210000 * c_x * thickness / radius
# Return value
return sigma_cr, length_category
def fabclass_2_umax(fab_class=None):
# Docstring
"""
Max dimple displacement.
Returns the maximum displacement for a dimple imperfection on a cylindrical shell. The values are taken from table
8.4 of EN1993-1-6[1] for a given fabrication quality class, A, B or C.
Parameters
----------
fab_class : {'fcA', 'fcB', 'fcC'}
The fabrication quality class.
Returns
-------
float
u_max / l, where u_max is the maximum deviation and l the dimple's size (circumferential or meridional)
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-6: Strength and stability of shell structures.
Brussels: CEN, 2006.
"""
# default values
if fab_class is None:
fab_class = 'fcA'
# Assign imperfection amplitude, u_max acc. to the fabrication class
if fab_class == 'fcA':
u_max = 0.006
elif fab_class == 'fcB':
u_max = 0.010
else:
u_max = 0.016
# Return values
return u_max
# OVERALL BUCKLING
def n_cr_flex(
length,
moi_y,
kapa_bc=None,
e_modulus=None
):
# Docstring
"""
Euler's critical load.
Calculates the critical load for flexural buckling of a given column.
A single direction is considered. If more directions are required
(e.g the two principal axes), the function has to be called multiple
times. For torsional mode critical load use n_cr_tor(), and for
flexural-torsional critical load use n_cr_flex_tor()
Parameters
----------
length : float
[mm] Column length.
moi_y : float
[mm^4] Moment of inertia.
kapa_bc : float, optional
[_] length correction for the effect of the boundary conditions.
Default = 1, which implies simply supported column.
e_modulus : float, optional
[MPa] Modulus of elasticity.
Default = 210000., typical value for steel.
Returns
-------
float
[N] Critical load.
"""
# default values
if kapa_bc is None:
kapa_bc = 1.
else:
kapa_bc = float(kapa_bc)
if e_modulus is None:
e_modulus = 210000.
else:
e_modulus = float(e_modulus)
# Euler's critical load
nn_cr_flex = (np.pi ** 2) * e_modulus * moi_y / (kapa_bc * length) ** 2
# Return the result
return nn_cr_flex
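# Worked example (added; the column properties are assumed):
#   n_cr_flex(length=3000., moi_y=1.0e6)  ->  ~2.30e5 N  (about 230 kN)
#   i.e. pi ** 2 * 210000. * 1.0e6 / (1. * 3000.) ** 2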
def n_cr_tor(
length,
area,
moi_y0,
moi_z0,
moi_torsion,
moi_warp,
y_0=None,
z_0=None,
e_modulus=None,
poisson=None,
):
# Docstring
"""
Torsional elastic critical load
Calculates the torsional elastic critical load for a hinged column.
The input values refer to the principal axes. For flexural
buckling (Euler cases) use n_cr_flex. For the combined
flexural-torsional modes use n_cr_flex_tor.
Parameters
----------
length : float
[mm] Column length.
area : float
[mm^2] Cross-sectional area.
moi_y0 : float
[mm^4] Moment of inertia around `y`-axis.
`y`-axis on the centre of gravity but not necessarily principal.
moi_z0 : float
[mm^4] Moment of inertia around `z`-axis.
`z`-axis on the centre of gravity but not necessarily principal.
moi_torsion : float
[mm^4] Saint Venant constant.
moi_warp : float
[mm^6] Warping constant.
y_0 : float, optional
[mm] Distance on `y`-axis of the shear center to the origin.
Default = 0, which implies symmetric profile
z_0 : float, optional
[mm] Distance on `z`-axis of the shear center to the origin.
Default = 0, which implies symmetric profile
e_modulus : float, optional
[MPa] Modulus of elasticity.
Default = 210000., general steel.
poisson : float, optional
[_] Poisson's ratio.
Default = 0.3, general steel.
Returns
-------
float
[N] Flexural-torsional critical load.
Notes
-----
The torsional critical load is calculated as:
.. math:: N_{cr, tor} = {GJ + {\pi^2EI_w\over{L^2}}\over{r^2}}
Where:
:math:`E` : Elasticity modulus
:math:`G` : Shear modulus
:math:`J` : Torsional constant (Saint Venant)
:math:`I_w` : Warping constant
:math:`r^2=(moi_y + moi_z)/A + x_0^2 + y_0^2`
:math:`x_0, y_0` : Shear centre coordinates on the principal coordinate system
References
----------
..[1]N. S. Trahair, Flexural-torsional buckling of structures, vol. 6. CRC Press, 1993.
..[2]NS. Trahair, MA. Bradford, DA. Nethercot, and L. Gardner, The behaviour and design of steel structures to EC3, 4th edition. London; New York: Taylor & Francis, 2008.
"""
# default values
if y_0 is None:
y_0 = 0
else:
y_0 = float(y_0)
if z_0 is None:
z_0 = 0
else:
z_0 = float(z_0)
if e_modulus is None:
e_modulus = 210000.
else:
e_modulus = float(e_modulus)
if poisson is None:
poisson = 0.3
else:
poisson = float(poisson)
# Shear modulus
g_modulus = e_modulus / (2 * (1 + poisson))
# Polar radius of gyration.
i_pol = np.sqrt((moi_y0 + moi_z0) / area)
moi_zero = np.sqrt(i_pol ** 2 + y_0 ** 2 + z_0 ** 2)
# Calculation of critical torsional load.
nn_cr_tor = (1 / moi_zero ** 2) * (g_modulus * moi_torsion + (np.pi ** 2 * e_modulus * moi_warp / length ** 2))
# Return the result
return nn_cr_tor
def n_cr_flex_tor(
length,
area,
moi_y,
moi_z,
moi_yz,
moi_torsion,
moi_warp,
y_sc=None,
z_sc=None,
e_modulus=None,
poisson=None,
):
# Docstring
"""
Flexural-Torsional elastic critical load
Calculates the critical load for flexural-torsional buckling of a
column with hinged ends. The returned value is the minimum of the
three flexural-torsional modes and the independent torsional mode, as
dictated in EN1993-1-1 6.3.1.4 [1]. (for further details, see Notes).
Parameters
----------
length : float
[mm] Column length.
area : float
[mm^2] Cross-sectional area.
moi_y : float
[mm^4] Moment of inertia around `y`-axis.
`y`-axis on the centre of gravity but not necessarily principal.
moi_z : float
[mm^4] Moment of inertia around `z`-axis.
`z`-axis on the centre of gravity but not necessarily principal.
moi_yz : float
[mm^4] Product of inertia.
moi_torsion : float
[mm^4] Saint Venant constant.
moi_warp : float
[mm^6] Warping constant.
y_sc : float, optional
[mm] Distance on `y`-axis of the shear center to the origin.
Default = 0, which implies symmetric profile
z_sc : float, optional
[mm] Distance on `z`-axis of the shear center to the origin.
Default = 0, which implies symmetric profile
e_modulus : float, optional
[MPa] Modulus of elasticity.
Default = 210000., general steel.
poisson : float, optional
[_] Poisson's ratio.
Default = 0.3, general steel.
Returns
-------
float
[N] Flexural-torsional critical load.
Notes
-----
The flexural-torsional critical loads are calculated as a combination
of the three independent overall buckling modes:
i) flexural around the major axis,
ii) flexural around the minor axis,
iii) Torsional buckling (around x-axis).
First, the cs-properties are described on the principal axes. Then
the three independent modes are calculated. The combined
flexural-torsional modes are calculated as the roots of a 3rd order
equation, as given in [1], [2]. The minimum of the torsional and the
three combined modes is returned (the two independent flexural modes
are not considered; for critical load of pure flexural mode use
'n_cr_flex').
References
----------
..[1]N. S. Trahair, Flexural-torsional buckling of structures, vol. 6. CRC Press, 1993.
..[2]NS. Trahair, MA. Bradford, DA. Nethercot, and L. Gardner, The behaviour and design of steel structures to EC3, 4th edition. London; New York: Taylor & Francis, 2008.
"""
# default values
if y_sc is None:
y_sc = 0
else:
y_sc = float(y_sc)
if z_sc is None:
z_sc = 0
else:
z_sc = float(z_sc)
if e_modulus is None:
e_modulus = 210000.
else:
e_modulus = float(e_modulus)
if poisson is None:
poisson = 0.3
else:
poisson = float(poisson)
# Angle of principal axes
if abs(moi_y - moi_z) < 1e-20:
theta = np.pi / 4
else:
theta = -np.arctan((2 * moi_yz) / (moi_y - moi_z)) / 2
# Distance of the rotation centre to the gravity centre on the
# principal axes coordinate system
y_0 = y_sc * np.cos(-theta) - z_sc * np.sin(-theta)
z_0 = z_sc * np.cos(-theta) + y_sc * np.sin(-theta)
# Moment of inertia around principal axes.
moi_y0 = (moi_y + moi_z) / 2 + np.sqrt(((moi_y - moi_z) / 2) ** 2 + moi_yz ** 2)
moi_z0 = (moi_y + moi_z) / 2 - np.sqrt(((moi_y - moi_z) / 2) ** 2 + moi_yz ** 2)
# Polar radius of gyration.
i_pol = np.sqrt((moi_y0 + moi_z0) / area)
moi_zero = np.sqrt(i_pol ** 2 + y_0 ** 2 + z_0 ** 2)
# Independent critical loads for flexural and torsional modes.
n_cr_max = (np.pi ** 2 * e_modulus * moi_y0) / (length ** 2)
n_cr_min = (np.pi ** 2 * e_modulus * moi_z0) / (length ** 2)
n_tor = n_cr_tor(
length,
area,
moi_y0,
moi_z0,
moi_torsion,
moi_warp=moi_warp,
y_0=y_0,
z_0=z_0,
e_modulus=e_modulus,
poisson=poisson
)
# Coefficients of the 3rd order equation for the critical loads
# The equation is in the form aaaa * N ^ 3 - bbbb * N ^ 2 + cccc * N - dddd
aaaa = moi_zero ** 2 - y_0 ** 2 - z_0 ** 2
bbbb = ((n_cr_max + n_cr_min + n_tor) * moi_zero ** 2) - (n_cr_min * y_0 ** 2) - (n_cr_max * z_0 ** 2)
cccc = moi_zero ** 2 * (n_cr_min * n_cr_max) + (n_cr_min * n_tor) + (n_tor * n_cr_max)
dddd = moi_zero ** 2 * n_cr_min * n_cr_max * n_tor
det_3 = (
4 * (-bbbb ** 2 + 3 * aaaa * cccc) ** 3 + (2 * bbbb ** 3 - 9 * aaaa * bbbb * cccc + 27 * aaaa ** 2 * dddd) ** 2
)
if det_3 < 0:
det_3 = -1. * det_3
cf = 1j
else:
cf = 1
# Critical load
# The following n_cr formulas are the roots of the 3rd order equation of the global critical load
n_cr_1 = bbbb / (3. * aaaa) - (2 ** (1. / 3) * (-bbbb ** 2 + 3 * aaaa * cccc)) / \
(3. * aaaa * (2 * bbbb ** 3 - 9 * aaaa * bbbb * cccc + 27 * aaaa ** 2 * dddd + \
(cf * np.sqrt(det_3))) ** (1. / 3)) + (
2 * bbbb ** 3 - 9 * aaaa * bbbb * cccc + 27 * aaaa ** 2 * dddd + \
(cf * np.sqrt(det_3))) ** (1. / 3) / (
3. * 2 ** (1. / 3) * aaaa)
n_cr_2 = bbbb / (3. * aaaa) + ((1 + (0 + 1j) * np.sqrt(3)) * (-bbbb ** 2 + 3 * aaaa * cccc)) / \
(3. * 2 ** (2. / 3) * aaaa * (
2 * bbbb ** 3 - 9 * aaaa * bbbb * cccc + 27 * aaaa ** 2 * dddd + \
(cf * np.sqrt(det_3))) ** (1. / 3)) - ((1 - (0 + 1j) * np.sqrt(3)) * \
(
2 * bbbb ** 3 - 9 * aaaa * bbbb * cccc + 27 * aaaa ** 2 * dddd + \
(cf * np.sqrt(det_3))) ** (1. / 3)) / (
6. * 2 ** (1. / 3) * aaaa)
n_cr_3 = bbbb / (3. * aaaa) + ((1 - (0 + 1j) * np.sqrt(3)) * (-bbbb ** 2 + 3 * aaaa * cccc)) / \
(3. * 2 ** (2. / 3) * aaaa * (
2 * bbbb ** 3 - 9 * aaaa * bbbb * cccc + 27 * aaaa ** 2 * dddd + \
(cf * np.sqrt(det_3))) ** (1. / 3)) - ((1 + (0 + 1j) * np.sqrt(3)) * \
(
2 * bbbb ** 3 - 9 * aaaa * bbbb * cccc + 27 * aaaa ** 2 * dddd + \
(cf * np.sqrt(det_3))) ** (1. / 3)) / (
6. * 2 ** (1. / 3) * aaaa)
# Lowest root is the critical load
nn_cr_flex_tor = min(abs(n_cr_1), abs(n_cr_2), abs(n_cr_3), n_tor)
# Return the critical load
return nn_cr_flex_tor
def lmbda_flex(
length,
area,
moi_y,
kapa_bc=None,
e_modulus=None,
f_yield=None
):
# Docstring
"""
Flexural slenderness.
Calculates the slenderness of a column under pure compression.
Euler's critical load is used.
Parameters
----------
length : float
[mm] Column length
area : float
[mm^2] Cross section area
moi_y : float
[mm^4] Moment of inertia
kapa_bc : float, optional
[_] length correction for the effect of the boundary conditions.
Default = 1, which implies simply supported column
e_modulus : float, optional
[MPa] Modulus of elasticity
Default = 210000., typical value for steel
f_yield : float, optional
[MPa] yield stress.
Default = 380., because this value was used extensively while the
function was being written. To be changed to 235.
Returns
-------
float
[_] Member slenderness
"""
# default values
if kapa_bc is None:
kapa_bc = 1.
else:
kapa_bc = float(kapa_bc)
if e_modulus is None:
e_modulus = 210000.
else:
e_modulus = float(e_modulus)
if f_yield is None:
f_yield = 380.
else:
f_yield = float(f_yield)
# Calculate Euler's critical load
n_cr = n_cr_flex(
length,
moi_y,
e_modulus=e_modulus,
kapa_bc=kapa_bc
)
# Flexural slenderness EN3-1-1 6.3.1.3 (1)
lmbda_flexx = np.sqrt(area * f_yield / n_cr)
# Return the result
return lmbda_flexx
def imp_factor(b_curve):
# Docstring
"""
Imperfection factor.
Returns the imperfection factor for a given buckling curve.
The values are taken from Table 6.1 of EN1993-1-1 [1]
Parameters
----------
b_curve : {'a0', 'a', 'b', 'c', 'd'}
[_] Name of the buckling curve as obtained from Table 6.2 of [1].
Returns
-------
float
[_] Imperfection factor.
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-1: General rules and rules for buildings.
Brussels: CEN, 2005.
"""
switcher = {
'a0': 0.13,
'a': 0.21,
'b': 0.34,
'c': 0.49,
'd': 0.76,
}
return switcher.get(b_curve, "nothing")
def chi_flex(
length,
area,
moi_y,
f_yield,
b_curve,
kapa_bc=None
):
# Docstring
"""
Flexural buckling reduction factor.
Calculates the reduction factor, chi, according to EN1993-1-1 6.3.1.2
Parameters
----------
length : float
[mm] Column length
area : float
[mm^2] Cross section area
moi_y : float
[mm^4] Moment of inertia
f_yield : float
[MPa] Yield stress.
b_curve : str
[_] Name of the buckling curve as obtained from Table 6.2 of [1].
Valid options are {'a0', 'a', 'b', 'c', 'd'}
kapa_bc : float, optional
[_] length correction for the effect of the boundary conditions.
Default = 1, which implies simply supported column
Returns
-------
float
[_] Reduction factor.
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-1: General rules and rules for buildings.
Brussels: CEN, 2005.
"""
if kapa_bc is None:
kapa_bc = 1.
lmda = lmbda_flex(
length=length,
area=area,
moi_y=moi_y,
kapa_bc=kapa_bc,
e_modulus=None,
f_yield=f_yield
)
alpha = imp_factor(b_curve)
phi = (1 + alpha * (lmda - 0.2) + lmda ** 2) / 2.
chi = 1 / (phi + np.sqrt(phi ** 2 - lmda ** 2))
if chi > 1.:
chi = 1.
return chi
def n_b_rd(
length,
area,
moi_y,
f_yield,
b_curve,
kapa_bc=None,
gamma_m1=None
):
# Docstring
"""
Flexural buckling resistance.
Verifies the resistance of a column against flexural buckling
according to EN1993-1-1 6.3.1.1.
Parameters
----------
length : float
[mm] Column length
area : float
[mm^2] Cross section area
moi_y : float
[mm^4] Moment of inertia
f_yield : float
[MPa] Yield stress.
b_curve : str
[_] Name of the buckling curve as obtained from Table 6.2 of [1].
Valid options are: {'a0', 'a', 'b', 'c', 'd'}
kapa_bc : float, optional
[_] Length correction for the effect of the boundary conditions.
Default = 1, which implies simply supported column
gamma_m1 : float, optional
[_] Partial safety factor.
Default = 1.
Returns
-------
float
[N] Buckling resistance.
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-1: General rules and rules for buildings.
Brussels: CEN, 2005.
"""
if kapa_bc is None:
kapa_bc = 1.
if gamma_m1 is None:
gamma_m1 = 1.
chi = chi_flex(length,
area,
moi_y,
f_yield,
b_curve,
kapa_bc=kapa_bc)
nn_b_rd = area * f_yield * chi / gamma_m1
return nn_b_rd
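# Illustrative usage (added; the member below is hypothetical):
#   n_b_rd(length=3000., area=5000., moi_y=2.0e7, f_yield=355., b_curve='c')
# returns the design buckling resistance chi * A * f_y / gamma_m1 in [N].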
# CONNECTIONS
def bolt_grade2stress(bolt_grade):
# Docstring
"""
Convert bolt grade to yield and ultimate stress.
Standard designation for bolt grade as a decimal is converted to yield and ultimate stress values in MPa. In the
standard bolt grade designation, the integer part of the number represents the ultimate stress in MPa/100 and the
decimal part is the yield stress as a percentage of the ultimate (e.g 4.6 is f_u = 400, f_y = 400 * 0.6 = 240).
Parameters
----------
bolt_grade : float
Returns
-------
tuple : (f_ultimate, f_yield)
"""
# Calculation using divmod
f_ultimate = 100 * divmod(bolt_grade, 1)[0]
f_yield = round(f_ultimate * divmod(bolt_grade, 1)[1])
# Return values
return f_ultimate, f_yield
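# Worked examples (added for clarity):
#   bolt_grade2stress(4.6)  -> (400.0, 240)   # f_u = 400 MPa, f_y = 0.6 * 400 = 240 MPa
#   bolt_grade2stress(8.8)  -> (800.0, 640)   # f_u = 800 MPa, f_y = 0.8 * 800 = 640 MPa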
def shear_area(bolt_size, shear_threaded=None):
# Docstring
"""
Shear area of a bolt.
Returns the area to be used for the calculation of the shear resistance of a bolt: either the gross cross-section of the
bolt (circle area) or the reduced area of the threaded part of the bolt.
Parameters
----------
bolt_size : float
Bolt's diameter.
shear_threaded : bool, optional
Designates if the shear plane is on the threaded portion or not.
Default is False, which implies shearing of the non-threaded portion
Returns
-------
float
Notes
-----
Currently, the threaded area is based on an average reduction of the shank area. To be changed to analytic formula.
"""
# Default
if shear_threaded is None:
shear_threaded = False
# Calculate area
if shear_threaded:
a_shear = 0.784 * (np.pi * bolt_size ** 2 / 4)
else:
a_shear = np.pi * bolt_size ** 2 / 4
# Return
return a_shear
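# Worked example (added; an M16 bolt is assumed):
#   shear_area(16)                       -> ~201.1 mm^2  (gross shank area, pi * 16 ** 2 / 4)
#   shear_area(16, shear_threaded=True)  -> ~157.6 mm^2  (0.784 * gross area)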
def f_v_rd(
bolt_size,
bolt_grade,
shear_threaded=None,
gamma_m2=None
):
# Docstring
"""
Bolt's shear resistance.
Calculates the shear resistance of single bolt for one shear plane as given in table 3.4 of EC3-1-8.
Parameters
----------
bolt_size : float
Diameter of the non-threaded part (nominal bolt size e.g. M16 = 16)
bolt_grade : float
Bolt grade in standard designation format (see documentation of bolt_grade2stress())
shear_threaded : bool, optional
Designates if the shear plane is on the threaded portion or not.
Default is False, which implies shearing of the non-threaded portion
gamma_m2 : float, optional
Safety factor.
Default value is 1.25
Returns
-------
float
"""
# Defaults
bolt_size = float(bolt_size)
if shear_threaded is None:
shear_threaded = False
if gamma_m2 is None:
gamma_m2 = 1.25
else:
gamma_m2 = float(gamma_m2)
# av coefficient
if shear_threaded and bolt_grade in (4.6, 8.6):
a_v = 0.5
else:
a_v = 0.6
# Get ultimate stress for bolt
f_ub = bolt_grade2stress(bolt_grade)[0]
# Shear area
a_shear = shear_area(bolt_size, shear_threaded)
# Shear resistance
ff_v_rd = a_v * f_ub * a_shear / gamma_m2
# Return value
return ff_v_rd
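# Worked example (added; an M16 grade 8.8 bolt with the default gamma_m2 = 1.25 is assumed):
#   f_v_rd(16, 8.8)  ->  ~77.2e3 N   (0.6 * 800 MPa * 201.1 mm^2 / 1.25)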
def bolt_min_dist(d_0):
"""
Minimum bolt spacing.
:param d_0:
:return:
"""
e_1 = 1.2 * d_0
e_2 = 1.2 * d_0
e_3 = 1.5 * d_0
p_1 = 2.2 * d_0
p_2 = 2.4 * d_0
return e_1, e_2, e_3, p_1, p_2
def f_b_rd(bolt_size, bolt_grade, thickness, steel_grade, f_yield, distances, d_0):
"""
Connection bearing capacity.
Calculates the bearing capacity of a single bolt on a plate. The distances to the plate edges/other bolts are
described
:param bolt_size:
:param bolt_grade:
:param thickness:
:param steel_grade:
:param f_yield:
:param distances:
:param d_0:
:return:
"""
pass
def f_weld_perp():
# f_w_1 = (sqrt(2) / 2) * a_weld * l_weld * f_ult / (b_w * gamma_m2)
# f_w_2 = 0.9 * f_ult * a_weld * l_weld * sqrt(2) / gamma_m2
pass
def f_weld_paral():
pass
def bolt2washer(m_bolt):
"""
Washer diameter.
Return the diameter of the washer for a given bolt diameter.
The calculation is based on a function derived from linear regression
on ENXXXXXXX[REF].
Parameters
----------
m_bolt : float
Bolt diameter
Attributes
----------
Notes
-----
References
----------
"""
d_washer = np.ceil(1.5893 * m_bolt + 5.1071)
return d_washer
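# Worked example (added): for an M16 bolt,
#   bolt2washer(16)  ->  31.0   (np.ceil(1.5893 * 16 + 5.1071))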
def mean_list(numbers):
"""
Mean value.
Calculate the average for a list of numbers.
Parameters
----------
numbers : list
Attributes
----------
Notes
-----
References
----------
"""
return float(sum(numbers)) / max(len(numbers), 1)
|
a = [10, 7, 5, 4]
while True:
m = max(a)
index = a.index(m)
a = [x + 1 for x in a]
a[index] -= 4
print(a)
|
import random
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def country_to_columns(full_data):
return pd.get_dummies(full_data["Country"])
def city_to_columns(full_data):
return pd.get_dummies(full_data["City"])
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to the city temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
full_data = pd.read_csv(filename, parse_dates=["Date"]).\
dropna().drop_duplicates()
full_data = full_data[full_data["Temp"] > -72]
day_of_year = []
for date in full_data["Date"]:
day_of_year.append(pd.Period(date, "D").day_of_year)
# cities = city_to_columns(full_data)
# countries = country_to_columns(full_data)
full_data["day_of_year"] = day_of_year
features = full_data[["Country", "City", "day_of_year",
"Year", "Month", "Day"]]
# features = pd.concat([features, cities], axis=1)
# features = pd.concat([features, countries], axis=1)
labels = full_data["Temp"]
return features, labels
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
df, responses = load_data(
"C:\\Users\\idoro\\Desktop\\IML\\datasets\\City_Temperature.csv")
# Question 2 - Exploring data for specific country
israel_without_temp = df[df["Country"] == "Israel"]
features_with_temp = pd.concat([df, responses], axis=1)
israel_with_temp = \
features_with_temp[features_with_temp["Country"] == "Israel"]
israel_temp = responses.reindex_like(israel_with_temp)
fig = px.scatter(pd.DataFrame({"x": israel_with_temp["day_of_year"],
"y": israel_temp}), x="x", y="y",
labels={"x": "Day of year", "y": "Temperature"},
title="The temperature as a function of the day of year",
color=israel_with_temp["Year"].astype(str))
fig.show()
israel_by_month = israel_with_temp.groupby("Month").agg({"Temp": "std"})
months = israel_by_month.index  # keep the month order consistent with the groupby result
fig2 = px.bar(israel_by_month, x=months, y="Temp",
labels={"x": "Month", "Temp": "Standard Deviation"},
title="The standard deviation of the daily temperatures "
"as a function of months")
fig2.show()
# Question 3 - Exploring differences between countries
grouped_by_country = features_with_temp.groupby(["Country", "Month"])
country_month_mean = grouped_by_country.mean().reset_index()
country_month_std = grouped_by_country.std().reset_index()
country_month_mean.insert(1, "std", country_month_std["Temp"])
fig3 = px.line(country_month_mean, x="Month", y="Temp", error_y="std",
color="Country")
fig3.update_layout(title="The average and standard deviation as a "
"function of Country and Month",
xaxis_title="Month",
yaxis_title="Average month temperature")
fig3.show()
# Question 4 - Fitting model for different values of `k`
israel_features_train, israel_temp_train, israel_features_test, \
israel_temp_test = split_train_test(israel_without_temp, israel_temp)
israel_losses = []
for k in range(1, 11):
poly_estimator = PolynomialFitting(k)
poly_estimator.fit((israel_features_train["day_of_year"]).to_numpy(),
israel_temp_train)
rounded_loss = np.round(poly_estimator.loss
(israel_features_test["day_of_year"].to_numpy(),
israel_temp_test), 2)
israel_losses.append(rounded_loss)
fig4 = px.bar(x=[i for i in range(1, 11)], y=israel_losses)
fig4.update_layout(title="The test error of the model as a function of "
"the polynomial degree",
xaxis_title="Polynomial Degree",
yaxis_title="Test Error")
fig4.show()
print(israel_losses)
# Question 5 - Evaluating fitted model on different countries
min_k = np.argmin(israel_losses) + 1
israel_poly = PolynomialFitting(min_k)
israel_poly.fit(israel_without_temp["day_of_year"].to_numpy(),
israel_temp)
losses_by_countries = {}
countries = set(features_with_temp["Country"])
for country in countries:
if country == "Israel":
continue
features_by_country = df[df["Country"] == country]
temp_of_country = responses.reindex_like(features_by_country)
rounded_loss = np.round(israel_poly.loss(
features_by_country["day_of_year"].to_numpy(),
temp_of_country), 2)
losses_by_countries[country] = rounded_loss
fig5 = px.bar(x=losses_by_countries.keys(),
y=losses_by_countries.values(),
color=losses_by_countries.keys())
fig5.update_layout(title="The test error of the model fitted for Israel "
"as a function of the other countries"
"the polynomial degree",
xaxis_title="Country",
yaxis_title="Test Error")
fig5.show()
|
harness.add_runtime('softboundcets-O3', {"CC": "${CLANG}", "AS": "${CLANG}", "CFLAGS": "-O3 -fsoftboundcets -L${SOFTBOUND_RUNTIME_DIR}", "LDFLAGS": "-lm -lrt -lsoftboundcets_rt"})
|
import discord
import asyncio
import random
from discord.ext import commands
class MM(commands.Cog):
def __init__(self, client):
self.client = client
def botAdminCheck(ctx):
return ctx.message.author.id == 368671236370464769
# Guilds Checker
@commands.command()
@commands.guild_only()
@commands.check(botAdminCheck)
async def mmstart(self, ctx, members: commands.Greedy[discord.Member] = None):
list_role = ['Murder', 'Detective']
roles = {}
killed_people = []
guild = ctx.guild
channel = ctx.channel
color = 0xa05a4e
f = open('cogs/mm/lives.txt', 'w')
f.write("")
f.close()
# Refresh database
f = open('cogs/mm/murder.txt', 'w')
f.write("")
f.close()
# Refresh database
f = open('cogs/mm/actions.txt', 'w')
f.write("")
f.close()
meeting_cd = 10 # Meeting duration
voting_cd = 10 # Voting wait time
cooldown = 20
for i in range(0, len(members) - 2):
# appending bystanding roles with regards to the total number of participants
list_role.append('Bystanders')
bystanders = len(list_role)-2
new = True
for role in ctx.guild.roles:
if role.name == 'Participant': # making a participant roles for permission purposes
participant = role
new = False
if new:
participant = await guild.create_role(name='Participant', hoist=False)
lobby_init = await ctx.send(embed=discord.Embed(title='__**# Lobby**__', description=f'> Starting in 3 seconds...\n> \n> **__Roles__**\n> Murder \n> Detective \n> By Standers', color=color))
await asyncio.sleep(3) # lOBBY COOL DOWN
for member in members:
role = random.choice(list_role)
roles[member] = role
count = 0
await member.add_roles(participant)
for i in list_role:
if i == role:
list_role.pop(count)
count += 1
try:
await lobby_init.edit(embed=discord.Embed(title='__**# Lobby**__', description=f'> {member.mention} Please reply ready.\n> \n> **__Roles__**\n> Murder : 1\n> Detective : 1\n> By Standers : {bystanders}', color=color))
# msg = await self.client.wait_for('message', check=lambda message : message.content.lower() == 'ready' and message.channel == channel and message.author == member, timeout = 30)
except asyncio.TimeoutError:
await lobby_init.edit(embed=discord.Embed(title='__**# Lobby**__', description=f'{member.mention} is inactive - Lobby failed to start.', color=discord.Color.dark_red()))
return
to_be_edited = await ctx.send('> Game is starting in `5` seconds....')
await asyncio.sleep(5)
await to_be_edited.delete()
ids = 1 # 1 Chill pill center, 2 MIS
if ids == 1:
m_channel_id = 774215902433509386
d_channel_id = 774215942048710687 # THE CHILL PILL CENTER
meeting = 774215983610200064
elif ids == 2:
m_channel_id = 774201930892705822 # MIS
d_channel_id = 774201944431656972
meeting = 774201910852976640
meeting_ch = ctx.guild.get_channel(meeting)
m_channel = ctx.guild.get_channel(m_channel_id)
d_channel = ctx.guild.get_channel(d_channel_id)
await meeting_ch.purge(limit=200)
await d_channel.purge(limit=200)
await m_channel.purge(limit=200)
# Disable everyones permissions to see any gaming channels
await meeting_ch.set_permissions(ctx.guild.default_role, read_messages=False, send_messages=False)
await d_channel.set_permissions(ctx.guild.default_role, read_messages=False, send_messages=False)
await m_channel.set_permissions(ctx.guild.default_role, read_messages=False, send_messages=False)
######## GAME STARTS ########
await meeting_ch.send('> @everyone Meeting starts in 10 seconds!')
await meeting_ch.set_permissions(participant, read_messages=True, send_messages=False)
f = open('cogs/mm/lives.txt', 'a')
for member in members:
if roles[member] == 'Murder':
murder = member
await m_channel.set_permissions(murder, read_messages=True, send_messages=False)
b = open('cogs/mm/murder.txt', 'w')
b.write(f'{member.id}')
b.close()
f.write(f'{member.id}\n')
elif roles[member] == 'Detective':
detective = member
await d_channel.set_permissions(detective, read_messages=True, send_messages=False)
f = open('cogs/mm/lives.txt', 'a')
f.write(f'{member.id}\n')
elif roles[member] == 'Bystanders':
f = open('cogs/mm/lives.txt', 'a')
f.write(f'{member.id}\n')
f.close()
await m_channel.send(embed=discord.Embed(description=f'{murder.mention} you have been chosen as the murderer! You will have the choice to kill someone every night!', color=0x800000))
await d_channel.send(embed=discord.Embed(description=f'{detective.mention} you have been chosen as the detective! You will have the choice to inspect someone every night!', color=0x6050dc))
######## Identify certain bystanders for the detective ########
embed = discord.Embed(
title='Bystanders', description="Detective, we have identified some bystanders for you, we really hope it helps!", color=0x6050dc)
count = 0
for member in members:
if count != random.randint(0, 3):
if roles[member] == 'Bystanders':
embed.add_field(
name=f'{member.display_name}', value='is a confirmed bystander!')
else:
pass
count += 1
await d_channel.send(embed=embed)
####### First ever setup meeting starts #######
# 10 sec before meeting begins
await asyncio.sleep(10)
f = open('cogs/mm/lives.txt', 'r')
alive_list = f.read().split('\n') # Retrieve member data
f.close()
alive_list = [int(i) for i in alive_list if i != ""]
initial_list = alive_list.copy()
# Filter out murder from the member data set
without_mrd = [int(i) for i in alive_list if int(i) != int(murder.id)]
text = await meeting_ch.send(embed=discord.Embed(description='> Meeting has started! Introduce yourselves! You all have 50 seconds to talk. Prove your innocence.\n@everyone', color=color))
await meeting_ch.set_permissions(participant, read_messages=True, send_messages=True)
# 50 sec meeting cool down
await asyncio.sleep(meeting_cd)
await meeting_ch.send(embed=discord.Embed(description='Meeting has ended.', color=color))
await meeting_ch.set_permissions(participant, read_messages=True, send_messages=False)
await d_channel.set_permissions(detective, read_messages=True, send_messages=True)
await m_channel.set_permissions(murder, read_messages=True, send_messages=True)
f = open('cogs/mm/murder.txt', 'r')
f_murder = f.read().split('\n') # Retrieve member data
f.close()
f_murder = [int(i) for i in f_murder if i != ""]
murder = f_murder[0]
list2 = alive_list.copy()
while (len(without_mrd)) > 1:
await asyncio.sleep(cooldown)
f = open('cogs/mm/actions.txt', 'r')
actions = f.read().split('~')
actions = [i for i in actions if i != ""]
f.close()
killed = [i for i in actions if (list(i)[0]) == 'K']
if killed:
victim = ctx.guild.get_member(int(killed[0][1:]))
await meeting_ch.send(f'{victim.mention} got killed last night!')
await meeting_ch.set_permissions(victim, read_messages=True, send_messages=False)
await victim.remove_roles(participant)
alive_list = [i for i in alive_list if i != int(killed[0][1:])]
killed_people.append(victim)
if not len(f_murder) > 0:
await meeting_ch.send(embed=discord.Embed(description='Hip Hip Hooray! The murder is gone for good.', color=color))
break
if len(list2) <= 2:
await meeting_ch.send(f'{ctx.author.mention} <@{murder}> the town murder has killed enough bystanders and won! ')
break
murder = ctx.guild.get_member(murder)
await meeting_ch.set_permissions(participant, read_messages=True, send_messages=False)
await d_channel.set_permissions(detective, read_messages=True, send_messages=False)
await m_channel.set_permissions(murder, read_messages=True, send_messages=False)
f = open('cogs/mm/lives.txt', 'r')
alive_list = f.read().split('\n')
f.close()
alive_list = [int(i) for i in alive_list if i != ""]
# list2 = alive_list.copy()
await asyncio.sleep(9)
# refined_set = set(initial_list) - set(list2)
# if len(list(refined_set)) > 0:
# initial_list = [i for i in list(initial_list) if i != list(refined_set)[0]] # Restart the main member volume
# for i in range(0,len(list(refined_set))):
# await meeting_ch.send(f'<@{list(refined_set)[i]}> got killed last night.')
# else:
# pass
text = await meeting_ch.send(embed=discord.Embed(description='Meeting has started! Introduce yourselves! You all have 50 seconds to talk. Prove your innocence.\n@everyone', color=color))
await meeting_ch.set_permissions(participant, read_messages=True, send_messages=True)
await asyncio.sleep(meeting_cd)
############### VOTING ###############
embed = discord.Embed(
title="Vote out the most suspicious person (Needs majority to get voted out)(If you wish to skip, avoid voting anyone)!", color=color)
emojis = ['\u0031\ufe0f\u20e3', '\u0032\ufe0f\u20e3', '\u0033\ufe0f\u20e3', '\u0034\ufe0f\u20e3',
'\u0035\ufe0f\u20e3', '\u0036\ufe0f\u20e3', '\u0037\ufe0f\u20e3', '\u0038\ufe0f\u20e3', '\u0039\ufe0f\u20e3', '🔟']
count = 0
for member in list2:
# Makes a votable embed list with every member
embed.add_field(
name=f'\u200b', value=f'{emojis[count]} <@{member}>')
count += 1
message = await meeting_ch.send(embed=embed)
for i in range(0, len(list2)):
# Adds reactions to the embed with regards to the all members alive
await message.add_reaction(f'{emojis[i]}')
await asyncio.sleep(voting_cd)
final = await meeting_ch.fetch_message(message.id)
# Fetch aftervoting results
reactions = final.reactions
highest = 0
tie = False
for reaction in reactions:
if (counter := int(reaction.count)) > highest:
voted_emoji = reaction.emoji
highest = counter
tie = False
elif (counter := int(reaction.count)) == highest: # Checks the votes
tie = True
if highest <= 1:
tie = False
index = 0
for emoji in emojis:
if emoji == voted_emoji: # Gets the position of the highly voted emoji to retrieve the member
break
index += 1
# If the majority votes one highest person
if highest >= (len(list2) / 2) and not tie:
# The selected person
await meeting_ch.send(f'<@{list2[index]}> has been voted out!')
on_alive_list = list2.copy()
c = 0
for i in on_alive_list:
if int(i) == list2[index]:
on_alive_list.pop(c)
c += 1
f = open('cogs/mm/lives.txt', 'w')
for i in on_alive_list:
f.write(f'{i}\n')
f.close()
if tie:
await meeting_ch.send('There has been a tie!')
elif highest == 1:
await meeting_ch.send('No one has voted! ')
############################################ VOTING ############################################
color = 0x6050dc
embed = discord.Embed(
title="Vote out the most suspicious person (Needs majority to get voted out)(If you wish to skip, avoid voting anyone)!", color=color)
emojis = ['\u0031\ufe0f\u20e3', '\u0032\ufe0f\u20e3', '\u0033\ufe0f\u20e3', '\u0034\ufe0f\u20e3',
'\u0035\ufe0f\u20e3', '\u0036\ufe0f\u20e3', '\u0037\ufe0f\u20e3', '\u0038\ufe0f\u20e3', '\u0039\ufe0f\u20e3', '🔟']
list2 = []
count = 0
for member in list2:
# Makes a votable embed list with every member
embed.add_field(
name=f'\u200b', value=f'{emojis[count]} <@{member}>')
count += 1
message = await meeting_ch.send(embed=embed)
for i in range(0, len(list2)):
# Adds reactions to the embed with regards to the all members alive
await message.add_reaction(f'{emojis[i]}')
await asyncio.sleep(10)
final = await meeting_ch.fetch_message(message.id)
# Fetch aftervoting results
reactions = final.reactions
highest = 0
tie = False
for reaction in reactions:
if (counter := int(reaction.count)) > highest:
voted_emoji = reaction.emoji
highest = counter
elif (counter := int(reaction.count)) == highest: # Checks the votes
tie = True
if highest <= 1:
tie = False
index = 0
for emoji in emojis:
if emoji == voted_emoji: # Gets the position of the highly voted emoji to retrieve the member
break
index += 1
low = False
if not tie and highest > 1: # If the majority votes one highest person
# The selected person
await meeting_ch.send(f'<@{list2[index]}> has the majority vote!')
on_alive_list = list2.copy()
c = 0
for i in on_alive_list: # Makes a new list and removes the id of the person who got voted out
if i == list2[index]:
on_alive_list.pop(c)
c += 1
f = open('cogs/mm/lives.txt', 'w')
for i in on_alive_list:
f.write(f'{i}\n')
f.close()
elif tie and not low:
await meeting_ch.send('There has been a tie!')
elif highest == 1:
await meeting_ch.send('No one has voted!')
else:
await meeting_ch.send('There were no votes, or too few to count!')
low = True
########################################
await meeting_ch.send(embed=discord.Embed(description='Meeting has ended.', color=color))
await meeting_ch.set_permissions(participant, read_messages=True, send_messages=False)
await d_channel.set_permissions(detective, read_messages=True, send_messages=True)
await m_channel.set_permissions(murder, read_messages=True, send_messages=True)
without_mrd = [int(i) for i in alive_list if int(i) != int(murder)]
for member in members:
await member.remove_roles(participant)
for member in killed_people:
await meeting_ch.set_permissions(member, read_messages=False, send_messages=False)
@commands.command()
@commands.cooldown(rate=1, per=10)
@commands.guild_only()
async def kill(self, ctx):
ids = 1 # 1 Chill pill center, 2 MIS
if ids == 1:
m_channel_id = 774215902433509386
d_channel_id = 774215942048710687 # THE CHILL PILL CENTER
meeting = 774215983610200064
elif ids == 2:
m_channel_id = 774201930892705822 # MIS
d_channel_id = 774201944431656972
meeting = 774201910852976640
meeting_ch = ctx.guild.get_channel(meeting)
color = 0x800000
if ctx.channel.id == m_channel_id:
f = open('cogs/mm/murder.txt', 'r')
murder = f.read().split('\n')
f.close()
f = open('cogs/mm/lives.txt', 'r')
alive_list = f.read().split('\n')
f.close()
alive_list = [i for i in alive_list if i != "" and i != murder[0]]
embed = discord.Embed(
title=f"POPULATION - {len(alive_list)}", color=color)
count = 1
for i in alive_list:
embed.add_field(
name=f'\u200b', value=f'**{count}** <@{i}>', inline=False)
count += 1
embed.set_footer(
text='Please reply with the index of the person to kill!')
try:
text = await ctx.send(embed=embed)
msg = await self.client.wait_for('message', check=lambda message: message.channel.id == m_channel_id and message.author == ctx.author, timeout=30)
except asyncio.TimeoutError:
await ctx.send('Kill timed out')
else:
index = int(msg.content) - 1
await text.edit(embed=discord.Embed(description=f'Killed <@{alive_list[index]}>!', color=color))
f = open('cogs/mm/actions.txt', 'w')
f.write(f'~K{alive_list[index]}~')
f.close()
@kill.error
async def kill_error_handler(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send("Kill is on cooldown!")
@commands.command()
@commands.cooldown(rate=1, per=10)
@commands.guild_only()
async def inspect(self, ctx):
ids = 1 # 1 Chill pill center, 2 MIS
if ids == 1:
m_channel_id = 774215902433509386
d_channel_id = 774215942048710687 # THE CHILL PILL CENTER
meeting = 774215983610200064
elif ids == 2:
m_channel_id = 774201930892705822 # MIS
d_channel_id = 774201944431656972
meeting = 774201910852976640
# if ctx.channel.id == d_channel_id:
# color = 0x6050dc
# f = open('cogs/mm/lives.txt', 'r')
# alive_list = f.read().split('\n')
# f.close()
# alive_list = [int(i) for i in alive_list if i != ""]
#
# f = open('cogs/mm/murder.txt', 'r')
# murder = f.read()
# f.close()
#
# embed = discord.Embed(title=f"POPULATION - {len(alive_list)}", color = color)
# count = 1
# for i in alive_list:
# embed.add_field(name = f'\u200b', value =f'**{count}** <@{i}>', inline = False)
# count += 1
# embed.set_footer(text='Please reply with the index of the person you would like to interrogate!')
# try:
# await ctx.send(embed=embed)
# msg = await self.client.wait_for('message', check=lambda message : message.channel.id == d_channel_id , timeout =60)
# except asyncio.TimeoutError:
# await ctx.send('Inspect timed out')
# else:
# index = int(msg.content) - 1
# choice = random.randint(0,5)
# if choice > 3:
# if alive_list[index] == murder:
# await ctx.send(embed = discord.Embed(title="RESULTS", color = color, description=f"<@{alive_list[index]}> is highly suspicious, better watch out for them."))
# else:
# await ctx.send(embed = discord.Embed(title="RESULTS", color = color, description= f"<@{alive_list[index]}> is clear you can trust in them!"))
# elif choice <= 3:
# await ctx.send(embed=discord.Embed(title="RESULTS", color = color,description= f"<@{alive_list[index]}> is unclear, you may inspect this person tomorrow again!"))
@inspect.error
async def inspect_error_handler(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
for role in ctx.guild.roles:
if role.name == 'Participant':
participant = role
await ctx.send("Inspection is on cooldown!")
@commands.command()
async def voting(self, ctx, members: commands.Greedy[discord.Member] = None):
m_channel_id = ctx.channel.id
color = 0x800000
if ctx.channel.id == m_channel_id:
            with open('cogs/mm/murder.txt', 'r') as f:
                murder = f.read().split('\n')
            murder = [i for i in murder if i != ""]
            with open('cogs/mm/lives.txt', 'r') as f:
                alive_list = f.read().split('\n')
            # drop empty entries left by trailing newlines
            alive_list = [i for i in alive_list if i != ""]
embed = discord.Embed(
title=f"POPULATION - {len(alive_list)}", color=color)
count = 1
for i in alive_list:
embed.add_field(
name=f'\u200b', value=f'**{count}** <@{i}>', inline=False)
count += 1
embed.set_footer(
text='Please reply with the index of the person to kill!')
try:
text = await ctx.send(embed=embed)
msg = await self.client.wait_for('message', check=lambda message: message.channel.id == m_channel_id and message.author == ctx.author, timeout=30)
except asyncio.TimeoutError:
await ctx.send('Kill timed out')
else:
index = int(msg.content) - 1
await text.edit(embed=discord.Embed(description=f'Killed <@{alive_list[index]}>!', color=color))
                with open('cogs/mm/actions.txt', 'w') as f:
                    f.write(f'~K{alive_list[index]}~')
def setup(client):
client.add_cog(MM(client))
print('MM.cog is loaded')
|
import pytari2600.memory.cartridge as cartridge
import unittest
import pkg_resources
class TestCartridge(unittest.TestCase):
def test_cartridge(self):
cart = cartridge.GenericCartridge(pkg_resources.resource_filename(__name__, 'dummy_rom.bin'), 4, 0x1000, 0xFF9, 0x0)
# Write should do nothing
cart.write(0,7)
self.assertEqual(cart.read(0), 0)
self.assertEqual(cart.read(3), 3)
self.assertEqual(cart.read(2048+2), 2)
def test_ram_cartridge(self):
cart = cartridge.GenericCartridge(pkg_resources.resource_filename(__name__, 'dummy_rom.bin'), 4, 0x1000, 0xFF9, 0x080)
# Write should go to ram.
cart.write(0,7)
self.assertEqual(cart.read(0x80), 7)
cart.write(0,31)
self.assertEqual(cart.read(0x80), 31)
if __name__ == '__main__':
unittest.main()
|
from typing import Callable
# noinspection PyCompatibility
from concurrent.futures import Future
from .predicated_work_subscription_event_listener import PredicatedWorkSubscriptionEventListener
from yellowdog_client.scheduler import work_client as wc
from yellowdog_client.model import WorkRequirement
from yellowdog_client.model import WorkRequirementStatus
class WorkRequirementHelper:
"""
This class provides a number of methods that return a :class:`concurrent.futures.Future` allowing consumers to
simply wait for the required state of a work requirement before continuing on.
Constructor accepts the following **arguments**:
:param work_requirement: The work requirement.
:type work_requirement: :class:`yellowdog_client.model.WorkRequirement`
:param work_service_client_impl: The scheduler service client.
:type work_service_client_impl: :class:`yellowdog_client.scheduler.WorkClient`
.. seealso::
Use :meth:`yellowdog_client.scheduler.WorkClientImpl.get_work_requirement_helper` for easier access
to the :class:`yellowdog_client.scheduler.WorkRequirementHelper`
.. code-block:: python
helper = client.work_client.get_work_requirement_helper(work_requirement)
# WorkRequirementHelper
.. versionadded:: 0.4.0
"""
_work_requirement = None # type: WorkRequirement
_work_service_client_impl = None # type: wc.WorkClient
def __init__(self, work_requirement, work_service_client_impl):
# type: (WorkRequirement, wc.WorkClient) -> None
self._work_requirement = work_requirement
self._work_service_client_impl = work_service_client_impl
def when_requirement_matches(self, predicate):
# type: (Callable[[WorkRequirement], bool]) -> Future
"""
Returns a :class:`concurrent.futures.Future` that is completed when the specified predicate evaluates to true.
:param predicate: The predicate to test for each work requirement changed event received.
:type predicate: Callable[[:class:`yellowdog_client.model.WorkRequirement`], :class:`bool`]
:return: A :class:`concurrent.futures.Future` containing the matching work requirement.
:rtype: :class:`concurrent.futures.Future`
.. code-block:: python
from concurrent import futures
from yellowdog_client.model import WorkRequirementStatus
future = helper.when_requirement_matches(lambda req: req.status == WorkRequirementStatus.COMPLETED)
futures.wait(fs=(future,))
future_work_requirement = future.result()
# WorkRequirement
"""
future = Future()
future.set_running_or_notify_cancel()
listener = PredicatedWorkSubscriptionEventListener(
future=future,
predicate=predicate,
work_client=self._work_service_client_impl
)
self._work_service_client_impl.add_work_requirement_listener(
work_requirement=self._work_requirement,
listener=listener
)
listener.updated(
obj=self._work_service_client_impl.get_work_requirement(
work_requirement=self._work_requirement
)
)
return future
def when_requirement_status_is(self, status):
# type: (WorkRequirementStatus) -> Future
"""
        Returns a :class:`concurrent.futures.Future` that is completed when the work requirement status matches the specified status.
:param status: The work requirement status to wait for.
:type status: :class:`yellowdog_client.model.WorkRequirementStatus`
:return: A :class:`concurrent.futures.Future` containing the matching work requirement.
:rtype: :class:`concurrent.futures.Future`
.. code-block:: python
from concurrent import futures
from yellowdog_client.model import WorkRequirementStatus
future = helper.when_requirement_status_is(WorkRequirementStatus.COMPLETED)
futures.wait(fs=(future,))
future_work_requirement = future.result()
# WorkRequirement
"""
return self.when_requirement_matches(lambda requirement: requirement.status == status)
|
import os, logging
import tool_shed.util.shed_util_common as suc
import tool_shed.util.metadata_util as metadata_util
from galaxy.web.form_builder import SelectField
def build_approved_select_field( trans, name, selected_value=None, for_component=True ):
options = [ ( 'No', trans.model.ComponentReview.approved_states.NO ),
( 'Yes', trans.model.ComponentReview.approved_states.YES ) ]
if for_component:
options.append( ( 'Not applicable', trans.model.ComponentReview.approved_states.NA ) )
if selected_value is None:
selected_value = trans.model.ComponentReview.approved_states.NA
select_field = SelectField( name=name )
for option_tup in options:
selected = selected_value and option_tup[ 1 ] == selected_value
select_field.add_option( option_tup[ 0 ], option_tup[ 1 ], selected=selected )
return select_field
def build_changeset_revision_select_field( trans, repository, selected_value=None, add_id_to_name=True, downloadable=False, reviewed=False, not_reviewed=False ):
"""Build a SelectField whose options are the changeset_rev strings of certain revisions of the received repository."""
options = []
changeset_tups = []
refresh_on_change_values = []
if downloadable:
# Restrict the options to downloadable revisions.
repository_metadata_revisions = repository.downloadable_revisions
elif reviewed:
# Restrict the options to revisions that have been reviewed.
repository_metadata_revisions = []
metadata_changeset_revision_hashes = []
for metadata_revision in repository.metadata_revisions:
metadata_changeset_revision_hashes.append( metadata_revision.changeset_revision )
for review in repository.reviews:
if review.changeset_revision in metadata_changeset_revision_hashes:
repository_metadata_revisions.append( review.repository_metadata )
elif not_reviewed:
# Restrict the options to revisions that have not yet been reviewed.
repository_metadata_revisions = []
reviewed_metadata_changeset_revision_hashes = []
for review in repository.reviews:
reviewed_metadata_changeset_revision_hashes.append( review.changeset_revision )
for metadata_revision in repository.metadata_revisions:
if metadata_revision.changeset_revision not in reviewed_metadata_changeset_revision_hashes:
repository_metadata_revisions.append( metadata_revision )
else:
# Restrict the options to all revisions that have associated metadata.
repository_metadata_revisions = repository.metadata_revisions
for repository_metadata in repository_metadata_revisions:
rev, label, changeset_revision = metadata_util.get_rev_label_changeset_revision_from_repository_metadata( trans, repository_metadata, repository=repository )
changeset_tups.append( ( rev, label, changeset_revision ) )
refresh_on_change_values.append( changeset_revision )
# Sort options by the revision label. Even though the downloadable_revisions query sorts by update_time,
# the changeset revisions may not be sorted correctly because setting metadata over time will reset update_time.
for changeset_tup in sorted( changeset_tups ):
# Display the latest revision first.
options.insert( 0, ( changeset_tup[ 1 ], changeset_tup[ 2 ] ) )
if add_id_to_name:
name = 'changeset_revision_%d' % repository.id
else:
name = 'changeset_revision'
select_field = SelectField( name=name,
refresh_on_change=True,
refresh_on_change_values=refresh_on_change_values )
for option_tup in options:
selected = selected_value and option_tup[ 1 ] == selected_value
select_field.add_option( option_tup[ 0 ], option_tup[ 1 ], selected=selected )
return select_field
|
# read in all LAMOST labels
import numpy as np
import pyfits
from matplotlib import rc
from matplotlib import cm
import matplotlib as mpl
rc('font', family='serif')
rc('text', usetex=True)
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
direc = "/home/annaho/aida41040/annaho/TheCannon/data"
print("reading in all data")
teff = np.loadtxt(
"%s/lamost_dr2/lamost_labels_all_dates.csv" %direc, delimiter=',',
dtype='float', usecols=(1,), skiprows=1)
logg = np.loadtxt(
"%s/lamost_dr2/lamost_labels_all_dates.csv" %direc, delimiter=',',
dtype='float', usecols=(2,), skiprows=1)
feh = np.loadtxt(
"%s/lamost_dr2/lamost_labels_all_dates.csv" %direc, delimiter=',',
dtype='float', usecols=(3,), skiprows=1)
print("reading in apogee labels")
# read in apogee labels
tr_IDs = np.load("../tr_id.npz")['arr_0']
labels_apogee = np.load("../tr_label.npz")['arr_0']
apogee_teff = labels_apogee[:,0]
apogee_logg = labels_apogee[:,1]
apogee_feh = labels_apogee[:,2]
# read in lamost labels
print("reading in lamost labels")
a = pyfits.open("../../make_lamost_catalog/lamost_catalog_training.fits")
b = a[1].data
a.close()
IDs_lamost = b['lamost_id']
IDs_lamost = np.array([val.strip() for val in IDs_lamost])
teff_all_lamost = b['teff_1']
logg_all_lamost = b['logg_1']
feh_all_lamost = b['feh']
inds = np.array([np.where(IDs_lamost==a)[0][0] for a in tr_IDs])
lamost_teff = teff_all_lamost[inds]
lamost_logg = logg_all_lamost[inds]
lamost_feh = feh_all_lamost[inds]
# plot all
print("plotting")
fig, (ax0,ax1) = plt.subplots(ncols=2, figsize=(12,6),
sharex=True, sharey=True)
plt.subplots_adjust(wspace=0.3)
def dr1(ax):
ax.hist2d(teff,logg,bins=1000,norm=LogNorm(), cmap="Greys")
ax.set_ylim(ax0.get_ylim()[1],ax0.get_ylim()[0])
ax.set_xlim(ax0.get_xlim()[1], ax0.get_xlim()[0])
ax.set_xlim(7500, 3800)
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
dr1(ax0)
dr1(ax1)
cmap = cm.plasma
# plot training set, lamost
lamost_feh[lamost_feh>0.25]=0.25
lamost_feh[lamost_feh<-1.1]=-1.1
im = ax0.scatter(lamost_teff,lamost_logg,c=lamost_feh, s=1, lw=0, cmap=cmap)
cbar = plt.colorbar(im, ax=ax0, label="[Fe/H] [dex] from LAMOST DR2")
cbar.ax.tick_params(labelsize=16)
cbar.set_clim(-1.1,0.25)
ax0.set_xlabel("$\mbox{T}_{\mbox{eff}}$ [K]", fontsize=16)
ax0.set_ylabel("log g [dex]", fontsize=16)
ax0.text(0.05, 0.95, "Colored Points: reference set\nwith their LAMOST labels",
horizontalalignment='left', verticalalignment='top', transform=ax0.transAxes,
fontsize=16)
ax0.text(0.05, 0.80, "Black Points: \n Full LAMOST DR2", transform=ax0.transAxes,
fontsize=16, verticalalignment='top', horizontalalignment='left')
ax0.locator_params(nbins=5)
# plot training set, apogee
apogee_feh[apogee_feh>0.25] = 0.25
apogee_feh[apogee_feh<-1.1] = -1.1
im = ax1.scatter(apogee_teff,apogee_logg,c=apogee_feh, s=1, lw=0, cmap=cmap)
cbar = plt.colorbar(im, ax=ax1, label="[Fe/H] [dex] from APOGEE DR12")
cbar.ax.tick_params(labelsize=16)
cbar.set_clim(-1.1,0.25)
ax1.set_xlabel("${\mbox{T}_{\mbox{eff}}}$ [K]", fontsize=16)
ax1.set_ylabel("log g [dex]", fontsize=16)
ax1.locator_params(nbins=5)
ax1.text(0.05, 0.95, "Colored Points: reference set\nwith their APOGEE labels",
horizontalalignment='left', verticalalignment='top', transform=ax1.transAxes,
fontsize=16)
ax1.text(0.05, 0.80, "Black Points: \n Full LAMOST DR2", transform=ax1.transAxes,
fontsize=16, verticalalignment='top', horizontalalignment='left')
plt.subplots_adjust(top=0.85)
plt.show()
#plt.savefig("ts_in_full_lamost_label_space.png")
#plt.close()
|
## directory where the gps files are located
#GPS_FILE_DIRECTORY = "C:/Users/raulms/Documents/Python Scripts/Codes/5StopsCentroids/Input/"
GPS_FILE_DIRECTORY = "/home/pablo/projects/python/data/bhulan/sampledata/"
## file extension of the gps files
# for now only handling excel files
GPS_FILE_EXTENSION = "*.xlsx"
## name of the excel worksheet with GPS points
WORKSHEET_NAME = "Hoja1"
# minimum time at a given location that makes it a "stop" for the vehicle
MIN_STOP_TIME = 2
# radius of 20 meters for stop location.
# if a gps point is recorded within this radius of a stop location,
# it is considered part of the stop location
CONSTRAINT = .02
# hours that a vehicle has to stay at a stop for it to be considered a DC or home
DC_HOURS = 4
# radius in miles, to create a geo-fence around a city center and only
# consider points within that zone.
SANTIAGO_RADIUS = 60
# lat long for Santiago, used to calculate points within the city
# this is a hack, needs to be fixed
SANTI_LAT = '-33.469994'
SANTI_LON = '-70.642193'
# url to access cartodb
CARTO_URL = "https://<username>.cartodb.com/api/v1/imports/?api_key="
# api key for CARTO_DB. set this before sending data to cartodb
CARTO_DB_API_KEY = "<API_KEY_HERE>"
|
import numpy as np
import torch
import torch.nn as nn
from awave.losses import get_loss_f
from awave.utils.train import Trainer
class AbstractWT(nn.Module):
def fit(self,
X=None,
train_loader=None,
pretrained_model=None,
lr: float = 0.001,
num_epochs: int = 20,
seed: int = 42,
attr_methods='Saliency',
target=6,
lamlSum: float = 1.,
lamhSum: float = 1.,
lamL2norm: float = 1.,
lamCMF: float = 1.,
lamConv: float = 1.,
lamL1wave: float = 1.,
lamL1attr: float = 1.):
"""
Params
------
X: numpy array or torch.Tensor
For 1-d signals this should be 3-dimensional, (num_examples, num_curves_per_example, length_of_curve)
e.g. for 500 1-dimensional curves of length 40 would be (500, 1, 40)
train_loader: data_loader
each element should return tuple of (x, _)
pretrained_model: nn.Module, optional
pretrained model to distill
lamlSum : float
Hyperparameter for penalizing sum of lowpass filter
lamhSum : float
Hyperparameter for penalizing sum of highpass filter
lamL2norm : float
Hyperparameter to enforce unit norm of lowpass filter
lamCMF : float
Hyperparameter to enforce conjugate mirror filter
lamConv : float
Hyperparameter to enforce convolution constraint
lamL1wave : float
Hyperparameter for penalizing L1 norm of wavelet coeffs
lamL1attr : float
Hyperparameter for penalizing L1 norm of attributions
"""
torch.manual_seed(seed)
if X is None and train_loader is None:
raise ValueError('Either X or train_loader must be passed!')
elif train_loader is None:
if 'ndarray' in str(type(X)):
X = torch.Tensor(X).to(self.device)
# convert to float
X = X.float()
if self.wt_type == 'DWT2d':
X = X.unsqueeze(1)
# need to pad as if it had y (to match default pytorch dataloaders)
X = [(X[i], np.nan) for i in range(X.shape[0])]
train_loader = torch.utils.data.DataLoader(X,
shuffle=True,
batch_size=len(X))
# print(iter(train_loader).next())
params = list(self.parameters())
optimizer = torch.optim.Adam(params, lr=lr)
loss_f = get_loss_f(lamlSum=lamlSum, lamhSum=lamhSum,
lamL2norm=lamL2norm, lamCMF=lamCMF, lamConv=lamConv,
lamL1wave=lamL1wave, lamL1attr=lamL1attr)
trainer = Trainer(pretrained_model,
self,
optimizer,
loss_f,
use_residuals=True,
target=target,
attr_methods=attr_methods,
n_print=1, device=self.device)
# actually train
self.train()
trainer(train_loader, epochs=num_epochs)
self.train_losses = trainer.train_losses
self.eval()
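if __name__ == '__main__':
    # Illustration of the input shape described in fit()'s docstring (a
    # sketch only): 500 one-dimensional curves of length 40 are packed into
    # an array of shape (500, 1, 40).  A concrete subclass of AbstractWT
    # (not defined in this file) would then be fitted with something like
    # `wt.fit(X=X, num_epochs=5)`.
    X = np.random.randn(500, 1, 40).astype('float32')
    print(X.shape)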
|
from models import Employee
from db import session
from flask_restful import reqparse
from flask_restful import abort
from flask_restful import Resource
from flask_restful import fields
from flask_restful import marshal_with
employee_fields = {
'id': fields.Integer,
'created_at': fields.DateTime,
'updated_at': fields.DateTime,
'deleted_at': fields.DateTime,
'name': fields.String,
'city': fields.String,
'age': fields.Integer,
'status': fields.Boolean
}
parser = reqparse.RequestParser()
parser.add_argument('name', type=str)
parser.add_argument('city', type=str)
parser.add_argument('age', type=int)
parser.add_argument('status', type=bool)
class EmployeeResource(Resource):
@marshal_with(employee_fields)
def get(self, name):
employee = session.query(Employee).filter(Employee.name == name).first()
if not employee:
abort(404, message="Employee {} doesn't exist".format(name))
return employee
def delete(self, name):
employee = session.query(Employee).filter(Employee.name == name).first()
if not employee:
abort(404, message="Employee {} doesn't exist".format(name))
session.delete(employee)
session.commit()
return {}, 204
@marshal_with(employee_fields)
def patch(self, name):
        parsed_args = parser.parse_args()
        employee = session.query(Employee).filter(Employee.name == name).first()
        if not employee:
            abort(404, message="Employee {} doesn't exist".format(name))
        # employee.name = parsed_args['name']
        employee.city = parsed_args['city']
employee.age = parsed_args['age']
session.add(employee)
session.commit()
return employee, 200
class EmployeeListResource(Resource):
@marshal_with(employee_fields)
def get(self):
employees = session.query(Employee).all()
return employees
@marshal_with(employee_fields)
def post(self):
parsed_args = parser.parse_args()
employee = Employee(
name=parsed_args['name'],
city=parsed_args['city'],
age=parsed_args['age'],
status=parsed_args['status'],
)
session.add(employee)
session.commit()
return employee, 201
|
class Plot(object):
def __init__(self,name,array_func,event_weight_func,hist,
dim=1,
selection_func=None
):
self.name = name
self.array_func = array_func
self.event_weight_func = event_weight_func
self.hist = hist
self.dim = dim
self.selection_func = selection_func
self.data_color = 'black'
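if __name__ == '__main__':
    # Minimal usage sketch (hypothetical): the histogram object and the two
    # callables are placeholders, since the framework that consumes Plot is
    # not shown in this file.
    example_plot = Plot(
        name='jet_pt',
        array_func=lambda event: event['jet_pt'],
        event_weight_func=lambda event: event.get('weight', 1.0),
        hist=None,  # stand-in for whatever histogram object the framework expects
        dim=1,
        selection_func=lambda event: event['njets'] > 0,
    )
    print(example_plot.name, example_plot.dim, example_plot.data_color)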
|
from django.contrib import admin
from fabric_bolt.fabfiles.models import Fabfile
#
#class FabfileAdmin(admin.ModelAdmin):
# form = FabfileForm
admin.site.register(Fabfile)
|
from setuptools import setup, Command
import subprocess
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
errno = subprocess.call(['py.test'])
raise SystemExit(errno)
setup(
name='Flask-Split',
version='0.4.0',
url='http://github.com/jpvanhal/flask-split',
license='MIT',
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
description='A/B testing for your Flask application',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGES.rst').read(),
packages=['flask_split'],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'Flask>=0.10',
'Redis>=2.6.0',
],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class BST:
    def __init__(self):
        self.root = None
    def insert(self, data):
        if self.root is None:
            print("Adding the first node of the tree")
            self.root = TreeNode(data)
        else:
            self._insert(data, self.root)
    def _insert(self, data, currentnode: TreeNode):
        """
        Recursively insert data below currentnode, keeping the BST property.
        """
        if currentnode.val < data:
            if currentnode.right is None:
                currentnode.right = TreeNode(data)
            else:
                self._insert(data, currentnode.right)
        elif currentnode.val > data:
            if currentnode.left is None:
                currentnode.left = TreeNode(data)
            else:
                self._insert(data, currentnode.left)
        else:
            print("The node is already in the tree.")
|
import os
import pytest
import sqlalchemy as sa
from imicrobe.load import models
from orminator import session_manager_from_db_uri
@pytest.fixture()
def test_session():
engine = sa.create_engine(os.environ['IMICROBE_DB_URI'], echo=False)
try:
with engine.connect() as connection:
connection.execute('DROP DATABASE imicrobe_test')
    except Exception:
        # the test database may not exist yet on a fresh run
        pass
with engine.connect() as connection:
connection.execute('CREATE DATABASE imicrobe_test')
test_db_uri = os.environ['IMICROBE_DB_URI'] + '_test'
test_engine = sa.create_engine(test_db_uri, echo=False)
models.Model.metadata.create_all(test_engine)
with session_manager_from_db_uri(test_db_uri) as test_session:
yield test_session
with engine.connect() as connection:
connection.execute('DROP DATABASE imicrobe_test')
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 拷贝数据
Case Name : 反语法测试\copy file to table
Description :
1.创建测试表并插入数据
2.构造数据文件
3.从文件中拷贝数据到表
4.清理环境
Expect :
1.创建测试表并插入数据成功
2.构造数据文件成功
3.从文件中拷贝数据到表失败
4.清理环境成功
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
Log = Logger()
class CopyFile(unittest.TestCase):
def setUp(self):
        Log.info('----Opengauss_Function_DML_Copy_Case0045 started----')
self.user_node = Node('PrimaryDbUser')
self.constant = Constant()
def test_copy_file(self):
        Log.info('----Create the test table and insert data into it----')
sql_cmd = '''drop table if exists testzl;
create table testzl(sk integer,id char(16),\
name varchar(20),sq_ft integer);
insert into testzl values (001,'sk1','tt',3332);
insert into testzl values (001,'sk1','tt',3332);
insert into testzl values (001,'sk1','tt',3332);
'''
excute_cmd = f'''source {macro.DB_ENV_PATH} ;
gsql -d {self.user_node.db_name} -p \
{self.user_node.db_port} -c "{sql_cmd}"
'''
Log.info(excute_cmd)
msg = self.user_node.sh(excute_cmd).result()
Log.info(msg)
self.assertIn(self.constant.INSERT_SUCCESS_MSG, msg)
self.assertIn(self.constant.TABLE_CREATE_SUCCESS, msg)
        Log.info('-------------Create the data file-------------')
mkdir_cmd = f'''mkdir {macro.DB_INSTANCE_PATH}/pg_copydir;
touch {macro.DB_INSTANCE_PATH}/pg_copydir/testzl.dat;
'''
Log.info(mkdir_cmd)
mkdir_msg = self.user_node.sh(mkdir_cmd).result()
Log.info(mkdir_msg)
self.assertNotIn(self.constant.SQL_WRONG_MSG[1], mkdir_msg)
        Log.info('----Copy using the reversed syntax \copy file from table----')
copy_cmd = f'''\copy '{macro.DB_INSTANCE_PATH}/pg_copydir/testzl.dat'\
to testzl;
'''
excute_cmd = f'''source {macro.DB_ENV_PATH} ;
gsql -d {self.user_node.db_name} -p \
{self.user_node.db_port} -c "{copy_cmd}"
'''
Log.info(excute_cmd)
copy_msg = self.user_node.sh(excute_cmd).result()
Log.info(copy_msg)
self.assertIn(self.constant.SYNTAX_ERROR_MSG, copy_msg)
def tearDown(self):
        Log.info('----------------Clean up the environment-----------------------')
sql_cmd = 'drop table if exists testzl;'
excute_cmd = f'''source {macro.DB_ENV_PATH} ;
gsql -d {self.user_node.db_name} -p \
{self.user_node.db_port} -c "{sql_cmd}"
'''
Log.info(excute_cmd)
msg = self.user_node.sh(excute_cmd).result()
Log.info(msg)
self.assertIn(self.constant.TABLE_DROP_SUCCESS, msg)
excute_cmd = f'''rm -rf {macro.DB_INSTANCE_PATH}/pg_copydir;
rm -rf /home/{self.user_node.ssh_user}/testzl;
'''
Log.info(excute_cmd)
msg = self.user_node.sh(excute_cmd).result()
Log.info(msg)
        Log.info('----Opengauss_Function_DML_Copy_Case0045 finished----')
|
__author__ = 'denis_makogon'
import proboscis
from proboscis import asserts
from proboscis import decorators
from gigaspace.cinder_workflow import base as cinder_workflow
from gigaspace.nova_workflow import base as nova_workflow
from gigaspace.common import cfg
from gigaspace.common import utils
GROUP_WORKFLOW = 'gigaspace.cinder.volumes.api'
CONF = cfg.CONF
@proboscis.test(groups=[GROUP_WORKFLOW])
class TestWorkflow(object):
"""
This is a test suit that represents described workflow:
- create volume:
- check while it will reach 'available status'
- list volumes
- get volume:
- by id
- by name
Attachment workflow was implemented in two different manners:
Main workflow:
- boot an instance:
- format volume using cloudinit
- poll until instance would reach ACTIVE state
- check volume and server attachments
- delete an instance
- poll until instance would gone away
- check if volume was deleted
Alternative workflow:
- boot volume
- poll untill volume reach 'available' state
- boot an instance without volume and userdata
- poll untill instance would reach ACTIVE state
- use Nova volumes API to attach volume
- check volume attachments
- reboot server (required, to let operating system to
discover volume during reboot)
- poll untill instance would reach ACTIVE state
- check server attachments
"""
def __init__(self):
self.cinder_actions = cinder_workflow.BaseCinderActions()
self.nova_actions = nova_workflow.BaseNovaActions()
self.volume = None
self.server = None
self.volume_size, self.display_name, self.expected_status = (
"1", "test_volume", "available")
self.server_name, self.flavor_id, self.image_id, = (
"test_server",
CONF.test_config.test_flavor_id,
CONF.test_config.test_image_id
)
def _poll_volume_status(self, expected_status):
def _pollster():
volume = self.cinder_actions.show_volume(self.volume.id)
if volume.status in ("error", "failed"):
raise Exception("Volume is not in valid state")
return volume.status == expected_status
return _pollster
def _create_volume(self):
self.volume = self.cinder_actions.create_volume(
self.volume_size, self.display_name)
utils.poll_until(self._poll_volume_status(self.expected_status),
expected_result=True,
sleep_time=1)
asserts.assert_equal(self.volume.size, int(self.volume_size))
volume = self.cinder_actions.show_volume(self.volume.id)
asserts.assert_equal(volume.status, self.expected_status)
@proboscis.test
@decorators.time_out(300)
def test_create_volume(self):
"""
- create volume:
- check while it will reach 'available status'
"""
self._create_volume()
@proboscis.test(depends_on=[test_create_volume])
def test_list_volumes(self):
"""
- list volumes
"""
volumes = self.cinder_actions.list_volumes()
asserts.assert_equal(len(volumes), 1)
@proboscis.test(depends_on=[test_list_volumes])
def test_get_volume_by_its_name_or_id(self):
"""
- get volume:
- by name
- by ID
"""
try:
volume = self.cinder_actions.show_volume(self.display_name)
except Exception as e:
print("Can't get volume by its display name. %s" % str(e))
volume = self.cinder_actions.show_volume(self.volume.id)
pass
asserts.assert_equal(volume.status, self.expected_status)
def _poll_until_server_is_active(self, expected_status):
def _pollster():
server = self.nova_actions.get(self.server.id)
if server.status.upper() in ["ERROR", "FAILED"]:
raise Exception("Failed to spawn compute instance.")
return server.status == expected_status
return _pollster
def _boot(self, volume_id):
try:
self.server = self.nova_actions.boot(self.server_name,
self.flavor_id,
self.image_id,
volume_id=volume_id)
utils.poll_until(self._poll_until_server_is_active("ACTIVE"),
expected_result=True,
sleep_time=1)
self.server = self.nova_actions.get(self.server.id)
asserts.assert_equal(self.server.status, "ACTIVE")
except Exception as e:
print(str(e))
raise proboscis.SkipTest("Failed to spawn an instance.")
@decorators.time_out(300)
@proboscis.test(depends_on=[test_get_volume_by_its_name_or_id])
def test_boot_instance(self):
"""
- boot an instance:
- poll until instance would reach ACTIVE state
- check attachments
"""
self._boot(self.volume.id)
def _check_attachments(self):
server = self.nova_actions.get(self.server.id)
server_attachment = getattr(
server, 'os-extended-volumes:volumes_attached').pop(0)
volume_id = server_attachment['id']
volume = self.cinder_actions.show_volume(self.volume.id)
volume_attachment = volume.attachments.pop(0)
server_id = volume_attachment['server_id']
asserts.assert_equal(server.id, server_id)
asserts.assert_equal(volume.id, volume_id)
@proboscis.test(depends_on=[test_boot_instance])
def test_server_and_volume_attachments(self):
"""
- checks volume and server attachments
"""
self._check_attachments()
def _poll_until_server_is_gone(self, server_id=None):
def _pollster():
try:
_server_id = (server_id if server_id
else self.server.id)
self.nova_actions.delete(_server_id)
except Exception:
print("\nInstance has gone.")
return True
return _pollster
def _poll_until_volume_is_gone(self, volume_id=None):
def _pollster():
try:
_volume_id = (volume_id if volume_id
else self.volume.id)
self.cinder_actions.cinderclient.volumes.delete(
_volume_id)
except Exception:
print("Volume has gone.")
return True
return _pollster
@decorators.time_out(300)
@proboscis.test(runs_after=[test_server_and_volume_attachments])
def test_boot_without_volume(self):
"""
- boot instance without volume
"""
self._boot(None)
@proboscis.test(depends_on=[test_boot_without_volume])
def test_volume_create(self):
"""
- create volume
"""
self._create_volume()
@proboscis.test(depends_on=[test_volume_create])
def test_attach_volume(self):
self.nova_actions.create_server_volume(self.volume.id, self.server.id)
utils.poll_until(self._poll_volume_status("in-use"),
expected_result=True,
sleep_time=1)
self._check_attachments()
@decorators.time_out(300)
@proboscis.test(depends_on=[test_attach_volume])
def test_server_reboot_for_volume_discovery(self):
self.nova_actions.novaclient.servers.reboot(self.server.id)
utils.poll_until(self._poll_until_server_is_active("ACTIVE"),
expected_result=True,
sleep_time=1)
self._check_attachments()
@proboscis.after_class
def test_delete_resources(self):
"""
- delete instance
- delete volumes
"""
for server in self.nova_actions.novaclient.servers.list():
server.delete()
utils.poll_until(
self._poll_until_server_is_gone(server_id=server.id),
expected_result=True,
sleep_time=1)
for volume in self.cinder_actions.cinderclient.volumes.list():
            # one of the volumes was bootstrapped with the delete flag in its
            # block device mapping, so the Cinder API service would reject the
            # request because the volume status is already 'deleting' here
if volume.status in ['available', 'error']:
volume.delete()
utils.poll_until(
self._poll_until_volume_is_gone(volume_id=volume.id),
expected_result=True,
sleep_time=1)
|
from django.contrib.auth.models import User
from django.db import models
class University(models.Model):
name = models.CharField(max_length=80, unique=True)
acronym = models.CharField(max_length=40)
def __str__(self):
return self.name
class Organization(models.Model):
name = models.CharField(max_length=80, unique=True)
def __str__(self):
return self.name
class Administrator(models.Model):
user = models.OneToOneField(User, models.CASCADE)
university = models.ForeignKey(University, models.CASCADE)
def user_email(self):
return self.user.email
def __str__(self):
return "Administrator of {university}".format(university=self.university)
class Host(models.Model):
user = models.OneToOneField(User, models.CASCADE)
organization = models.ForeignKey(Organization, models.CASCADE)
university = models.ForeignKey(University, models.CASCADE)
administrator = models.ForeignKey(Administrator, models.SET_NULL, null=True, blank=True)
def __str__(self):
return "{organization} of {university}".format(
organization=self.organization, university=self.university
)
def user_email(self):
return self.user.email
def has_admin(self):
return bool(self.administrator)
hours = (
("", "Hour of Event"),
("12 PM", "12 PM"),
("01 PM", "01 PM"),
("02 PM", "02 PM"),
("03 PM", "03 PM"),
("04 PM", "04 PM"),
("05 PM", "05 PM"),
("06 PM", "06 PM"),
("07 PM", "07 PM"),
("08 PM", "08 PM"),
("09 PM", "09 PM"),
("10 PM", "10 PM"),
("11 PM", "11 PM"),
("12 AM", "12 AM"),
("01 AM", "01 AM"),
("02 AM", "02 AM"),
("03 AM", "03 AM"),
("04 AM", "04 AM"),
("05 AM", "05 AM"),
("06 AM", "06 AM"),
("07 AM", "07 AM"),
("08 AM", "08 AM"),
("09 AM", "09 AM"),
("10 AM", "10 AM"),
("11 AM", "11 AM"),
)
councils = (
("", "Affiliated Council"),
("Interfraternity Council", "Interfraternity Council"),
("Panhellenic", "Panhellenic"),
("NPHC", "NPHC"),
("Non-Affiliated", "Non-Affiliated"),
)
event_types = (
("", "Type of Event"),
("Social", "Social"),
("Date Function", "Date Function"),
("Formal", "Formal"),
("Other", "Other"),
)
invitation_types = (
("", "Choose One"),
("Invitation Only", "Invitation Only"),
("Open to the Public", "Open to the Public"),
("Open to Faculty, Staff, Students", "Open to Faculty, Staff, Students"),
)
event_locations = (
("", "Where is the event located?"),
("Chapter House", "Chapter House"),
("Other Campus Venue", "Other Campus Venue"),
("Off Campus", "Off Campus"),
)
class Event(models.Model):
name = models.CharField(max_length=40)
date_of_event = models.DateField()
time_of_event = models.CharField(max_length=40, choices=hours)
event_location = models.CharField(max_length=100, choices=event_locations)
event_location_other = models.CharField(max_length=100, blank=True, null=True)
name_of_planner = models.CharField(max_length=100)
phone_number_of_planner = models.CharField(max_length=40)
email_of_planner = models.CharField(max_length=40)
expected_number_guests = models.IntegerField()
affiliated_council = models.CharField(max_length=40, choices=councils)
type_of_event = models.CharField(max_length=40, choices=event_types)
type_event_other = models.CharField(max_length=100, blank=True, null=True)
event_description = models.TextField()
invitation_type = models.CharField(max_length=100, choices=invitation_types)
transportation = models.TextField(blank=True, null=True)
one_entry_point = models.CharField(
max_length=10,
choices=(("", "Will there be one entry point?"), ("Yes", "Yes"), ("No", "No")),
)
entry_point_location = models.CharField(max_length=100)
co_sponsored_description = models.TextField(blank=True, null=True)
alcohol_distribution = models.TextField(blank=True, null=True)
sober_monitors = models.TextField()
presidents_email = models.EmailField()
host = models.ForeignKey(Host, models.CASCADE)
def __str__(self):
return self.name
def host_email(self):
return self.host.user.email
class Guest(models.Model):
first_name = models.CharField(max_length=40)
last_name = models.CharField(max_length=40)
date_of_birth = models.DateField()
gender = models.CharField(max_length=10, choices=(("Male", "Male"), ("Female", "Female")))
event = models.ManyToManyField(Event)
def __str__(self):
return "{last_name}, {first_name}".format(
last_name=self.last_name, first_name=self.first_name
)
categories = (
("", "Select category for flag"),
("Underage Drinking", "Underage Drinking"),
("Stealing", "Stealing"),
("Vandalism", "Vandalism"),
("Violence", "Violence"),
("Other", "Other"),
)
priorities = (
("", "Select severity of violation"),
("Low", "Low"),
("Medium", "Medium"),
("High", "High"),
)
class Flag(models.Model):
guest = models.ForeignKey(Guest, models.CASCADE)
host = models.ForeignKey(Host, models.CASCADE, null=True, blank=True)
administrator = models.ForeignKey(Administrator, models.CASCADE, null=True, blank=True)
priority = models.CharField(max_length=10, choices=priorities)
category = models.CharField(max_length=20, choices=categories)
other_description = models.CharField(
max_length=30, blank=True, null=True, help_text="If you chose other, fill in description"
)
class GuestImage(models.Model):
image = models.ImageField(upload_to="images/guests")
guest = models.ForeignKey(Guest, models.CASCADE)
event = models.ForeignKey(Event, models.CASCADE)
date_time_taken = models.DateTimeField()
def __str__(self):
return str(self.guest)
def event_name(self):
return str(self.event)
|
import numpy as np
from io import StringIO
try:
    from colorama import Fore
except ImportError:
class ForeDummy:
def __getattribute__(self, attr):
return ''
Fore = ForeDummy()
NULL_BYTE = b'\x00'
def read_null_terminated_string(f, max_len=128):
string = b''
next_char = b''
while next_char != NULL_BYTE:
string = string + next_char
next_char = f.read(1)
assert len(string) < max_len
return string
def generate_bitfield(n):
bf = 1 << np.arange(n)
return bf
def eval_bitfield(x, bitfield):
return (x & bitfield) > 0
class assert_size_in_bytes:
def __init__(self, byte_size):
self.byte_size = byte_size
def __call__(self, f):
def wrapped_f(*args,**kwargs):
val = f(*args, **kwargs)
assert type(val) is np.dtype, \
'expected numpy.dtype not {0}'.format(type(val))
assert val.itemsize == self.byte_size,\
'expected {0} bytes, found {1} bytes'.format(self.byte_size,
val.itemsize)
return val
return wrapped_f
def arr2str(x):
outstring = ''
for name in x.dtype.names:
val = x[name]
if type(val) is np.void:
val = bytes(val)
if type(val) is bytes:
valstring = '{0}B bytestring'.format(len(val))
else:
valstring = str(val)
outstring = outstring + Fore.LIGHTBLUE_EX+name+': ' + Fore.RESET + valstring + '\n'
return outstring
def print_arr(x):
print(arr2str(x))
def prettyvars(obj, skip_private=True):
    d = vars(obj)
    for key, val in d.items():
        if skip_private and key[0] == '_':
            continue
print(Fore.LIGHTBLUE_EX + key + ': ' + Fore.RESET + str(val))
def print_table(data, headers=None, col_width=16):
io = StringIO()
num_cols = len(data[0])
fmt = '{:>' + str(col_width) + '}'
if headers:
print(Fore.LIGHTBLUE_EX + \
(fmt*num_cols).format(*headers, col_width=col_width) + \
Fore.RESET, \
file=io
)
for row in data:
val_strings = tuple(map(str, row))
print((fmt*num_cols).format(*val_strings, col_width=col_width), file=io)
io.seek(0)
s = io.read()
print(s)
return s
def ensure_not_none(x, default_val):
if x is None:
return default_val
return x
def has_len(x):
    try:
        len(x)
        return True
    except TypeError:
        return False
def read_from_bytearr(bytearr, dtype = 'uint8', offset = 0, count = 1):
dtype = np.dtype(dtype)
num_bytes = dtype.itemsize*count
val = bytearr[offset:(offset+num_bytes)].view(dtype)
return val
def split_bytearr(bytearr, offset, size):
"""
e.g. offset = [0, 1000, 2000]
size = 192
would read bytearr[0:192], bytearr[1000:1192], bytearr[2000:2192]
and concatenate into one bytearr
size can also be an array, e.g. size = [192, 200, 300]
"""
split_inds = np.empty(2*len(offset), dtype='u8')
split_inds[0::2] = offset
split_inds[1::2] = offset + size
byte_groups = np.split(bytearr, split_inds)
val = np.concatenate(byte_groups[1::2])
return val
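if __name__ == '__main__':
    # Small self-check sketch for split_bytearr, following the example in its
    # docstring: pull three 4-byte windows out of a 3000-byte buffer and
    # concatenate them into a single byte array.
    buf = (np.arange(3000) % 256).astype('uint8')
    offsets = np.array([0, 1000, 2000], dtype='u8')
    chunk = split_bytearr(buf, offsets, 4)
    print(len(chunk))   # 12 bytes: three windows of four bytes each
    print(chunk[:4])    # first window, i.e. buf[0:4]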
|
import os
import sys
import tempfile
import subprocess
import cv2
import pymesh
import numpy as np
import torch
import triangle as tr
from tridepth import BaseMesh
from tridepth.extractor import calculate_canny_edges
from tridepth.extractor import SVGReader
from tridepth.extractor import resolve_self_intersection, cleanup
from tridepth.extractor import add_frame
class Mesh2DExtractor:
def __init__(self, canny_params={"denoise": False}, at_params={"filter_itr": 4, "error_thresh": 0.01}):
self.canny_params = canny_params # TODO
self.autotrace_cmd = ['autotrace',
'--centerline',
'--remove-adjacent-corners',
'--filter-iterations', str(at_params["filter_itr"]),
'--error-threshold', str(at_params["error_thresh"]),
'--input-format=bmp',
'--output-format=svg']
def _execute_autotrace(self, filename, debug=False):
"""Execute autotrace with input (bmp-file)
- https://github.com/autotrace/autotrace
Returns:
svg_string: string starting from '<svg/>'
"""
# Execute autotrace
p = subprocess.Popen(self.autotrace_cmd + [filename], stdout=subprocess.PIPE)
# Read the converted svg contents
svg_string = p.communicate()[0]
if not len(svg_string):
print("autotrace_cmd: " + ' '.join(self.autotrace_cmd + [filename]), file=sys.stderr)
print("ERROR: returned nothing, leaving tmp bmp file around for you to debug", file=sys.stderr)
sys.exit(1)
else:
if debug:
print(filename)
sys.exit(1)
else:
                os.unlink(filename)  # Remove the temporary file
return svg_string
def _read_polygon_from_svg(self, svg_string):
"""
"""
# Extract polygon information from svg-string
# - https://github.com/guyc/scadtrace/blob/master/svg.py
svg_reader = SVGReader(svg_string)
verts_2d, edges = svg_reader.run()
# Store polygons as wire-format (w/ cleaning)
# - https://github.com/PyMesh/PyMesh/blob/master/scripts/svg_to_mesh.py
if verts_2d.shape[0] == 0:
wires = pymesh.wires.WireNetwork.create_empty()
else:
wires = pymesh.wires.WireNetwork.create_from_data(verts_2d, edges)
wires = resolve_self_intersection(wires, min_edge_size=1.5)
wires = cleanup(wires)
return wires
def _triangulation(self, np_edge, wires, output_size, debug=False):
"""
"""
height, width = output_size
# We use cython wrapper of Triangle,
# since other implementations (Pymesh) can't output edges :(
# - https://github.com/drufat/triangle
input_dic = {}
input_dic["vertices"] = wires.vertices.copy()
input_dic["segments"] = wires.edges.copy()
# [Options]
# p: Triangulates a Planar Straight Line Graph.
# q: no angles smaller than 20 degrees
try:
t = tr.triangulate(input_dic, 'pq')
        except Exception:
import uuid
unique_filename = str(uuid.uuid4()) + ".png"
print(wires.vertices.shape, wires.edges.shape)
cv2.imwrite(unique_filename, np_edge)
exit()
if debug:
import matplotlib.pyplot as plt
plt.gca().invert_yaxis()
# plt.imshow(np_edge)
for edge in wires.edges:
v1x, v1y = wires.vertices[edge[0]]
v2x, v2y = wires.vertices[edge[1]]
plt.plot([v1x, v2x], [v1y, v2y], 'k-', color='r', linewidth=1.0)
for tri in t['triangles']:
v1x, v1y = t['vertices'][tri[0]]
v2x, v2y = t['vertices'][tri[1]]
v3x, v3y = t['vertices'][tri[2]]
plt.plot([v1x, v2x], [v1y, v2y], 'k-', color='black', linewidth=1.0)
plt.plot([v2x, v3x], [v2y, v3y], 'k-', color='black', linewidth=1.0)
plt.plot([v3x, v1x], [v3y, v1y], 'k-', color='black', linewidth=1.0)
plt.scatter(wires.vertices[:, 0], wires.vertices[:, 1], s=3.0, c="black")
plt.show()
print(t['vertices'].shape, t['triangles'].shape)
exit()
# Normalize (range=[0,1])
vertices = t["vertices"]
t["vertices"] = np.concatenate((vertices[:, :1] / width,
vertices[:, 1:2] / height,
vertices[:, 2:]), 1)
t["edgemap"] = np_edge
return t
def __call__(self, np_scene):
"""
Args:
np_scene: [H,W,3] (ndarray, uint8)
"""
height, width, _ = np_scene.shape
# Calculate canny edge
np_edge, _ = calculate_canny_edges(np_scene, denoise=self.canny_params["denoise"])
# Save into temp file as bmp-format
with tempfile.NamedTemporaryFile(suffix='.bmp', delete=False) as temp:
cv2.imwrite(temp.name, np_edge)
# Execute vectorization (by Autotrace)
svg_string = self._execute_autotrace(temp.name)
# Extract polygon information
wires = self._read_polygon_from_svg(svg_string)
# Triangulation
wires = add_frame(wires, output_size=(height, width))
mesh_dic = self._triangulation(np_edge, wires, output_size=(height, width))
# Finally integrate all the information, and create disconnected mesh
mesh = BaseMesh(mesh_dic)
return mesh
|
from __future__ import print_function
import logging
import os
import psutil
import signal
import socket
from .worker import Worker
from ..common import algorithm_loader
def run(bind_address, yaml, parameter_server_url, timeout):
algorithm = algorithm_loader.load(yaml['path'])
_run_agents(
bind_address=bind_address,
agent_factory=_get_factory(
algorithm=algorithm,
yaml=yaml,
parameter_server_url=parameter_server_url
),
timeout=timeout
)
def _get_factory(algorithm, yaml, parameter_server_url):
config = algorithm.Config(yaml)
return lambda n_agent: algorithm.Agent(
config=config,
parameter_server=algorithm.BridgeControl().parameter_server_stub(parameter_server_url)
)
def _run_agents(bind_address, agent_factory, timeout):
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
socket_ = socket.socket()
try:
_info('listening %s', bind_address)
socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socket_.bind(_parse_address(bind_address))
socket_.listen(100)
n_agent = 0
while True:
connection, address = socket_.accept()
try:
_debug('accepted %s from %s', str(connection), str(address))
available = _available_memory()
required = _memory_per_child()
if required is None:
_info('memory %.3f, None' % available)
else:
_info('memory %.3f, %.3f' % (available, required))
if required is not None and available < required:
_warning(
'Cannot start new child: available memory (%.3f) is less than memory per child (%.3f)' %
(available, required)
)
else:
pid = None
try:
pid = os.fork()
except OSError as e:
                        _warning('{} : {}'.format(bind_address, e))
if pid == 0:
socket_.close()
connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
Worker(agent_factory, timeout, n_agent, connection, address).run()
break
finally:
connection.close()
n_agent += 1
finally:
socket_.close()
def _debug(message, *args):
logging.debug('%d:' + message, os.getpid(), *args)
def _info(message, *args):
logging.info('%d:' + message, os.getpid(), *args)
def _parse_address(address):
host, port = address.split(':')
return host, int(port)
def _available_memory():
vm = psutil.virtual_memory()
return 100 * float(vm.available) / vm.total
def _memory_per_child():
process = psutil.Process(os.getpid())
n = 0
mem = 0
for child in process.children(recursive=False):
n += 1
mem += _process_tree_memory(child)
if n == 0:
return None
return mem / n
def _process_tree_memory(process):
mem = process.memory_percent()
for child in process.children(recursive=True):
mem += child.memory_percent()
return mem
def _warning(message, *args):
logging.warning('%d:' + message, os.getpid(), *args)
|
# guests_at_party
#fetch the input functions
from BTCInput import *
#create an empty guests list
guests=[]
number_guests=read_int('How many guests do you expect? ')
# read in the requested number of guest names
for count in range(1,number_guests+1):
    # assemble a prompt string
    prompt='Enter the name of guest '+str(count)+': '
    # read a name and append it to the guests list
    guests.append(read_text(prompt))
# print a heading
print('Guest list')
count = 1
# work through the guest names and print them
for guest_value in guests:
# print an item
print('Guest number', count,'is',guest_value)
    # advance the guest counter
count = count + 1
def save_guests(file_path):
'''
    Saves the contents of the guests list to the file at file_path
'''
print('Save the guests in:', file_path)
try:
# create a file object
with open(file_path,'w') as output_file:
# Work through the guest values in the list
for guest in guests:
# write out the sale as a string
output_file.write(guest+'\n')
except:
print('Something went wrong writing the file')
print(guests)
save_guests('guest_list.txt')
|
import rospy
from python_qt_binding import QtCore
from python_qt_binding import QtGui
from python_qt_binding import QtWidgets
from python_qt_binding.QtWidgets import QWidget
from rqt_ez_publisher import ez_publisher_model as ez_model
from rqt_ez_publisher import widget as ez_widget
from rqt_ez_publisher import publisher
class EzPublisherWidget(QWidget):
'''Main widget of this GUI'''
sig_sysmsg = QtCore.Signal(str)
def __init__(self, parent=None, modules=[]):
self._model = ez_model.EzPublisherModel(
publisher.TopicPublisherWithTimer, modules=modules)
self._sliders = []
QWidget.__init__(self, parent=parent)
self.setup_ui()
def add_slider_from_combo(self):
return self.add_slider_by_text(str(self._combo.currentText()))
def close_slider(self, widget, remove=True):
widget.hide()
if remove:
self._sliders.remove(widget)
self._main_vertical_layout.removeWidget(widget)
def get_next_index(self, topic_name, attributes):
array_index = 0
text = ez_model.make_text(topic_name, attributes, array_index)
while text in [x.get_text() for x in self._sliders]:
array_index += 1
text = ez_model.make_text(topic_name, attributes, array_index)
return array_index
def add_widget(self, output_type, topic_name, attributes, array_index,
position=None):
widget_class = None
type_class_dict = {float: ez_widget.DoubleValueWidget,
int: ez_widget.IntValueWidget,
'uint': ez_widget.UIntValueWidget,
bool: ez_widget.BoolValueWidget,
str: ez_widget.StringValueWidget}
for module in self._model.get_modules():
type_class_dict[
module.get_msg_string()] = module.get_widget_class()
if output_type in type_class_dict:
widget_class = type_class_dict[output_type]
else:
self.sig_sysmsg.emit('not supported type %s' % output_type)
return False
widget = widget_class(topic_name, attributes, array_index,
self._model.get_publisher(topic_name), self)
self._model.get_publisher(topic_name).set_manager(self)
self._sliders.append(widget)
if widget.add_button:
widget.add_button.clicked.connect(
lambda: self.add_widget(
output_type, topic_name, attributes,
self.get_next_index(topic_name, attributes),
self._main_vertical_layout.indexOf(widget) + 1))
if position:
self._main_vertical_layout.insertWidget(position, widget)
else:
self._main_vertical_layout.addWidget(widget)
return True
def move_down_widget(self, widget):
index = self._main_vertical_layout.indexOf(widget)
if index < self._main_vertical_layout.count() - 1:
self._main_vertical_layout.removeWidget(widget)
self._main_vertical_layout.insertWidget(index + 1, widget)
def move_up_widget(self, widget):
index = self._main_vertical_layout.indexOf(widget)
if index > 1:
self._main_vertical_layout.removeWidget(widget)
self._main_vertical_layout.insertWidget(index - 1, widget)
def add_slider_by_text(self, text):
if text.endswith('/header/seq'):
rospy.loginfo('header/seq is not created')
return
if text in [x.get_text() for x in self._sliders]:
            self.sig_sysmsg.emit('%s already exists' % text)
return
results = self._model.register_topic_by_text(text)
if not results:
            self.sig_sysmsg.emit('%s does not exist' % text)
return
topic_name, attributes, builtin_type, is_array, array_index = results
if builtin_type:
if is_array and array_index is None:
# use index 0
array_index = 0
self.add_widget(builtin_type, topic_name, attributes, array_index)
else:
for string in self._model.expand_attribute(text, array_index):
self.add_slider_by_text(string)
def get_sliders_for_topic(self, topic):
return [x for x in self._sliders if x.get_topic_name() == topic]
def get_sliders(self):
return self._sliders
def clear_sliders(self):
for widget in self._sliders:
self.close_slider(widget, False)
self._sliders = []
def update_combo_items(self):
self._combo.clear()
for topic in self._model.get_topic_names():
self._combo.addItem(topic)
def set_configurable(self, value):
self._reload_button.setVisible(value)
self._topic_label.setVisible(value)
self._clear_button.setVisible(value)
self._combo.setVisible(value)
for slider in self._sliders:
slider.set_configurable(value)
def setup_ui(self):
self._horizontal_layout = QtWidgets.QHBoxLayout()
self._reload_button = QtWidgets.QPushButton(parent=self)
self._reload_button.setMaximumWidth(30)
self._reload_button.setIcon(
self.style().standardIcon(QtWidgets.QStyle.SP_BrowserReload))
self._reload_button.clicked.connect(self.update_combo_items)
self._topic_label = QtWidgets.QLabel('topic(+data member) name')
self._clear_button = QtWidgets.QPushButton('all clear')
self._clear_button.setMaximumWidth(200)
self._clear_button.clicked.connect(self.clear_sliders)
self._combo = QtWidgets.QComboBox()
self._combo.setEditable(True)
self.update_combo_items()
self._combo.activated.connect(self.add_slider_from_combo)
self._horizontal_layout.addWidget(self._reload_button)
self._horizontal_layout.addWidget(self._topic_label)
self._horizontal_layout.addWidget(self._combo)
self._horizontal_layout.addWidget(self._clear_button)
self._main_vertical_layout = QtWidgets.QVBoxLayout()
self._main_vertical_layout.addLayout(self._horizontal_layout)
self._main_vertical_layout.setAlignment(
self._horizontal_layout, QtCore.Qt.AlignTop)
self.setLayout(self._main_vertical_layout)
def shutdown(self):
self._model.shutdown()
def main():
import sys
app = QtWidgets.QApplication(sys.argv)
main_window = QtWidgets.QMainWindow()
main_widget = EzPublisherWidget()
main_window.setCentralWidget(main_widget)
main_window.show()
app.exec_()
if __name__ == '__main__':
rospy.init_node('ez_publisher')
main()
|
"""
link: https://leetcode-cn.com/problems/generalized-abbreviation
problem: 输出字符串的所有缩写,缩写规则为将连续n位用字符串n替代,不可以连续替换
solution: dfs + 备忘录。记录上一操作是否转换了数字。
solution-fix: 所有缩写可能数量定为 2**n 个,n为原串长度。可以将缩写串表达成长度为 n 的一个二进制数,第 i 为 0 时表示缩写,1 代表不变取原字符。
更高的时间复杂度,但空间复杂度更好。
"""
class Solution:
def generateAbbreviations(self, word: str) -> List[str]:
if word == "":
return [""]
@functools.lru_cache(maxsize=None)
def dfs(k: str, pre_num: bool) -> List[str]:
if k == "":
return [""]
t = []
for i in range(len(k)):
for kk in dfs(k[i + 1:], not pre_num):
t.append((str(i + 1) + kk) if not pre_num else k[:i + 1] + kk)
return t
return dfs(word, False) + dfs(word, True)
# ---
class Solution:
def generateAbbreviations(self, word: str) -> List[str]:
n, res = len(word), []
for i in range(1 << n):
cnt, cur, k, t = 0, 0, i, ""
while cnt != n:
if k & 1:
cur += 1
cnt += 1
else:
if cur != 0:
t = str(cur) + t
cur = 0
t = word[n - 1 - cnt] + t
cnt += 1
k >>= 1
if cur != 0:
t = str(cur) + t
res.append(t)
return res
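if __name__ == "__main__":
    # Quick check sketch: "word" has 2 ** 4 = 16 generalized abbreviations;
    # the bit-mask Solution defined last is the one invoked here.
    result = Solution().generateAbbreviations("word")
    print(len(result))       # 16
    print(sorted(result))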
|
try:
from . import generic as g
except BaseException:
import generic as g
class LoaderTest(g.unittest.TestCase):
def test_obj_groups(self):
# a wavefront file with groups defined
mesh = g.get_mesh('groups.obj')
# make sure some data got loaded
assert g.trimesh.util.is_shape(mesh.faces, (-1, 3))
assert g.trimesh.util.is_shape(mesh.vertices, (-1, 3))
# make sure groups are the right length
assert len(mesh.metadata['face_groups']) == len(mesh.faces)
# check to make sure there is signal not just zeros
assert mesh.metadata['face_groups'].ptp() > 0
def test_remote(self):
"""
Try loading a remote mesh using requests
"""
# get a unit cube from localhost
with g.serve_meshes() as address:
mesh = g.trimesh.io.load.load_remote(
url=address + '/unit_cube.STL')
assert g.np.isclose(mesh.volume, 1.0)
assert isinstance(mesh, g.trimesh.Trimesh)
def test_obj_quad(self):
mesh = g.get_mesh('quadknot.obj')
# make sure some data got loaded
assert g.trimesh.util.is_shape(mesh.faces, (-1, 3))
assert g.trimesh.util.is_shape(mesh.vertices, (-1, 3))
assert mesh.is_watertight
assert mesh.is_winding_consistent
def test_obj_multiobj(self):
# test a wavefront file with multiple objects in the same file
meshes = g.get_mesh('two_objects.obj')
self.assertTrue(len(meshes) == 2)
for mesh in meshes:
# make sure some data got loaded
assert g.trimesh.util.is_shape(mesh.faces, (-1, 3))
assert g.trimesh.util.is_shape(mesh.vertices, (-1, 3))
assert mesh.is_watertight
assert mesh.is_winding_consistent
def test_obj_split_attributes(self):
# test a wavefront file where pos/uv/norm have different indices
# and where multiple objects share vertices
# Note 'process=False' to avoid merging vertices
meshes = g.get_mesh('joined_tetrahedra.obj', process=False)
self.assertTrue(len(meshes) == 2)
assert g.trimesh.util.is_shape(meshes[0].faces, (4, 3))
assert g.trimesh.util.is_shape(meshes[0].vertices, (9, 3))
assert g.trimesh.util.is_shape(meshes[1].faces, (4, 3))
assert g.trimesh.util.is_shape(meshes[1].vertices, (9, 3))
def test_obj_simple_order(self):
# test a simple wavefront model without split indexes
# and make sure we don't reorder vertices unnecessarily
file_name = g.os.path.join(g.dir_models,
'cube.OBJ')
# load a simple OBJ file without merging vertices
m = g.trimesh.load(file_name, process=False)
# we're going to load faces in a basic text way
# and compare the order from this method to the
# trimesh loader, to see if we get the same thing
faces = []
verts = []
with open(file_name, 'r') as f:
for line in f:
line = line.strip()
if line[0] == 'f':
faces.append(line[1:].strip().split())
if line[0] == 'v':
verts.append(line[1:].strip().split())
# get faces as basic numpy array
faces = g.np.array(faces, dtype=g.np.int64) - 1
verts = g.np.array(verts, dtype=g.np.float64)
# trimesh loader should return the same face order
assert g.np.allclose(faces, m.faces)
assert g.np.allclose(verts, m.vertices)
def test_obj_compressed(self):
mesh = g.get_mesh('cube_compressed.obj', process=False)
assert g.np.allclose(g.np.abs(mesh.vertex_normals).sum(axis=1),
1.0)
def test_stl(self):
model = g.get_mesh('empty.stl')
assert model.is_empty
def test_3MF(self):
# an assembly with instancing
s = g.get_mesh('counterXP.3MF')
# should be 2 unique meshes
assert len(s.geometry) == 2
# should be 6 instances around the scene
assert len(s.graph.nodes_geometry) == 6
# a single body 3MF assembly
s = g.get_mesh('featuretype.3MF')
        # should be 1 unique mesh
assert len(s.geometry) == 1
        # should be 1 instance around the scene
assert len(s.graph.nodes_geometry) == 1
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
def create_db_table_pages_metadata(db, drop=True):
if drop:
db["pages_metadata"].drop(ignore=True)
db["pages_metadata"].create({
"id": str,
"page_idx": int, # This is just a count as we work through the pages
"page_char_start": int,
"page_char_end": int,
"page_leaf_num": int,
"page_num": str,
"page_num_conf": float # A confidence value relating to the page number detection
}, pk=("id", "page_idx")) # compound foreign keys not currently available via sqlite_utils?
|
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Count
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse, reverse_lazy
from django.views.generic import CreateView, UpdateView, DeleteView
from django.views.generic.base import TemplateView
from django.views.generic.list import ListView
from .forms import PostForm, CommentForm
from .models import Post, Group, Follow
from .utils import get_user_profile, check_following
User = get_user_model()
class IndexView(ListView):
"""Главная страница сайта."""
paginate_by = 10
template_name = 'index.html'
def get_queryset(self):
return (
Post.objects.select_related('author', 'group')
.annotate(comment_count=Count('comments'))
.all()
)
class FollowView(LoginRequiredMixin, ListView):
"""Страница постов авторов, на которых подписан пользователь."""
paginate_by = 10
template_name = 'follow.html'
def get_queryset(self):
return (
Post.objects.select_related('author', 'group')
.annotate(comment_count=Count('comments'))
.filter(author__following__user=self.request.user)
)
class GroupView(ListView):
"""Страница сообщества с постами."""
template_name = 'group.html'
paginate_by = 10
def get_queryset(self):
group = get_object_or_404(Group, slug=self.kwargs['slug'])
return (
group.posts.select_related('author', 'group')
.annotate(comment_count=Count('comments'))
.all()
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['group'] = get_object_or_404(Group, slug=self.kwargs['slug'])
return context
class GroupListView(ListView):
"""Страница со списком сообществ."""
model = Group
template_name = 'group_list.html'
paginate_by = 30
class PostCreate(LoginRequiredMixin, CreateView):
"""Страница создания нового поста."""
form_class = PostForm
template_name = 'post_edit.html'
success_url = reverse_lazy('index')
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdate(LoginRequiredMixin, UpdateView):
"""Страница редактирования поста."""
model = Post
pk_url_kwarg = 'post_id'
form_class = PostForm
template_name = 'post_edit.html'
def get(self, request, *args, **kwargs):
post = self.get_object()
if post.author != request.user:
            # if the user tries to edit someone else's post, redirect them to the post view page
return redirect('post', **kwargs)
return super().get(request, *args, **kwargs)
def get_success_url(self):
return reverse_lazy('post', kwargs=self.kwargs)
class PostDelete(LoginRequiredMixin, DeleteView):
"""Контроллер удаления поста. """
model = Post
pk_url_kwarg = 'post_id'
http_method_names = ['post']
def delete(self, request, *args, **kwargs):
post = self.get_object()
if request.user != post.author:
return redirect('post', **kwargs)
return super().delete(request, *args, **kwargs)
def get_success_url(self):
return reverse_lazy('profile', args=[self.kwargs['username']])
class ProfileView(ListView):
"""Страница профиля пользователя."""
template_name = 'profile.html'
paginate_by = 5
def get_queryset(self):
author = get_user_profile(self.kwargs['username'])
self.kwargs['author'] = author
return (
author.posts.select_related('author', 'group')
.annotate(comment_count=Count('comments'))
.all()
)
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(object_list=object_list, **kwargs)
context['author'] = self.kwargs['author']
context['following'] = check_following(self.request.user, context['author'])
return context
class PostView(TemplateView):
"""Страница просмотра поста."""
template_name = 'post.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
author = get_user_profile(self.kwargs['username'])
post = get_object_or_404(Post.objects.annotate(comment_count=Count('comments')), pk=self.kwargs['post_id'])
comments = post.comments.select_related('author', 'author__profile').order_by('created').all()
new_comment_form = CommentForm()
context['author'] = author
        context['following'] = check_following(self.request.user, context['author'])
context['post'] = post
context['comments'] = comments
context['new_comment_form'] = new_comment_form
return context
class CommentCreate(LoginRequiredMixin, CreateView):
"""Контроллер для создания комментария. """
http_method_names = ['post']
form_class = CommentForm
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.post = Post.objects.get(pk=self.kwargs['post_id'])
return super().form_valid(form)
def get_success_url(self):
return reverse('post', kwargs=self.kwargs)
@login_required
def profile_follow(request, username):
"""Контроллер для подписки на автора."""
author = get_object_or_404(User, username=username)
if author == request.user:
        # you cannot follow yourself
return redirect('profile', username=username)
if Follow.objects.filter(user=request.user, author=author).exists():
        # you cannot follow the same author more than once
return redirect('profile', username=username)
    Follow.objects.create(user=request.user, author=author)
return redirect('profile', username=username)
@login_required
def profile_unfollow(request, username):
"""Контроллер для отписки от автора."""
author = get_object_or_404(User, username=username)
follow = get_object_or_404(Follow, user=request.user, author=author)
follow.delete()
return redirect('profile', username=username)
def page_not_found(request, exception):
"""Страница ошибки при обращении к несуществующему адресу."""
return render(request, "misc/404.html", {"path": request.path}, status=404)
def server_error(request):
"""Страница, выводимая при возникновении ошибки на сервере."""
return render(request, "misc/500.html", status=500)
|
#!/usr/bin/env python3
import rospy
import numpy as np
import math
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from sensor_msgs.msg import PointCloud2, PointField
from sensor_msgs import point_cloud2
import os
import open3d.ml as _ml3d
import open3d.ml.torch as ml3d
import open3d as o3d
import copy
import sys
import time
import struct
import ctypes
import roslib
from geometry_msgs.msg import Transform, Vector3, Quaternion
import numpy.lib.recfunctions as nlr
from matplotlib.cm import get_cmap
class ply2pointcloud(object):
def __init__(self):
# file_path = '/home/yellow/KPConv-PyTorch/Data/Stanford3dDataset_v1.2/input_0.020/Area_3.ply'
file_path = '/home/yellow/Open3D/examples/test_data/Bunny.ply'
print("Load a ply point cloud, print it, and render it")
pcd = o3d.io.read_point_cloud(file_path)
self.xyz_load = np.asarray(pcd.points)
        # reuse the xyz coordinates as placeholder rgb values for visualisation
        self.pub_msg = self.xyzrgb_array_to_pointcloud2(self.xyz_load, self.xyz_load)
self.timer = rospy.Timer(rospy.Duration(0.5), self.timer_callback)
self.pub_points = rospy.Publisher('input_points', PointCloud2, queue_size=1)
print("ply2pointcloud init done")
def xyzrgb_array_to_pointcloud2(self,points, colors, stamp=None, frame_id='base_link', seq=None):
'''
Create a sensor_msgs.PointCloud2 from an array
of points.
'''
msg = PointCloud2()
assert(points.shape == colors.shape)
buf = []
if stamp:
msg.header.stamp = stamp
if frame_id:
msg.header.frame_id = frame_id
if seq:
msg.header.seq = seq
if len(points.shape) == 3:
msg.height = points.shape[1]
msg.width = points.shape[0]
else:
N = len(points)
xyzrgb = np.array(np.hstack([points, colors]), dtype=np.float32)
msg.height = 1
msg.width = N
msg.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('r', 12, PointField.FLOAT32, 1),
PointField('g', 16, PointField.FLOAT32, 1),
PointField('b', 20, PointField.FLOAT32, 1)
]
msg.is_bigendian = False
msg.point_step = 24
msg.row_step = msg.point_step * N
        msg.is_dense = True
        msg.data = xyzrgb.tobytes()
return msg
def timer_callback(self,event):
self.pub_points.publish(self.pub_msg)
rospy.loginfo('pub_points')
if __name__ == "__main__":
rospy.init_node("ply2pointcloud")
Ply2PointCloud = ply2pointcloud()
rospy.spin()
|
import collections
import os
def ensure_dir(dirname):
"""
Ensure directory exists.
Roughly equivalent to `mkdir -p`
"""
if not os.path.isdir(dirname):
os.makedirs(dirname)
def derive_out_path(in_paths, out_dir, out_extension='',
strip_in_extension=True,
out_prefix=None):
"""
Derives an 'output' path given some 'input' paths and an output directory.
In the simple case that only a single path is supplied, this is
simply the pathname resulting from replacing extension suffix and moving
dir, e.g.
``input_dir/basename.in`` -> ``output_dir/basename.out``
If the out_dir is specified as 'None' then it is assumed that the
new file should be located in the same directory as the first
input path.
In the case that multiple input paths are supplied, their basenames
are concatenated, e.g.
``in_dir/base1.in`` + ``in_dir/base2.in``
-> ``out_dir/base1_base2.out``
If the resulting output path is identical to any input path, this
raises an exception.
NB the extension should be supplied including the '.' prefix.
"""
in_paths = listify(in_paths)
if out_dir is None:
out_dir = os.path.dirname(in_paths[0])
in_basenames = [os.path.basename(ip) for ip in in_paths]
if strip_in_extension:
        in_basenames = [os.path.splitext(bn)[0] for bn in in_basenames]
out_basename = '_'.join(in_basenames)
if out_prefix:
out_basename = out_prefix+out_basename
out_path = os.path.join(out_dir, out_basename + out_extension)
for ip in in_paths:
if os.path.abspath(out_path) == os.path.abspath(ip):
raise RuntimeError(
'Specified path derivation results in output overwriting input!')
return out_path
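# Illustrative examples (paths are hypothetical):
#   derive_out_path('in_dir/base.in', 'out_dir', out_extension='.out')
#       -> 'out_dir/base.out'
#   derive_out_path(['in_dir/a.in', 'in_dir/b.in'], 'out_dir', out_extension='.csv',
#                   out_prefix='merged_')
#       -> 'out_dir/merged_a_b.csv'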
def save_script(script, filename):
"""Save a list of casa commands as a text file"""
with open(filename, 'w') as fp:
fp.write('\n'.join(script))
def byteify(input):
"""
Co-erce unicode to 'bytestring'
(or string containing unicode, or dict containing unicode)
Useful when e.g. importing filenames from JSON
(CASA sometimes breaks if passed Unicode strings.)
cf http://stackoverflow.com/a/13105359/725650
"""
if isinstance(input, dict):
return {byteify(key):byteify(value) for key,value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
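# Illustrative example (Python 2 semantics, values are hypothetical):
#   byteify({u'vis': [u'data.ms', u'data2.ms']})
#       -> {'vis': ['data.ms', 'data2.ms']}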
def listify(x):
"""
Ensure x is a (non-string) iterable; if not, enclose in a list.
Returns:
x or [x], accordingly.
"""
if isinstance(x, basestring):
return [x]
elif isinstance(x, collections.Iterable):
return x
else:
return [x]
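# Illustrative examples:
#   listify('single/path.in')  ->  ['single/path.in']
#   listify(['a.in', 'b.in'])  ->  ['a.in', 'b.in']
#   listify(42)                ->  [42]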
|
# import json
# from fastapi.testclient import TestClient
# from main import app
# import hashing
# client = TestClient(app)
# class TestUser():
# def test_create_user(self):
# new_user = self.user_thiago()
# response = client.post('/user', data = json.dumps(new_user))
# new_user_response = response.json()
# print('--=======')
# print(new_user_response)
# assert response.status_code == 201, response.text
# assert new_user_response['name'] == new_user['name']
# # assert new_user_response['password'] == new_user['password']
# assert new_user_response['email'] == new_user['email']
# def user_thiago(self):
# return {
# 'id': 1,
# 'name': 'Thiago Henry',
# 'password': hashing.Hash().bcrypt('123456'),
# 'email': 'thiago@email.com'
# }
|
from abc import abstractmethod
from pyautofinance.common.engine.engine_component import EngineComponent
class Timer(EngineComponent):
def __init__(self, when, **parameters):
self.when = when
self.parameters = parameters
@abstractmethod
def execute(self, cerebro, strat=None): # Returns the function of the timer
pass
def attach_to_engine(self, engine):
engine.cerebro.add_timer(self.when, execute=self.execute, **self.parameters)
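# Minimal illustrative sketch (hypothetical subclass, not part of pyautofinance):
# a concrete Timer only needs to implement execute(), which the engine's cerebro
# will call according to `when` and the extra scheduling parameters.
#
# class LoggingTimer(Timer):
#     def execute(self, cerebro, strat=None):
#         print(f"Timer fired at {self.when} with parameters {self.parameters}")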
|
# Author: OMKAR PATHAK
# Created On: 31st July 2017
# Best = Average = O(nlog(n)), Worst = O(n ^ 2)
# quick_sort algorithm
def sort(List):
if len(List) <= 1:
return List
pivot = List[len(List) // 2]
left = [x for x in List if x < pivot]
middle = [x for x in List if x == pivot]
right = [x for x in List if x > pivot]
return sort(left) + middle + sort(right)
# time complexities
def time_complexities():
return '''Best Case: O(nlogn), Average Case: O(nlogn), Worst Case: O(n ^ 2)'''
# easily retrieve the source code of the sort function
def get_code():
import inspect
return inspect.getsource(sort)
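# Example (illustrative):
#   sort([5, 2, 9, 1, 5])  ->  [1, 2, 5, 5, 9]
#   print(time_complexities())  # prints the best/average/worst case summary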
|
#!/usr/bin/env python
"""
$ python main.py grid.txt 4
70600674
"""
import sys
from operator import mul
from functools import reduce
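# get_adjacents yields four length-`size` groups anchored at (a, b): a row
# segment, a column segment, and the two diagonals (one with both indices
# increasing, one with the column index decreasing), so the main loop only has
# to take the product of each yielded line.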
def get_adjacents(grid, a, b, size):
x_range = range(a, a + size)
y_range = range(b, b + size)
y_range_rev = range(b, b - size, -1)
yield [grid[a][y] for y in y_range]
yield [grid[x][b] for x in x_range]
yield [grid[x][y] for x, y in zip(x_range, y_range)]
yield [grid[x][y] for x, y in zip(x_range, y_range_rev)]
if __name__ == '__main__':
filename, size = sys.argv[1], int(sys.argv[2])
with open(filename) as f:
grid = [
[int(i) for i in row.split()]
for row in f
]
x_max, y_max = len(grid), len(grid[0])
max_ = -1
for a in range(size - 1, x_max - size + 1):
for b in range(size - 1, y_max - size + 1):
for line in get_adjacents(grid, a, b, size):
product = reduce(mul, line)
if product > max_:
max_ = product
print(max_)
|
# -*- coding: utf-8 -*-
import unittest
import compat
import sys
from plmn.network_checks import *
class NetworkRegisterVerizon(unittest.TestCase):
def test_register_on_verizon(self):
NetworkChecks.network_register('Verizon', 'vzwinternet')
NetworkChecks.network_connect('Verizon', 'vzwinternet')
if __name__ == '__main__':
nargs = process_args()
unittest.main(argv=sys.argv[nargs:], exit=False)
Results.print_results()
|
"""Tests for C-implemented GenericAlias."""
import unittest
import pickle
from collections import (
defaultdict, deque, OrderedDict, Counter, UserDict, UserList
)
from collections.abc import *
from contextlib import AbstractContextManager, AbstractAsyncContextManager
from os import DirEntry
from re import Pattern, Match
from types import GenericAlias, MappingProxyType
import typing
from typing import TypeVar
T = TypeVar('T')
class BaseTest(unittest.TestCase):
"""Test basics."""
def test_subscriptable(self):
for t in (type, tuple, list, dict, set, frozenset,
defaultdict, deque,
OrderedDict, Counter, UserDict, UserList,
Pattern, Match,
AbstractContextManager, AbstractAsyncContextManager,
Awaitable, Coroutine,
AsyncIterable, AsyncIterator,
AsyncGenerator, Generator,
Iterable, Iterator,
Reversible,
Container, Collection,
Callable,
Set, MutableSet,
Mapping, MutableMapping, MappingView,
KeysView, ItemsView, ValuesView,
Sequence, MutableSequence,
MappingProxyType, DirEntry
):
tname = t.__name__
with self.subTest(f"Testing {tname}"):
alias = t[int]
self.assertIs(alias.__origin__, t)
self.assertEqual(alias.__args__, (int,))
self.assertEqual(alias.__parameters__, ())
def test_unsubscriptable(self):
for t in int, str, float, Sized, Hashable:
tname = t.__name__
with self.subTest(f"Testing {tname}"):
with self.assertRaises(TypeError):
t[int]
def test_instantiate(self):
for t in tuple, list, dict, set, frozenset, defaultdict, deque:
tname = t.__name__
with self.subTest(f"Testing {tname}"):
alias = t[int]
self.assertEqual(alias(), t())
if t is dict:
self.assertEqual(alias(iter([('a', 1), ('b', 2)])), dict(a=1, b=2))
self.assertEqual(alias(a=1, b=2), dict(a=1, b=2))
elif t is defaultdict:
def default():
return 'value'
a = alias(default)
d = defaultdict(default)
self.assertEqual(a['test'], d['test'])
else:
self.assertEqual(alias(iter((1, 2, 3))), t((1, 2, 3)))
def test_unbound_methods(self):
t = list[int]
a = t()
t.append(a, 'foo')
self.assertEqual(a, ['foo'])
x = t.__getitem__(a, 0)
self.assertEqual(x, 'foo')
self.assertEqual(t.__len__(a), 1)
def test_subclassing(self):
class C(list[int]):
pass
self.assertEqual(C.__bases__, (list,))
self.assertEqual(C.__class__, type)
def test_class_methods(self):
t = dict[int, None]
self.assertEqual(dict.fromkeys(range(2)), {0: None, 1: None}) # This works
self.assertEqual(t.fromkeys(range(2)), {0: None, 1: None}) # Should be equivalent
def test_no_chaining(self):
t = list[int]
with self.assertRaises(TypeError):
t[int]
def test_generic_subclass(self):
class MyList(list):
pass
t = MyList[int]
self.assertIs(t.__origin__, MyList)
self.assertEqual(t.__args__, (int,))
self.assertEqual(t.__parameters__, ())
def test_repr(self):
class MyList(list):
pass
self.assertEqual(repr(list[str]), 'list[str]')
self.assertEqual(repr(list[()]), 'list[()]')
self.assertEqual(repr(tuple[int, ...]), 'tuple[int, ...]')
self.assertTrue(repr(MyList[int]).endswith('.BaseTest.test_repr.<locals>.MyList[int]'))
self.assertEqual(repr(list[str]()), '[]') # instances should keep their normal repr
def test_exposed_type(self):
import types
a = types.GenericAlias(list, int)
self.assertEqual(str(a), 'list[int]')
self.assertIs(a.__origin__, list)
self.assertEqual(a.__args__, (int,))
self.assertEqual(a.__parameters__, ())
def test_parameters(self):
from typing import TypeVar
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
D0 = dict[str, int]
self.assertEqual(D0.__args__, (str, int))
self.assertEqual(D0.__parameters__, ())
D1a = dict[str, V]
self.assertEqual(D1a.__args__, (str, V))
self.assertEqual(D1a.__parameters__, (V,))
D1b = dict[K, int]
self.assertEqual(D1b.__args__, (K, int))
self.assertEqual(D1b.__parameters__, (K,))
D2a = dict[K, V]
self.assertEqual(D2a.__args__, (K, V))
self.assertEqual(D2a.__parameters__, (K, V))
D2b = dict[T, T]
self.assertEqual(D2b.__args__, (T, T))
self.assertEqual(D2b.__parameters__, (T,))
L0 = list[str]
self.assertEqual(L0.__args__, (str,))
self.assertEqual(L0.__parameters__, ())
L1 = list[T]
self.assertEqual(L1.__args__, (T,))
self.assertEqual(L1.__parameters__, (T,))
def test_parameter_chaining(self):
from typing import TypeVar
T = TypeVar('T')
self.assertEqual(list[T][int], list[int])
self.assertEqual(dict[str, T][int], dict[str, int])
self.assertEqual(dict[T, int][str], dict[str, int])
self.assertEqual(dict[T, T][int], dict[int, int])
with self.assertRaises(TypeError):
list[int][int]
dict[T, int][str, int]
dict[str, T][str, int]
dict[T, T][str, int]
def test_equality(self):
self.assertEqual(list[int], list[int])
self.assertEqual(dict[str, int], dict[str, int])
self.assertNotEqual(dict[str, int], dict[str, str])
self.assertNotEqual(list, list[int])
self.assertNotEqual(list[int], list)
def test_isinstance(self):
self.assertTrue(isinstance([], list))
with self.assertRaises(TypeError):
isinstance([], list[str])
def test_issubclass(self):
class L(list): ...
self.assertTrue(issubclass(L, list))
with self.assertRaises(TypeError):
issubclass(L, list[str])
def test_type_generic(self):
t = type[int]
Test = t('Test', (), {})
self.assertTrue(isinstance(Test, type))
test = Test()
self.assertEqual(t(test), Test)
self.assertEqual(t(0), int)
def test_type_subclass_generic(self):
class MyType(type):
pass
with self.assertRaises(TypeError):
MyType[int]
def test_pickle(self):
alias = GenericAlias(list, T)
s = pickle.dumps(alias)
loaded = pickle.loads(s)
self.assertEqual(alias.__origin__, loaded.__origin__)
self.assertEqual(alias.__args__, loaded.__args__)
self.assertEqual(alias.__parameters__, loaded.__parameters__)
def test_union(self):
a = typing.Union[list[int], list[str]]
self.assertEqual(a.__args__, (list[int], list[str]))
self.assertEqual(a.__parameters__, ())
def test_union_generic(self):
T = typing.TypeVar('T')
a = typing.Union[list[T], tuple[T, ...]]
self.assertEqual(a.__args__, (list[T], tuple[T, ...]))
self.assertEqual(a.__parameters__, (T,))
if __name__ == "__main__":
unittest.main()
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits
from traitsui.menu import Action, Menu as MenuManager
from pychron.pychron_constants import PLUSMINUS
# from pyface.action.group import Group
# from pyface.action.api import Group, MenuManager
# ============= standard library imports ========================
# ============= local library imports ==========================
class ContextMenuMixin(HasTraits):
use_context_menu = True
def close_popup(self):
pass
def action_factory(self, name, func, **kw):
return Action(name=name, on_perform=getattr(self, func), **kw)
def contextual_menu_contents(self):
"""
"""
save = [('PDF', 'save_pdf', {}), ('PNG', 'save_png', {})]
save_actions = [self.action_factory(n, f, **kw) for n, f, kw in save]
save_menu = MenuManager(name='Save Figure', *save_actions)
export_actions = [self.action_factory('CSV', 'export_data')]
export_menu = MenuManager(name='Export', *export_actions)
rescale = [('X', 'rescale_x_axis', {}),
('Y', 'rescale_y_axis', {}),
('Both', 'rescale_both', {})]
a = self.get_rescale_actions()
if a:
rescale.extend(a)
rescale_actions = [self.action_factory(n, f, **kw) for n, f, kw in rescale]
rescale_menu = MenuManager(name='Rescale', *rescale_actions)
contents = [save_menu, export_menu, rescale_menu]
c = self.get_child_context_menu_actions()
if c:
contents.extend(c)
return contents
def get_rescale_actions(self):
return
def get_child_context_menu_actions(self):
return
def get_contextual_menu(self):
"""
"""
ctx_menu = MenuManager(*self.contextual_menu_contents())
return ctx_menu
# class IsotopeContextMenuMixin(ContextMenuMixin):
# def set_status_omit(self):
# '''
# override this method in a subclass
# '''
# pass
#
# def set_status_include(self):
# '''
# override this method in a subclass
# '''
# pass
#
# def recall_analysis(self):
# '''
# override this method in a subclass
# '''
# pass
#
# def contextual_menu_contents(self):
#
# contents = super(IsotopeContextMenuMixin, self).contextual_menu_contents()
# contents.append(self.action_factory('Edit Analyses', 'edit_analyses'))
# actions = []
# if hasattr(self, 'selected_analysis'):
# if self.selected_analysis:
# actions.append(self.action_factory('Recall', 'recall_analysis'))
# if self.selected_analysis.status == 0:
# actions.append(self.action_factory('Omit', 'set_status_omit'))
# else:
# actions.append(self.action_factory('Include', 'set_status_include'))
# actions.append(self.action_factory('Void', 'set_status_void'))
#
# contents.append(MenuManager(name='Analysis', *actions))
#
# # contents.append(MenuManager(
# # self.action_factory('Recall', 'recall_analysis', enabled=enabled),
# # self.action_factory('Omit', 'set_status_omit', enabled=enabled),
# # self.action_factory('Include', 'set_status_include', enabled=enabled),
# # name='Analysis'))
# return contents
class RegressionContextMenuMixin(ContextMenuMixin):
def contextual_menu_contents(self):
contents = super(RegressionContextMenuMixin, self).contextual_menu_contents()
actions = [('linear', 'cm_linear'),
('parabolic', 'cm_parabolic'),
('cubic', 'cm_cubic'),
('quartic', 'cm_quartic'),
('exponential', 'cm_exponential'),
(u'average {}SD'.format(PLUSMINUS), 'cm_average_std'),
(u'average {}SEM'.format(PLUSMINUS), 'cm_average_sem')]
menu = MenuManager(*[self.action_factory(name, func) for name, func in actions],
name='Fit')
actions = [('SD', 'cm_sd'),
('SEM', 'cm_sem'),
('CI', 'cm_ci'),
('MonteCarlo', 'cm_mc')]
emenu = MenuManager(*[self.action_factory(name, func) for name, func in actions],
name='Error')
fmenu = MenuManager(self.action_factory('Show/Hide Filter Region', 'cm_toggle_filter_bounds'),
self.action_factory('Show/Hide All Filter Region', 'cm_toggle_filter_bounds_all'),
name='Filtering')
contents.append(menu)
contents.append(emenu)
contents.append(fmenu)
return contents
# ============= EOF =============================================
|
import functools
import importlib
import os
import sys
import matplotlib as mpl
from . import _backports
from ._mplcairo import (
cairo_to_premultiplied_argb32,
cairo_to_premultiplied_rgba8888,
cairo_to_straight_rgba8888,
)
@functools.lru_cache(1)
def get_tex_font_map():
return mpl.dviread.PsfontsMap(mpl.dviread.find_tex_file("pdftex.map"))
def get_glyph_name(dvitext):
ps_font = get_tex_font_map()[dvitext.font.texname]
return (_backports._parse_enc(ps_font.encoding)[dvitext.glyph]
if ps_font.encoding is not None else None)
def get_matplotlib_gtk_backend():
import gi
required = gi.get_required_version("Gtk")
if required == "4.0":
versions = [4]
elif required == "3.0":
versions = [3]
elif os.environ.get("_GTK_API"): # Private undocumented API.
versions = [int(os.environ["_GTK_API"])]
else:
versions = [4, 3]
for version in versions:
# Matplotlib converts require_version ValueErrors into ImportErrors.
try:
mod = importlib.import_module(
f"matplotlib.backends.backend_gtk{version}")
return mod, getattr(mod, f"_BackendGTK{version}")
except ImportError:
pass
raise ImportError("Failed to import any Matplotlib GTK backend")
@functools.lru_cache(1)
def fix_ipython_backend2gui(): # matplotlib#12637 (<3.1).
# Fix hard-coded module -> toolkit mapping in IPython (used for `ipython
# --auto`). This cannot be done at import time due to ordering issues (so
# we do it when creating a canvas) and should only be done once (hence the
# `lru_cache(1)`).
if sys.modules.get("IPython") is None: # Can be explicitly set to None.
return
import IPython
ip = IPython.get_ipython()
if not ip:
return
from IPython.core import pylabtools as pt
pt.backend2gui.update({
"module://mplcairo.gtk": "gtk3",
"module://mplcairo.qt": "qt",
"module://mplcairo.tk": "tk",
"module://mplcairo.wx": "wx",
"module://mplcairo.macosx": "osx",
})
# Work around pylabtools.find_gui_and_backend always reading from
# rcParamsOrig.
orig_origbackend = mpl.rcParamsOrig["backend"]
try:
mpl.rcParamsOrig["backend"] = mpl.rcParams["backend"]
ip.enable_matplotlib()
finally:
mpl.rcParamsOrig["backend"] = orig_origbackend
|
import numpy as np
import modules_disp as disp
class Module:
def sgd_step(self, lrate): pass # For modules w/o weights
class Linear(Module):
def __init__(self,m,n):
self.m, self.n = (m, n) # (in size, out size)
self.W0=np.zeros([self.n,1]) # (n x 1)
self.W = np.random.normal(0,1.0*m**(-.5),[m,n]) # (m x n)
def forward(self,A):
self.A = A # (m x b)
return np.transpose(self.W)@(self.A) + self.W0 # Your code (n x b)
def backward(self,dLdZ): # dLdZ is (n x b), uses stored self.A
        self.dLdW = self.A @ np.transpose(dLdZ)            # (m x n)
        self.dLdW0 = np.sum(dLdZ, axis=1, keepdims=True)   # (n x 1)
        return self.W @ dLdZ                               # dLdA (m x b)
def sgd_step(self, lrate): # Gradient descent step
self.W = self.W - lrate * self.dLdW # Your code
self.W0 = self.W0 - lrate * self.dLdW0 # Your code
layer=Linear(2,3)
print(layer)
print(layer.forward(np.array([[1,2,3],[2,6,4]])))
print(layer.backward(np.array([[0.5,0.5,0.5],[0.1,0.3,1.1],[0.2,0.1,0.2]])))
layer.sgd_step(0.001)
class Tanh(Module): # Layer activation
def forward(self,Z):
self.A = np.tanh(Z)
return self.A
def backward(self,dLdA): # Uses stored self.A
return None # Your code
class ReLU(Module): # Layer activation
def forward(self,Z):
self.A = None # Your code
return self.A
def backward(self,dLdA): # uses stored self.A
return None # Your code
class SoftMax(Module): # Output activation
def forward(self,Z):
return None # Your code
def backward(self,dLdZ): # Assume that dLdZ is passed in
return dLdZ
def class_fun(self, Ypred): # Return class indices
return None # Your code
class NLL(Module): # Loss
def forward(self,Ypred,Y):
self.Ypred = Ypred
self.Y = Y
return None # Your code
def backward(self): # Use stored self.Ypred, self.Y
return None # Your code (see end of 5.2)
class Sequential:
def __init__(self, modules, loss): # List of modules, loss module
self.modules = modules
self.loss = loss
def sgd(self, X, Y, iters = 100, lrate = 0.005): # Train
D,N = X.shape
for it in range(iters):
pass # Your code
def forward(self,X): # Compute Ypred
for m in self.modules: X = m.forward(X)
return X
def backward(self,delta): # Update dLdW and dLdW0
        # Note reversed list of modules
for m in self.modules[::-1]: delta = m.backward(delta)
def sgd_step(self,lrate): # Gradient descent step
for m in self.modules: m.sgd_step(lrate)
#net = Sequential([Linear(2,3), Tanh(),
# Linear(3,3), Tanh(),
# Linear(3,2), SoftMax()])
# train the network on data and labels
#net.sgd(X, Y)
######################################################################
# Data Sets
######################################################################
def super_simple_separable_through_origin():
X = np.array([[2, 3, 9, 12],
[5, 1, 6, 5]])
y = np.array([[1, 0, 1, 0]])
return X, for_softmax(y)
def super_simple_separable():
X = np.array([[2, 3, 9, 12],
[5, 2, 6, 5]])
y = np.array([[1, 0, 1, 0]])
return X, for_softmax(y)
def xor():
X = np.array([[1, 2, 1, 2],
[1, 2, 2, 1]])
y = np.array([[1, 1, 0, 0]])
return X, for_softmax(y)
def xor_more():
X = np.array([[1, 2, 1, 2, 2, 4, 1, 3],
[1, 2, 2, 1, 3, 1, 3, 3]])
y = np.array([[1, 1, 0, 0, 1, 1, 0, 0]])
return X, for_softmax(y)
def hard():
X= np.array([[-0.23390341, 1.18151883, -2.46493986, 1.55322202, 1.27621763,
2.39710997, -1.3440304 , -0.46903436, -0.64673502, -1.44029872,
-1.37537243, 1.05994811, -0.93311512, 1.02735575, -0.84138778,
-2.22585412, -0.42591102, 1.03561105, 0.91125595, -2.26550369],
[-0.92254932, -1.1030963 , -2.41956036, -1.15509002, -1.04805327,
0.08717325, 0.8184725 , -0.75171045, 0.60664705, 0.80410947,
-0.11600488, 1.03747218, -0.67210575, 0.99944446, -0.65559838,
-0.40744784, -0.58367642, 1.0597278 , -0.95991874, -1.41720255]])
y= np.array([[ 1., 1., 0., 1., 1., 1., 0., 0., 0., 0., 0., 1., 1.,
1., 0., 0., 0., 1., 1., 0.]])
return X, for_softmax(y)
def for_softmax(y):
return np.vstack([y, 1-y])
######################################################################
# Tests
######################################################################
def sgd_test(nn):
lrate = 0.005
# data
X,Y = super_simple_separable()
print('X\n', X)
print('Y\n', Y)
# define the modules
assert len(nn.modules) == 4
(l_1, f_1, l_2, f_2) = nn.modules
Loss = nn.loss
print('l_1.W\n', l_1.W)
print('l_1.W0\n', l_1.W0)
print('l_2.W\n', l_2.W)
print('l_2.W0\n', l_2.W0)
z_1 = l_1.forward(X)
print('z_1\n', z_1)
a_1 = f_1.forward(z_1)
print('a_1\n', a_1)
z_2 = l_2.forward(a_1)
print('z_2\n', z_2)
a_2 = f_2.forward(z_2)
print('a_2\n', a_2)
Ypred = a_2
loss = Loss.forward(Ypred, Y)
print('loss\n', loss)
dloss = Loss.backward()
print('dloss\n', dloss)
dL_dz2 = f_2.backward(dloss)
print('dL_dz2\n', dL_dz2)
dL_da1 = l_2.backward(dL_dz2)
print('dL_da1\n', dL_da1)
dL_dz1 = f_1.backward(dL_da1)
print('dL_dz1\n', dL_dz1)
dL_dX = l_1.backward(dL_dz1)
print('dL_dX\n', dL_dX)
l_1.sgd_step(lrate)
print('l_1.W\n', l_1.W)
print('l_1.W0\n', l_1.W0)
l_2.sgd_step(lrate)
print('l_2.W\n', l_2.W)
print('l_2.W0\n', l_2.W0)
######################################################################
# Desired output
######################################################################
'''
# sgd_test for Tanh activation and SoftMax output
# np.random.seed(0)
# sgd_test(Sequential([Linear(2,3), Tanh(), Linear(3,2), SoftMax()], NLL()))
X
[[ 2 3 9 12]
[ 5 2 6 5]]
Y
[[1 0 1 0]
[0 1 0 1]]
l_1.W
[[ 1.24737338 0.28295388 0.69207227]
[ 1.58455078 1.32056292 -0.69103982]]
l_1.W0
[[ 0.]
[ 0.]
[ 0.]]
l_2.W
[[ 0.5485338 -0.08738612]
[-0.05959343 0.23705916]
[ 0.08316359 0.8396252 ]]
l_2.W0
[[ 0.]
[ 0.]]
z_1
[[ 10.41750064 6.91122168 20.73366505 22.8912344 ]
[ 7.16872235 3.48998746 10.46996239 9.9982611 ]
[ -2.07105455 0.69413716 2.08241149 4.84966811]]
a_1
[[ 1. 0.99999801 1. 1. ]
[ 0.99999881 0.99814108 1. 1. ]
[-0.96871843 0.60063321 0.96941021 0.99987736]]
z_2
[[ 0.40837833 0.53900088 0.56956001 0.57209377]
[-0.66368766 0.65353931 0.96361427 0.98919526]]
a_2
[[ 0.74498961 0.47139666 0.4027417 0.39721055]
[ 0.25501039 0.52860334 0.5972583 0.60278945]]
loss
2.3475491206369514
dloss
[[-0.25501039 0.47139666 -0.5972583 0.39721055]
[ 0.25501039 -0.47139666 0.5972583 -0.39721055]]
dL_dz2
[[-0.25501039 0.47139666 -0.5972583 0.39721055]
[ 0.25501039 -0.47139666 0.5972583 -0.39721055]]
dL_da1
[[-0.16216619 0.29977053 -0.37980845 0.2525941 ]
[ 0.07564949 -0.13984104 0.17717822 -0.11783354]
[ 0.19290557 -0.35659347 0.45180297 -0.30047453]]
dL_dz1
[[ -5.80088442e-10 1.19079549e-06 -0.00000000e+00 0.00000000e+00]
[ 1.79552879e-07 -5.19424389e-04 5.70658808e-10 -9.74876621e-10]
[ 1.18800113e-02 -2.27948719e-01 2.72183509e-02 -7.36963862e-05]]
dL_dX
[[ 8.22187641e-03 -1.57902474e-01 1.88370660e-02 -5.10035008e-05]
[ -8.20932462e-03 1.56837595e-01 -1.88089635e-02 5.09258498e-05]]
l_1.W
[[ 1.24737336 0.28296167 0.69415229]
[ 1.58455077 1.32056811 -0.68987204]]
l_1.W0
[[ -5.95107701e-09]
[ 2.59622620e-06]
[ 9.44620265e-04]]
l_2.W
[[ 0.54845212 -0.08730444]
[-0.05967074 0.23713647]
[ 0.08142188 0.84136692]]
l_2.W0
[[ -8.16925787e-05]
[ 8.16925787e-05]]
'''
'''
# Compare to the results above
np.random.seed(0)
nn = Sequential([Linear(2, 3), Tanh(), Linear(3,2), SoftMax()], NLL())
# These should match the initial weights above
print('-----------------')
print(nn.modules[0].W, '\n', nn.modules[0].W0)
print(nn.modules[2].W, '\n', nn.modules[2].W0)
# Run one iteration
nn.sgd(X,Y, iters = 1, lrate=lrate)
# These should match the final weights above
print('-----------------')
print(nn.modules[0].W, '\n', nn.modules[0].W0)
print(nn.modules[2].W, '\n', nn.modules[2].W0)
'''
'''
# sgd_test for ReLU activation and SoftMax output
# np.random.seed(0)
# sgd_test(Sequential([Linear(2,3), ReLU(), Linear(3,2), SoftMax()], NLL()))
X
[[ 2 3 9 12]
[ 5 2 6 5]]
Y
[[1 0 1 0]
[0 1 0 1]]
l_1.W
[[ 1.24737338 0.28295388 0.69207227]
[ 1.58455078 1.32056292 -0.69103982]]
l_1.W0
[[ 0.]
[ 0.]
[ 0.]]
l_2.W
[[ 0.5485338 -0.08738612]
[-0.05959343 0.23705916]
[ 0.08316359 0.8396252 ]]
l_2.W0
[[ 0.]
[ 0.]]
z_1
[[ 10.41750064 6.91122168 20.73366505 22.8912344 ]
[ 7.16872235 3.48998746 10.46996239 9.9982611 ]
[ -2.07105455 0.69413716 2.08241149 4.84966811]]
a_1
[[ 10.41750064 6.91122168 20.73366505 22.8912344 ]
[ 7.16872235 3.48998746 10.46996239 9.9982611 ]
[ 0. 0.69413716 2.08241149 4.84966811]]
z_2
[[ 5.28714248 3.64078533 10.92235599 12.36410102]
[ 0.78906625 0.80620366 2.41861097 4.44170662]]
a_2
[[ 9.88992134e-01 9.44516196e-01 9.99797333e-01 9.99637598e-01]
[ 1.10078665e-02 5.54838042e-02 2.02666719e-04 3.62401857e-04]]
loss
10.8256925657554
dloss
[[ -1.10078665e-02 9.44516196e-01 -2.02666719e-04 9.99637598e-01]
[ 1.10078665e-02 -9.44516196e-01 2.02666719e-04 -9.99637598e-01]]
dL_dz2
[[ -1.10078665e-02 9.44516196e-01 -2.02666719e-04 9.99637598e-01]
[ 1.10078665e-02 -9.44516196e-01 2.02666719e-04 -9.99637598e-01]]
dL_da1
[[ -7.00012165e-03 6.00636672e-01 -1.28879806e-04 6.35689470e-01]
[ 3.26551207e-03 -2.80193173e-01 6.01216067e-05 -2.96545080e-01]
[ 8.32702834e-03 -7.14490239e-01 1.53309592e-04 -7.56187463e-01]]
dL_dz1
[[ -7.00012165e-03 6.00636672e-01 -1.28879806e-04 6.35689470e-01]
[ 3.26551207e-03 -2.80193173e-01 6.01216067e-05 -2.96545080e-01]
[ 0.00000000e+00 -7.14490239e-01 1.53309592e-04 -7.56187463e-01]]
dL_dX
[[ -7.80777608e-03 1.75457571e-01 -3.76482800e-05 1.85697170e-01]
[ -6.77973405e-03 1.07546779e+00 -2.30765264e-04 1.13823145e+00]]
l_1.W
[[ 1.20029826 0.30491412 0.74815397]
[ 1.56283104 1.33069504 -0.66499483]]
l_1.W0
[[-0.00614599]
[ 0.00286706]
[ 0.00735262]]
l_2.W
[[ 0.40207469 0.05907299]
[-0.1256432 0.30310892]
[ 0.05564803 0.86714076]]
l_2.W0
[[-0.00966472]
[ 0.00966472]]
'''
'''
X, Y = hard()
nn = Sequential([Linear(2, 10), ReLU(), Linear(10, 10), ReLU(), Linear(10,2), SoftMax()], NLL())
disp.classify(X, Y, nn, it=100000)
'''
#######
# Test cases
######
def nn_tanh_test():
np.random.seed(0)
nn = Sequential([Linear(2,3), Tanh(), Linear(3,2), SoftMax()], NLL())
X,Y = super_simple_separable()
nn.sgd(X,Y, iters = 1, lrate=0.005)
return [np.vstack([nn.modules[0].W, nn.modules[0].W0.T]).tolist(),
np.vstack([nn.modules[2].W, nn.modules[2].W0.T]).tolist()]
def nn_relu_test():
np.random.seed(0)
nn = Sequential([Linear(2,3), ReLU(), Linear(3,2), SoftMax()], NLL())
X,Y = super_simple_separable()
nn.sgd(X,Y, iters = 2, lrate=0.005)
return [np.vstack([nn.modules[0].W, nn.modules[0].W0.T]).tolist(),
np.vstack([nn.modules[2].W, nn.modules[2].W0.T]).tolist()]
def nn_pred_test():
np.random.seed(0)
nn = Sequential([Linear(2,3), ReLU(), Linear(3,2), SoftMax()], NLL())
X,Y = super_simple_separable()
nn.sgd(X,Y, iters = 1, lrate=0.005)
Ypred = nn.forward(X)
return nn.modules[-1].class_fun(Ypred).tolist(), [nn.loss.forward(Ypred, Y)]
|
import discord
import random
import configparser
config = configparser.ConfigParser()
def loadconfig():
config = configparser.ConfigParser()
config.read('config.sffc')
Version = config['Data']['version']
token = config['Data']['token']
alttoken = config['Data']['alttoken']
prefix = config['Data']['prefix']
currency = config['Data']['currency']
    return Version, token, alttoken, prefix, currency
def cserverconfig(sID, sCH, sMO):
config = configparser.ConfigParser()
config.add_section("Data")
config.add_section("Flags")
config['Data']['Version'] = '1'
config['Data']['Server ID'] = sID
config['Flags']['SuggestChannel'] = sCH
config['Data']['Moderators'] = sMO
with open(sID + '.sffs', 'w') as configfile:
config.write(configfile)
def lserverconfig(sID):
    config = configparser.ConfigParser()
    config.read(sID + '.sffs')
    sID = config['Data']['Server ID']
    sCH = config['Flags']['SuggestChannel']
    sMO = config['Data']['Moderators']
    return sID, sCH, sMO
def cuserconfig(money, uID): #Actually just writes money
config = configparser.ConfigParser()
config.add_section("Metadata")
config.add_section("Economy")
config['Metadata']['UserID'] = uID
config['Metadata']['Version'] = '1'
config['Economy']['money'] = money
with open(uID + '.sffu', 'w') as configfile:
config.write(configfile)
def luserconfig(uID): #Actually just returns amount of money
config = configparser.ConfigParser()
config.read(uID + '.sffu')
Money = config['Economy']['money']
return Money
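# Illustrative round trip (writes '<uID>.sffu' in the working directory;
# the user id and balance below are hypothetical, and values are stored as strings):
#   cuserconfig('100', '123456789')
#   luserconfig('123456789')  ->  '100'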
|
import functools
import gc
def clear_all_lru_caches():
gc.collect()
wrappers = [
a for a in gc.get_objects() if isinstance(a, functools._lru_cache_wrapper)
]
for wrapper in wrappers:
wrapper.cache_clear()
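# Illustrative check (the decorated function below is hypothetical):
#   @functools.lru_cache()
#   def square(x):
#       return x * x
#   square(3)
#   assert square.cache_info().currsize == 1
#   clear_all_lru_caches()
#   assert square.cache_info().currsize == 0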
|
"""
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2016 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
from cclib.io.ccio import ccopen
from cclib.io.cjsonwriter import CJSON
def getMetaData():
metaData = {}
metaData['inputFormat'] = 'cjson'
metaData['outputFormat'] = 'cjson'
metaData['operations'] = ['read']
metaData['identifier'] = 'CJSON writer'
metaData['name'] = 'CJSON'
metaData['description'] = "The cclib script provided by the cclib repository is used to " +\
"write the CJSON format using the input file provided " +\
"to Avogadro2."
metaData['fileExtensions'] = ['out', 'log', 'adfout', 'g09']
metaData['mimeTypes'] = ['']
return metaData
def read():
# Pass the standard input to ccopen:
log = ccopen(sys.stdin)
ccdata = log.parse()
output_obj = CJSON(ccdata, terse=True)
output = output_obj.generate_repr()
return output
if __name__ == "__main__":
parser = argparse.ArgumentParser('Read files using cclib')
parser.add_argument('--metadata', action='store_true')
parser.add_argument('--read', action='store_true')
parser.add_argument('--write', action='store_true')
parser.add_argument('--display-name', action='store_true')
parser.add_argument('--lang', nargs='?', default='en')
args = vars(parser.parse_args())
if args['metadata']:
print(json.dumps(getMetaData()))
elif args['display_name']:
print(getMetaData()['name'])
elif args['read']:
print(read())
elif args['write']:
pass
|