from django.contrib.auth.models import BaseUserManager
class AccountManager(BaseUserManager):
"""
Custom user model manager where email is the unique identifier
for authentication instead of usernames.
"""
def create_user(self, email, password, **extra_fields):
"""
Create and save a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address!')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_staff(self, email, password, **extra_fields):
"""
Create and save a Staff User with the given email and password.
"""
user = self.create_user(email, password, **extra_fields)
user.is_staff = True
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""
Create and save a SuperUser with the given email and password.
"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
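# --- Usage sketch (not part of the original manager) ---
# A minimal example of wiring this manager into a custom user model. The model
# name "Account" and its fields are assumptions for illustration; the essential
# pieces are USERNAME_FIELD = 'email', objects = AccountManager(), and pointing
# AUTH_USER_MODEL at the model in settings.py.
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models

class Account(AbstractBaseUser, PermissionsMixin):
    email = models.EmailField(unique=True)
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)

    objects = AccountManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

# settings.py (assuming the app is called "accounts"):
# AUTH_USER_MODEL = 'accounts.Account'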
|
#! python3
# tests.py
import json
import os
import unittest
import selenium
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
class TestClassFanGraphs(unittest.TestCase):
def test_files_exist(self):
directories = {
'leaders': [
'menu.txt', 'dropdown.txt', 'checkbox.txt', 'button.txt']
}
for dirname in directories:
self.assertTrue(
os.path.exists(os.path.join('data', dirname)))
self.assertEqual(
set(os.listdir(os.path.join('data', dirname))),
set(directories[dirname]))
class TestFanGraphsLeadersSettings(unittest.TestCase):
def setUp(self):
with open(os.path.join('data', 'base_address.txt')) as file:
self.url = json.load(file).get("leaders")
options = Options()
options.headless = True
self.browser = webdriver.Firefox(options=options)
def tearDown(self):
self.browser.quit()
def test_leaders_address(self):
self.browser.get(self.url)
self.assertIn("Leaderboards", self.browser.title)
def test_find_data_configuration_elements(self):
self.browser.get(self.url)
files = ['menu.txt', 'dropdown.txt', 'checkbox.txt', 'button.txt']
for filename in files:
with open(os.path.join('data', 'leaders', filename)) as file:
data = json.load(file)
for select in data:
self.assertEqual(
len(self.browser.find_elements_by_id(data[select])),
1, data[select])
def test_find_export_data_elements(self):
self.browser.get(self.url)
export_data_button = self.browser.find_element_by_id(
"LeaderBoard1_cmdCSV")
self.assertEqual(export_data_button.text, "Export Data")
def test_find_popup_elements(self):
self.browser.get(self.url)
while True:
try:
close_popup_button = self.browser.find_element_by_css_selector(
"span[class='ezmob-footer-close']")
break
except selenium.common.exceptions.NoSuchElementException:
self.browser.refresh()
continue
self.assertEqual(close_popup_button.text, "x")
popup = self.browser.find_element_by_id("ezmobfooter")
self.assertEqual(popup.get_attribute("style"), "")
close_popup_button.click()
self.assertNotEqual(popup.get_attribute("style"), "")
if __name__ == '__main__':
unittest.main()
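# --- Fixture sketch (assumption, not part of the original suite) ---
# The tests above read JSON files under data/ that are not shown in this file.
# The snippet below only illustrates the layout they expect: a "leaders" URL in
# data/base_address.txt and element-id maps in data/leaders/*.txt. The URL and
# element ids written here are placeholders, not the project's real fixtures.
import json
import os

os.makedirs(os.path.join('data', 'leaders'), exist_ok=True)
with open(os.path.join('data', 'base_address.txt'), 'w') as f:
    json.dump({'leaders': 'https://www.fangraphs.com/leaders.aspx'}, f)  # placeholder URL
for name in ('menu.txt', 'dropdown.txt', 'checkbox.txt', 'button.txt'):
    with open(os.path.join('data', 'leaders', name), 'w') as f:
        json.dump({'example_element': 'SomeElementId'}, f)  # placeholder element ids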
|
# -*- coding: utf-8 -*-
"""Tests for `statemachine` processors."""
import os
import cv2
from gabrieltool.statemachine import processor_zoo
def drawPred(frame, class_name, conf, left, top, right, bottom):
# Draw a bounding box.
cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))
label = '%.2f' % conf
label = '%s: %s' % (class_name, label)
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, labelSize[1])
cv2.rectangle(frame, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
return frame
def test_FasterRCNNOpenCVProcessor():
"""Test FasterRCNNOpenCV Processor.
Needs the data directory.
"""
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/sandwich-model')
labels = ["tomato", "cheese", "full", "ham", "lettuce", "cucumber", "half", "hamwrong", "bread"]
if os.path.exists(data_dir) and os.path.isdir(data_dir):
proc = processor_zoo.FasterRCNNOpenCVCallable(
proto_path=os.path.join(data_dir, 'faster_rcnn_test.pt'),
model_path=os.path.join(data_dir, 'model.caffemodel'),
labels=labels
)
im = cv2.imread(os.path.join(data_dir, 'test.jpg'))
app_state = proc(im)
for (cls_name, objects) in app_state.items():
for (left, top, right, bottom, confidence, classId) in objects:
drawPred(im, cls_name, confidence, left, top, right, bottom)
cv2.imwrite('tested.jpg', im)
assert("ham" in app_state)
|
import traceback
from flask import Flask, request, jsonify
from simulator import config, utils
from simulator.exchange import Poloniex
from simulator.order_handler import CoreOrder, SimulationOrder
from simulator.balance_handler import BalanceHandler
api = Flask(__name__)
logger = utils.get_logger()
@api.route('/public')
def public():
params = request.args.to_dict()
command = params.get('command')
if command == 'returnOrderBook':
params['timestamp'] = utils.get_timestamp()
try:
ob = poloniex.order_book_api(**params)
return jsonify(ob)
except Exception as e:
logger.error(e)
return jsonify({
'error': str(e)
})
else:
return jsonify({
'error': 'command {} is not supported'.format(command)
})
@api.route('/tradingApi')
def trading_api():
try:
api_key = request.headers.get('Key')
if not api_key:
raise ValueError('Key is missing ')
params = request.args.to_dict()
params['api_key'] = api_key
command = params.get('command')
params['timestamp'] = utils.get_timestamp(request.args.to_dict())
if command == 'returnBalances':
output = poloniex.get_balance_api(**params)
elif command == 'returnDepositsWithdrawals':
output = poloniex.get_history_api(**params)
elif command == 'returnOpenOrders':
output = poloniex.get_open_orders_api(**params)
elif command == 'sell' or command == 'buy':
params['type'] = command
output = poloniex.trade_api(**params)
elif command == 'cancelOrder':
output = poloniex.cancel_order_api(**params)
elif command == 'withdraw':
output = poloniex.withdraw_api(**params)
else:
return jsonify({
'error': 'command {} is not supported'.format(command)
})
return jsonify(output)
except Exception as e:
# traceback.print_exc()
logger.error(e)
return jsonify({'error': str(e)})
rdb = utils.get_redis_db()
if config.MODE == 'simulation':
order_handler = SimulationOrder(rdb)
else:
order_handler = CoreOrder()
supported_tokens = config.SUPPORTED_TOKENS
balance_handler = BalanceHandler(rdb, supported_tokens.keys())
poloniex = Poloniex(
'liqui',
config.PRIVATE_KEY['poloniex'],
list(supported_tokens.values()),
rdb,
order_handler,
balance_handler,
config.POLONIEX_ADDRESS
)
if __name__ == '__main__':
logger.info('Running in {} mode'.format(config.MODE))
api.run(host='0.0.0.0', port=5500, debug=True)
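# --- Client sketch (assumption, not part of the simulator) ---
# Example of how a client might exercise the two endpoints above once the
# simulator is running on localhost:5500. The extra query parameters
# (currencyPair, depth) mirror Poloniex's real public API and are assumptions
# about what order_book_api() accepts.
import requests

resp = requests.get(
    'http://localhost:5500/public',
    params={'command': 'returnOrderBook', 'currencyPair': 'BTC_ETH', 'depth': 10},
)
print(resp.json())

resp = requests.get(
    'http://localhost:5500/tradingApi',
    params={'command': 'returnBalances'},
    headers={'Key': 'my-api-key'},  # any non-empty key passes the check above
)
print(resp.json())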
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder.api.contrib import used_limits
from cinder.api.openstack import wsgi
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
class FakeRequest(object):
def __init__(self, context):
self.environ = {'cinder.context': context}
class UsedLimitsTestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(UsedLimitsTestCase, self).setUp()
self.controller = used_limits.UsedLimitsController()
@mock.patch('cinder.quota.QUOTAS.get_project_quotas')
@mock.patch('cinder.policy.enforce')
def test_used_limits(self, _mock_policy_enforce, _mock_get_project_quotas):
fake_req = FakeRequest(fakes.FakeRequestContext('fake', 'fake'))
obj = {
"limits": {
"rate": [],
"absolute": {},
},
}
res = wsgi.ResponseObject(obj)
quota_map = {
'totalVolumesUsed': 'volumes',
'totalGigabytesUsed': 'gigabytes',
'totalSnapshotsUsed': 'snapshots',
}
limits = {}
for display_name, q in quota_map.items():
limits[q] = {'limit': 2,
'in_use': 1}
_mock_get_project_quotas.return_value = limits
# allow user to access used limits
_mock_policy_enforce.return_value = None
self.controller.index(fake_req, res)
abs_limits = res.obj['limits']['absolute']
for used_limit, value in abs_limits.items():
self.assertEqual(value,
limits[quota_map[used_limit]]['in_use'])
obj = {
"limits": {
"rate": [],
"absolute": {},
},
}
res = wsgi.ResponseObject(obj)
# disallow the user from accessing used limits
_mock_policy_enforce.side_effect = exception.NotAuthorized
self.controller.index(fake_req, res)
abs_limits = res.obj['limits']['absolute']
self.assertNotIn('totalVolumesUsed', abs_limits)
self.assertNotIn('totalGigabytesUsed', abs_limits)
self.assertNotIn('totalSnapshotsUsed', abs_limits)
|
# Generated by Django 2.2.8 on 2020-06-21 19:30
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('donations', '0016_auto_20200621_1609'),
]
operations = [
migrations.AddField(
model_name='donationrequest',
name='allowed_blood_groups',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='donationrequest',
name='created',
field=models.DateTimeField(default=datetime.datetime(2020, 6, 21, 19, 30, 16, 241260, tzinfo=utc), max_length=255),
),
]
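# --- Note (sketch, not part of the generated migration) ---
# The datetime.datetime(2020, 6, 21, ...) default above is simply the moment
# `makemigrations` was run: when the model supplies an already-evaluated value
# (e.g. timezone.now() with parentheses) rather than a callable, Django bakes
# that fixed timestamp into the migration. A model definition along these
# lines (an assumption about the actual DonationRequest model) avoids that:
#
# from django.db import models
# from django.utils import timezone
#
# class DonationRequest(models.Model):
#     created = models.DateTimeField(default=timezone.now)
#     # or: created = models.DateTimeField(auto_now_add=True)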
|
# Generated by Django 2.0.3 on 2018-03-21 18:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patientbasicinfo', '0012_auto_20180321_1827'),
]
operations = [
migrations.AddField(
model_name='identity',
name='email',
field=models.EmailField(blank=True, db_index=True, max_length=254),
),
]
|
def calculate_amstrong_numbers_3_digits():
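# An Armstrong (narcissistic) number equals the sum of the cubes of its own
# digits, e.g. 153 = 1**3 + 5**3 + 3**3 = 1 + 125 + 27.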
result = []
for item in range(100, 1000):
total = 0
for number in str(item):
total += int(number) ** 3
if total == item:
result.append(item)
return result
print("3-digit Amstrong Numbers:")
print(calculate_amstrong_numbers_3_digits())
|
# N towns and N-1 roads, so there is no cycle: the graph is a tree.
# Compute each node's distance from the root; in a tree the path length between
# c and d is dist[c] + dist[d] - 2*dist[lca], so its parity equals the parity of
# dist[c] + dist[d]. Whether that sum is even decides the answer.
from collections import defaultdict
from collections import deque
N, Q = map(int, input().split())
connect_nodes = defaultdict(set)
for i in range(N - 1):
a, b = map(int, input().split())
connect_nodes[a].add(b)
connect_nodes[b].add(a)
dist = [-1 for _ in range(N + 1)]  # distance from node 1; -1 if unreachable
is_visited = [False for _ in range(N + 1)]  # visited flags
# function breadth_first_search(v)
search_deque = deque()  # Q <- empty queue
is_visited[1] = True  # mark v as visited
search_deque.append(1)  # add v to Q
dist[1] = 0
while len(search_deque) != 0:  # while Q is not empty do
node = search_deque.popleft()  # v <- take from Q
# process v
for connect_node in connect_nodes[node]:  # for each vertex i adjacent to v do
if not is_visited[connect_node]:  # if i is unvisited then
is_visited[connect_node] = True  # mark i as visited
search_deque.append(connect_node)  # add i to Q
dist[connect_node] = dist[node] + 1  # (edge weight fixed at 1; if weights differ, change this and the input-reading code)
for i in range(Q):
c, d = map(int, input().split())
if (dist[c] + dist[d]) % 2 == 1:
print("Road")
else:
print("Town")
|
# coding: utf-8
# # Permute Hetnets for Interpreting Compressed Latent Spaces
#
# Modified from @dhimmel - https://github.com/dhimmel/integrate/blob/master/permute.ipynb
#
# Generate several randomly permuted hetnets to serve as a null distribution. The permutations preserve node degree but randomize connections between nodes. See [Himmelstein et al. 2017](https://doi.org/10.7554/eLife.26726) for more details.
# In[1]:
import os
import pandas as pd
import hetio.readwrite
import hetio.permute
# In[2]:
get_ipython().run_cell_magic('time', '', "hetnet_path = os.path.join('hetnets', 'interpret_hetnet.json.bz2')\ngraph = hetio.readwrite.read_graph(hetnet_path)")
# In[3]:
# Selected as a result of `scripts/evaluate-permutations.ipynb`
num_permuted_hetnets = 10
num_swaps = 4
# In[4]:
get_ipython().run_cell_magic('time', '', "stat_dfs = list()\npermuted_graph = graph\n \nfor i in range(num_permuted_hetnets):\n i += 1\n print('Starting permutation', i)\n permuted_graph, stats = hetio.permute.permute_graph(permuted_graph,\n multiplier=num_swaps,\n seed=i)\n stat_df = pd.DataFrame(stats)\n stat_df['permutation'] = i\n stat_dfs.append(stat_df)\n perm_path = os.path.join('hetnets', 'permuted',\n 'interpret_hetnet_perm-{}.json.bz2'.format(i))\n hetio.readwrite.write_graph(permuted_graph, perm_path)")
# In[5]:
# Save stats
stat_df = pd.concat(stat_dfs)
stat_path = os.path.join('hetnets', 'permuted', 'stats.tsv')
stat_df.to_csv(stat_path, sep='\t', index=False, float_format='%.5g')
|
#!/usr/bin/env python3
# Extracts GWAS summary-statistic lines at, and within a window around, positions listed in a BED file, and writes PLINK-style .assoc files
import sys
import pandas as pd
import argparse
import numpy as np
def readbed(filebed, wind):
readbed=open(filebed)
dicpos={}
dicrange={}
for line in readbed:
spll=line.replace('\n','').split()
if spll[0] not in dicpos :
dicpos[spll[0]]=[]
dicrange[spll[0]]=[]
bp=int(spll[1])
dicpos[spll[0]].append(bp)
dicrange[spll[0]].append([max(0,bp-wind),bp+wind,bp])
return (dicpos, dicrange)
# extract_posgwas.py --bed $pos --gwas $gwas --chr_gwas ${params.head_chr} --ps_gwas ${params.head_bp} --a1_gwas ${params.head_a1} --a2_gwas ${params.head_a2} --wind ${params.size_win_kb} --pval_gwas ${params.head_pval} --rs_gwas ${params.head_rs}
def parseArguments():
parser = argparse.ArgumentParser(description='extract GWAS results at/around positions listed in a bed file')
parser.add_argument('--gwas',type=str,required=True)
parser.add_argument('--bed',type=str,required=False)
parser.add_argument('--chr_gwas', type=str,help="comma separated list of covariates",default="")
parser.add_argument('--ps_gwas', type=str,help="comma separated list of covariates",default="")
parser.add_argument('--a1_gwas',type=str,required=True,help="comma separated list of pheno column")
parser.add_argument('--a2_gwas', type=str,required=True,help="output fam file")
parser.add_argument('--wind', type=float,required=True,help="output covariate file")
parser.add_argument('--rs_gwas', required=True,type=str,help="output covariate file")
parser.add_argument('--af_gwas', type=str,help="output covariate file")
parser.add_argument('--pval_gwas', type=str,help="output covariate file")
parser.add_argument('--out', type=str,help="output covariate file")
args = parser.parse_args()
return args
def checkpos(chro, pos, infopos):
if chro in infopos and int(pos) in infopos[chro]:
return True
return False
def checkrange(chro, pos, inforange):
if chro in inforange :
for rangei in inforange[chro]:
if pos>=rangei[0] and pos <rangei[1]:
return True
return False
args=parseArguments()
(infopos,inforange)=readbed(args.bed, (args.wind+10)*1000)
read_gwas=open(args.gwas)
gwashead=read_gwas.readline().replace('\n','')
gwasspl=gwashead.split()
chrgwas=gwasspl.index(args.chr_gwas)
posgwas=gwasspl.index(args.ps_gwas)
rsgwas=gwasspl.index(args.rs_gwas)
a1gwas=gwasspl.index(args.a1_gwas)
a2gwas=gwasspl.index(args.a2_gwas)
pvalgwas=gwasspl.index(args.pval_gwas)
#afgwas=gwasspl.index(args.a2_gwas)
if args.af_gwas :
headmaf="FRQ"
headmafpos=gwasspl.index(args.af_gwas)
ListParam=[chrgwas,rsgwas, posgwas, a1gwas, a2gwas,headmafpos,pvalgwas]
ListHeadPlk=["CHR","SNP", "BP", "A1", "A2", headmafchr,"P"]
else :
ListParam=[chrgwas,rsgwas, posgwas, a1gwas, a2gwas,pvalgwas]
ListHeadPlk=["CHR","SNP", "BP", "A1", "A2", "P"]
writepos=open(args.out+'_pos.init','w')
writerange=open(args.out+'_range.init','w')
writepos_plk=open(args.out+'_pos.assoc','w')
writerange_plk=open(args.out+'_range.assoc','w')
writerange_bed=open(args.out+'_range.bed','w')
#writeall_plk=open(args.out+'_all.assoc','w')
writepos.write(gwashead+'\n')
writerange.write(gwashead+'\n')
#dataformatplk<-allinfo[,c('chro','rs_bim', 'bp','A1', 'A2', 'risk.allele.af', 'beta.cat', 'sd.cat', 'pvalue')]
#names(dataformatplk)<-c("CHR", "SNP", "BP","A1", "A2", "FRQ", "BETA", "SE", "P")
writepos_plk.write("\t".join(ListHeadPlk)+'\n')
writerange_plk.write("\t".join(ListHeadPlk)+'\n')
#writeall_plk.write("\t".join(ListHeadPlk)+'\n')
for line in read_gwas :
line=line.replace('\n','')
spl=line.replace('\n','').split()
chro=spl[chrgwas]
pos=int(spl[posgwas])
plkchar="\t".join([spl[x] for x in ListParam])
#writeall_plk.write(plkchar)
if checkrange(chro, pos, inforange) :
writerange.write(line+'\n')
writerange_plk.write(plkchar+'\n')
writerange_bed.write(chro+"\t"+str(pos)+"\t"+str(pos)+'\n')
if checkpos(chro, pos, infopos):
writepos.write(line+'\n')
writepos_plk.write(plkchar+'\n')
writepos.close()
writerange.close()
writepos_plk.close()
writerange_plk.close()
#writeall_plk.close()
|
class ResizingArrayStack(object):
"""
"""
def __init__(self):
self.n = 0
self.capacity = 2
self.resizing_array = [None] * self.capacity
def __len__(self):
return self.n
def __contains__(self, i):
"""
>>> stack = ResizingArrayStack()
>>> stack.push('a')
>>> stack.push('b')
>>> 'a' in stack
True
>>> 'b' in stack
True
>>> 'c' in stack
False
"""
for j in self:
if i == j:
return True
return False
def __iter__(self):
"""
>>> stack = ResizingArrayStack()
>>> stack.push('a')
>>> stack.push('b')
>>> for i in stack:
... print(i)
...
b
a
"""
n = self.n
while n > 0:
n -= 1
yield self.resizing_array[n]
def __str__(self):
"""
>>> stack = ResizingArrayStack()
>>> stack.push('a')
>>> stack.push('b')
>>> stack
ResizingArrayStack(['b', 'a'])
>>> print(stack)
ResizingArrayStack(['b', 'a'])
"""
return 'ResizingArrayStack([{}])'.format(', '.join(repr(i) for i in self))
__repr__ = __str__
def push(self, item):
"""
>>> stack = ResizingArrayStack()
>>> stack.push('a')
>>> stack.push('b')
>>> len(stack)
2
>>> stack.capacity
2
>>> stack.n
2
>>> stack.push('c')
>>> stack.push('d')
>>> len(stack)
4
>>> stack.capacity
4
>>> stack.push('e')
>>> stack.push('f')
>>> len(stack)
6
>>> stack.capacity
8
>>> stack.push('g')
>>> stack.push('h')
>>> len(stack)
8
>>> stack.capacity
8
"""
if self.n == self.capacity:
self._resize(self.capacity*2)
self.resizing_array[self.n] = item
self.n += 1
def pop(self):
"""
>>> stack = ResizingArrayStack()
>>> stack.push('a')
>>> stack.push('b')
>>> stack.push('c')
>>> stack.push('d')
>>> stack.push('e')
>>> stack.push('f')
>>> stack.push('g')
>>> stack.push('h')
>>> len(stack)
8
>>> stack.pop()
'h'
>>> stack.pop()
'g'
>>> stack.pop()
'f'
>>> stack.pop()
'e'
>>> stack.pop()
'd'
>>> stack.pop()
'c'
>>> len(stack)
2
>>> stack.capacity
8
>>> stack.pop()
'b'
>>> len(stack)
1
>>> stack.capacity
4
>>> stack.pop()
'a'
>>> len(stack)
0
>>> stack.capacity
2
"""
if len(self) == 0:
raise IndexError('pop from empty stack')
if len(self) * 4 <= self.capacity:
m = int(self.capacity / 2)
self._resize(m)
self.n -= 1
return self.resizing_array[self.n]
@property
def top(self):
"""
>>> stack = ResizingArrayStack()
>>> stack.push('a')
>>> stack.top
'a'
>>> stack.push('b')
>>> stack.top
'b'
"""
if len(self) == 0:
raise IndexError('top of empty stack')
return self.resizing_array[self.n-1]
def _resize(self, m):
"""
>>> stack = ResizingArrayStack()
>>> stack.push('a')
>>> stack.push('b')
>>> stack._resize(6)
>>> stack.resizing_array
['a', 'b', None, None, None, None]
>>> stack.capacity
6
>>> len(stack)
2
>>> stack
ResizingArrayStack(['b', 'a'])
"""
resizing_array = [None] * m
for i in range(self.n):
resizing_array[i] = self.resizing_array[i]
self.resizing_array = resizing_array
self.capacity = m
if __name__ == '__main__':
import doctest
doctest.testmod()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 1 16:54:46 2021
@author: vanch
"""
import argparse
import FileToDataFrame as ftd
import matplotlib.pyplot as plt
def getTopMsgs(people_name, data_frame):
top_list = []
col_map = plt.get_cmap('Paired')
for name in people_name:
tmp_df_by_name = data_frame.loc[data_frame.Name == name]
index = tmp_df_by_name.index
number_of_rows = len(index)
top_list.append([name, number_of_rows])
ordered_list = sorted(top_list,key=lambda l:l[1], reverse=True)
names_ordered, msgs = zip(*ordered_list)
plt.figure(figsize=[14, 7])
plt.bar(names_ordered, msgs, width=0.8, color=col_map.colors, edgecolor='k',
linewidth=2)
plt.savefig('Top_Bars.png')
print(*ordered_list, sep = "\n")
def plotMsgsPerDayPerPerson(people_name, data_frame):
first = True
plt.figure()
for name in people_name:
data_frame[name] = data_frame['Name'].eq(name).cumsum()
if first:
plt.figure();
msg_cnt_x = data_frame[name].plot()
first = False
else:
data_frame[name].plot(ax=msg_cnt_x)
msg_cnt_x.legend(people_name);
fig = msg_cnt_x.get_figure()
fig.savefig('msgCountPerPerson.png')
def plotMsgsPerDay(data_frame):
plt.figure()
messages_per_day = data_frame.groupby([data_frame.index.year, data_frame.index.month, data_frame.index.day]).agg('count')
messages_per_day.Msg.plot()
def plotTotalMsgs(people_name, data_frame):
plt.figure()
data_frame["Total"] = 0
for name in people_name:
data_frame["Total"] += data_frame[name]
total_data_plot = data_frame['Total'].plot()
total_data_plot.legend(["Total"])
fig = total_data_plot.get_figure()
fig.savefig('TotalMsgs.png')
def plotBarMsgsByMonth(data_frame):
plt.figure();
messages_per_month = data_frame.groupby([data_frame.index.year, data_frame.index.month]).agg('count')
monthly_plot = messages_per_month.Name.plot.bar()
monthly_plot.legend(["Grouped"])
fig2 = monthly_plot.get_figure()
fig2.savefig('Monthly.png')
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename', default='')
args = parser.parse_args()
print (args.filename)
df = ftd.FileToDataFrame(args.filename or "../../samples/Gunner.txt")
data = df.Df
data.set_index('Date', inplace =True)
#Totals per person
people_name = data.Name.unique()
getTopMsgs(people_name, data)
plotMsgsPerDayPerPerson(people_name, data)
#Total Messages per day
plotMsgsPerDay(data)
plotBarMsgsByMonth(data)
plotTotalMsgs(people_name, data)
from collections import Counter
mostCommonWords= Counter(" ".join(data["Msg"]).split()).most_common(100)
mostCommonCh = Counter(" ".join(data["Msg"])).most_common(10000)
|
#
# PySNMP MIB module IF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///usr/share/snmp/mibs/ietf/IF-MIB
# Produced by pysmi-0.3.2 at Tue Apr 7 17:35:33 2020
# On host sensei platform Linux version 4.19.97-v7l+ by user nagios
# Using Python version 3.7.3 (default, Dec 20 2019, 18:57:59)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
IANAifType, = mibBuilder.importSymbols("IANAifType-MIB", "IANAifType")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
snmpTraps, = mibBuilder.importSymbols("SNMPv2-MIB", "snmpTraps")
Counter64, IpAddress, TimeTicks, MibIdentifier, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, NotificationType, ObjectIdentity, mib_2, ModuleIdentity, Unsigned32, Integer32, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "IpAddress", "TimeTicks", "MibIdentifier", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "NotificationType", "ObjectIdentity", "mib-2", "ModuleIdentity", "Unsigned32", "Integer32", "Bits", "iso")
TextualConvention, TruthValue, AutonomousType, PhysAddress, TestAndIncr, DisplayString, TimeStamp, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "AutonomousType", "PhysAddress", "TestAndIncr", "DisplayString", "TimeStamp", "RowStatus")
ifMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 31))
ifMIB.setRevisions(('2000-06-14 00:00', '1996-02-28 21:55', '1993-11-08 21:55',))
if mibBuilder.loadTexts: ifMIB.setLastUpdated('200006140000Z')
if mibBuilder.loadTexts: ifMIB.setOrganization('IETF Interfaces MIB Working Group')
ifMIBObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 31, 1))
interfaces = MibIdentifier((1, 3, 6, 1, 2, 1, 2))
class OwnerString(TextualConvention, OctetString):
status = 'deprecated'
displayHint = '255a'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 255)
class InterfaceIndex(TextualConvention, Integer32):
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 2147483647)
class InterfaceIndexOrZero(TextualConvention, Integer32):
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
ifNumber = MibScalar((1, 3, 6, 1, 2, 1, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifNumber.setStatus('current')
ifTableLastChange = MibScalar((1, 3, 6, 1, 2, 1, 31, 1, 5), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifTableLastChange.setStatus('current')
ifTable = MibTable((1, 3, 6, 1, 2, 1, 2, 2), )
if mibBuilder.loadTexts: ifTable.setStatus('current')
ifEntry = MibTableRow((1, 3, 6, 1, 2, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ifEntry.setStatus('current')
ifIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifIndex.setStatus('current')
ifDescr = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifDescr.setStatus('current')
ifType = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 3), IANAifType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifType.setStatus('current')
ifMtu = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifMtu.setStatus('current')
ifSpeed = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifSpeed.setStatus('current')
ifPhysAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 6), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifPhysAddress.setStatus('current')
ifAdminStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifAdminStatus.setStatus('current')
ifOperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3), ("unknown", 4), ("dormant", 5), ("notPresent", 6), ("lowerLayerDown", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOperStatus.setStatus('current')
ifLastChange = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 9), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifLastChange.setStatus('current')
ifInOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInOctets.setStatus('current')
ifInUcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInUcastPkts.setStatus('current')
ifInNUcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInNUcastPkts.setStatus('deprecated')
ifInDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInDiscards.setStatus('current')
ifInErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInErrors.setStatus('current')
ifInUnknownProtos = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInUnknownProtos.setStatus('current')
ifOutOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutOctets.setStatus('current')
ifOutUcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutUcastPkts.setStatus('current')
ifOutNUcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutNUcastPkts.setStatus('deprecated')
ifOutDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutDiscards.setStatus('current')
ifOutErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutErrors.setStatus('current')
ifOutQLen = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutQLen.setStatus('deprecated')
ifSpecific = MibTableColumn((1, 3, 6, 1, 2, 1, 2, 2, 1, 22), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifSpecific.setStatus('deprecated')
ifXTable = MibTable((1, 3, 6, 1, 2, 1, 31, 1, 1), )
if mibBuilder.loadTexts: ifXTable.setStatus('current')
ifXEntry = MibTableRow((1, 3, 6, 1, 2, 1, 31, 1, 1, 1), )
ifEntry.registerAugmentions(("IF-MIB", "ifXEntry"))
ifXEntry.setIndexNames(*ifEntry.getIndexNames())
if mibBuilder.loadTexts: ifXEntry.setStatus('current')
ifName = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifName.setStatus('current')
ifInMulticastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInMulticastPkts.setStatus('current')
ifInBroadcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInBroadcastPkts.setStatus('current')
ifOutMulticastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutMulticastPkts.setStatus('current')
ifOutBroadcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifOutBroadcastPkts.setStatus('current')
ifHCInOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCInOctets.setStatus('current')
ifHCInUcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCInUcastPkts.setStatus('current')
ifHCInMulticastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCInMulticastPkts.setStatus('current')
ifHCInBroadcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCInBroadcastPkts.setStatus('current')
ifHCOutOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCOutOctets.setStatus('current')
ifHCOutUcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCOutUcastPkts.setStatus('current')
ifHCOutMulticastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCOutMulticastPkts.setStatus('current')
ifHCOutBroadcastPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHCOutBroadcastPkts.setStatus('current')
ifLinkUpDownTrapEnable = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifLinkUpDownTrapEnable.setStatus('current')
ifHighSpeed = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifHighSpeed.setStatus('current')
ifPromiscuousMode = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 16), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifPromiscuousMode.setStatus('current')
ifConnectorPresent = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifConnectorPresent.setStatus('current')
ifAlias = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 18), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifAlias.setStatus('current')
ifCounterDiscontinuityTime = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 19), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifCounterDiscontinuityTime.setStatus('current')
ifStackTable = MibTable((1, 3, 6, 1, 2, 1, 31, 1, 2), )
if mibBuilder.loadTexts: ifStackTable.setStatus('current')
ifStackEntry = MibTableRow((1, 3, 6, 1, 2, 1, 31, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifStackHigherLayer"), (0, "IF-MIB", "ifStackLowerLayer"))
if mibBuilder.loadTexts: ifStackEntry.setStatus('current')
ifStackHigherLayer = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 2, 1, 1), InterfaceIndexOrZero())
if mibBuilder.loadTexts: ifStackHigherLayer.setStatus('current')
ifStackLowerLayer = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 2, 1, 2), InterfaceIndexOrZero())
if mibBuilder.loadTexts: ifStackLowerLayer.setStatus('current')
ifStackStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ifStackStatus.setStatus('current')
ifStackLastChange = MibScalar((1, 3, 6, 1, 2, 1, 31, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifStackLastChange.setStatus('current')
ifRcvAddressTable = MibTable((1, 3, 6, 1, 2, 1, 31, 1, 4), )
if mibBuilder.loadTexts: ifRcvAddressTable.setStatus('current')
ifRcvAddressEntry = MibTableRow((1, 3, 6, 1, 2, 1, 31, 1, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "IF-MIB", "ifRcvAddressAddress"))
if mibBuilder.loadTexts: ifRcvAddressEntry.setStatus('current')
ifRcvAddressAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 4, 1, 1), PhysAddress())
if mibBuilder.loadTexts: ifRcvAddressAddress.setStatus('current')
ifRcvAddressStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 4, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ifRcvAddressStatus.setStatus('current')
ifRcvAddressType = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("volatile", 2), ("nonVolatile", 3))).clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ifRcvAddressType.setStatus('current')
linkDown = NotificationType((1, 3, 6, 1, 6, 3, 1, 1, 5, 3)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifAdminStatus"), ("IF-MIB", "ifOperStatus"))
if mibBuilder.loadTexts: linkDown.setStatus('current')
linkUp = NotificationType((1, 3, 6, 1, 6, 3, 1, 1, 5, 4)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifAdminStatus"), ("IF-MIB", "ifOperStatus"))
if mibBuilder.loadTexts: linkUp.setStatus('current')
ifConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 31, 2))
ifGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 31, 2, 1))
ifCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 31, 2, 2))
ifCompliance3 = ModuleCompliance((1, 3, 6, 1, 2, 1, 31, 2, 2, 3)).setObjects(("IF-MIB", "ifGeneralInformationGroup"), ("IF-MIB", "linkUpDownNotificationsGroup"), ("IF-MIB", "ifFixedLengthGroup"), ("IF-MIB", "ifHCFixedLengthGroup"), ("IF-MIB", "ifPacketGroup"), ("IF-MIB", "ifHCPacketGroup"), ("IF-MIB", "ifVHCPacketGroup"), ("IF-MIB", "ifCounterDiscontinuityGroup"), ("IF-MIB", "ifRcvAddressGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifCompliance3 = ifCompliance3.setStatus('current')
ifGeneralInformationGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 10)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"), ("IF-MIB", "ifType"), ("IF-MIB", "ifSpeed"), ("IF-MIB", "ifPhysAddress"), ("IF-MIB", "ifAdminStatus"), ("IF-MIB", "ifOperStatus"), ("IF-MIB", "ifLastChange"), ("IF-MIB", "ifLinkUpDownTrapEnable"), ("IF-MIB", "ifConnectorPresent"), ("IF-MIB", "ifHighSpeed"), ("IF-MIB", "ifName"), ("IF-MIB", "ifNumber"), ("IF-MIB", "ifAlias"), ("IF-MIB", "ifTableLastChange"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifGeneralInformationGroup = ifGeneralInformationGroup.setStatus('current')
ifFixedLengthGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 2)).setObjects(("IF-MIB", "ifInOctets"), ("IF-MIB", "ifOutOctets"), ("IF-MIB", "ifInUnknownProtos"), ("IF-MIB", "ifInErrors"), ("IF-MIB", "ifOutErrors"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifFixedLengthGroup = ifFixedLengthGroup.setStatus('current')
ifHCFixedLengthGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 3)).setObjects(("IF-MIB", "ifHCInOctets"), ("IF-MIB", "ifHCOutOctets"), ("IF-MIB", "ifInOctets"), ("IF-MIB", "ifOutOctets"), ("IF-MIB", "ifInUnknownProtos"), ("IF-MIB", "ifInErrors"), ("IF-MIB", "ifOutErrors"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifHCFixedLengthGroup = ifHCFixedLengthGroup.setStatus('current')
ifPacketGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 4)).setObjects(("IF-MIB", "ifInOctets"), ("IF-MIB", "ifOutOctets"), ("IF-MIB", "ifInUnknownProtos"), ("IF-MIB", "ifInErrors"), ("IF-MIB", "ifOutErrors"), ("IF-MIB", "ifMtu"), ("IF-MIB", "ifInUcastPkts"), ("IF-MIB", "ifInMulticastPkts"), ("IF-MIB", "ifInBroadcastPkts"), ("IF-MIB", "ifInDiscards"), ("IF-MIB", "ifOutUcastPkts"), ("IF-MIB", "ifOutMulticastPkts"), ("IF-MIB", "ifOutBroadcastPkts"), ("IF-MIB", "ifOutDiscards"), ("IF-MIB", "ifPromiscuousMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifPacketGroup = ifPacketGroup.setStatus('current')
ifHCPacketGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 5)).setObjects(("IF-MIB", "ifHCInOctets"), ("IF-MIB", "ifHCOutOctets"), ("IF-MIB", "ifInOctets"), ("IF-MIB", "ifOutOctets"), ("IF-MIB", "ifInUnknownProtos"), ("IF-MIB", "ifInErrors"), ("IF-MIB", "ifOutErrors"), ("IF-MIB", "ifMtu"), ("IF-MIB", "ifInUcastPkts"), ("IF-MIB", "ifInMulticastPkts"), ("IF-MIB", "ifInBroadcastPkts"), ("IF-MIB", "ifInDiscards"), ("IF-MIB", "ifOutUcastPkts"), ("IF-MIB", "ifOutMulticastPkts"), ("IF-MIB", "ifOutBroadcastPkts"), ("IF-MIB", "ifOutDiscards"), ("IF-MIB", "ifPromiscuousMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifHCPacketGroup = ifHCPacketGroup.setStatus('current')
ifVHCPacketGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 6)).setObjects(("IF-MIB", "ifHCInUcastPkts"), ("IF-MIB", "ifHCInMulticastPkts"), ("IF-MIB", "ifHCInBroadcastPkts"), ("IF-MIB", "ifHCOutUcastPkts"), ("IF-MIB", "ifHCOutMulticastPkts"), ("IF-MIB", "ifHCOutBroadcastPkts"), ("IF-MIB", "ifHCInOctets"), ("IF-MIB", "ifHCOutOctets"), ("IF-MIB", "ifInOctets"), ("IF-MIB", "ifOutOctets"), ("IF-MIB", "ifInUnknownProtos"), ("IF-MIB", "ifInErrors"), ("IF-MIB", "ifOutErrors"), ("IF-MIB", "ifMtu"), ("IF-MIB", "ifInUcastPkts"), ("IF-MIB", "ifInMulticastPkts"), ("IF-MIB", "ifInBroadcastPkts"), ("IF-MIB", "ifInDiscards"), ("IF-MIB", "ifOutUcastPkts"), ("IF-MIB", "ifOutMulticastPkts"), ("IF-MIB", "ifOutBroadcastPkts"), ("IF-MIB", "ifOutDiscards"), ("IF-MIB", "ifPromiscuousMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifVHCPacketGroup = ifVHCPacketGroup.setStatus('current')
ifRcvAddressGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 7)).setObjects(("IF-MIB", "ifRcvAddressStatus"), ("IF-MIB", "ifRcvAddressType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifRcvAddressGroup = ifRcvAddressGroup.setStatus('current')
ifStackGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 11)).setObjects(("IF-MIB", "ifStackStatus"), ("IF-MIB", "ifStackLastChange"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifStackGroup2 = ifStackGroup2.setStatus('current')
ifCounterDiscontinuityGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 13)).setObjects(("IF-MIB", "ifCounterDiscontinuityTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifCounterDiscontinuityGroup = ifCounterDiscontinuityGroup.setStatus('current')
linkUpDownNotificationsGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 14)).setObjects(("IF-MIB", "linkUp"), ("IF-MIB", "linkDown"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
linkUpDownNotificationsGroup = linkUpDownNotificationsGroup.setStatus('current')
ifTestTable = MibTable((1, 3, 6, 1, 2, 1, 31, 1, 3), )
if mibBuilder.loadTexts: ifTestTable.setStatus('deprecated')
ifTestEntry = MibTableRow((1, 3, 6, 1, 2, 1, 31, 1, 3, 1), )
ifEntry.registerAugmentions(("IF-MIB", "ifTestEntry"))
ifTestEntry.setIndexNames(*ifEntry.getIndexNames())
if mibBuilder.loadTexts: ifTestEntry.setStatus('deprecated')
ifTestId = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 3, 1, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifTestId.setStatus('deprecated')
ifTestStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notInUse", 1), ("inUse", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifTestStatus.setStatus('deprecated')
ifTestType = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 3, 1, 3), AutonomousType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifTestType.setStatus('deprecated')
ifTestResult = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 1), ("success", 2), ("inProgress", 3), ("notSupported", 4), ("unAbleToRun", 5), ("aborted", 6), ("failed", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifTestResult.setStatus('deprecated')
ifTestCode = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 3, 1, 5), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifTestCode.setStatus('deprecated')
ifTestOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 31, 1, 3, 1, 6), OwnerString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ifTestOwner.setStatus('deprecated')
ifGeneralGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 1)).setObjects(("IF-MIB", "ifDescr"), ("IF-MIB", "ifType"), ("IF-MIB", "ifSpeed"), ("IF-MIB", "ifPhysAddress"), ("IF-MIB", "ifAdminStatus"), ("IF-MIB", "ifOperStatus"), ("IF-MIB", "ifLastChange"), ("IF-MIB", "ifLinkUpDownTrapEnable"), ("IF-MIB", "ifConnectorPresent"), ("IF-MIB", "ifHighSpeed"), ("IF-MIB", "ifName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifGeneralGroup = ifGeneralGroup.setStatus('deprecated')
ifTestGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 8)).setObjects(("IF-MIB", "ifTestId"), ("IF-MIB", "ifTestStatus"), ("IF-MIB", "ifTestType"), ("IF-MIB", "ifTestResult"), ("IF-MIB", "ifTestCode"), ("IF-MIB", "ifTestOwner"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifTestGroup = ifTestGroup.setStatus('deprecated')
ifStackGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 9)).setObjects(("IF-MIB", "ifStackStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifStackGroup = ifStackGroup.setStatus('deprecated')
ifOldObjectsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 31, 2, 1, 12)).setObjects(("IF-MIB", "ifInNUcastPkts"), ("IF-MIB", "ifOutNUcastPkts"), ("IF-MIB", "ifOutQLen"), ("IF-MIB", "ifSpecific"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifOldObjectsGroup = ifOldObjectsGroup.setStatus('deprecated')
ifCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 31, 2, 2, 1)).setObjects(("IF-MIB", "ifGeneralGroup"), ("IF-MIB", "ifStackGroup"), ("IF-MIB", "ifFixedLengthGroup"), ("IF-MIB", "ifHCFixedLengthGroup"), ("IF-MIB", "ifPacketGroup"), ("IF-MIB", "ifHCPacketGroup"), ("IF-MIB", "ifTestGroup"), ("IF-MIB", "ifRcvAddressGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifCompliance = ifCompliance.setStatus('deprecated')
ifCompliance2 = ModuleCompliance((1, 3, 6, 1, 2, 1, 31, 2, 2, 2)).setObjects(("IF-MIB", "ifGeneralInformationGroup"), ("IF-MIB", "ifStackGroup2"), ("IF-MIB", "ifCounterDiscontinuityGroup"), ("IF-MIB", "ifFixedLengthGroup"), ("IF-MIB", "ifHCFixedLengthGroup"), ("IF-MIB", "ifPacketGroup"), ("IF-MIB", "ifHCPacketGroup"), ("IF-MIB", "ifRcvAddressGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ifCompliance2 = ifCompliance2.setStatus('deprecated')
mibBuilder.exportSymbols("IF-MIB", ifIndex=ifIndex, ifNumber=ifNumber, ifCompliance2=ifCompliance2, ifConformance=ifConformance, ifOutBroadcastPkts=ifOutBroadcastPkts, ifTestEntry=ifTestEntry, ifLastChange=ifLastChange, ifOutOctets=ifOutOctets, ifHCInMulticastPkts=ifHCInMulticastPkts, ifMtu=ifMtu, ifHCInOctets=ifHCInOctets, ifSpeed=ifSpeed, ifFixedLengthGroup=ifFixedLengthGroup, ifInBroadcastPkts=ifInBroadcastPkts, ifName=ifName, ifCompliance=ifCompliance, ifSpecific=ifSpecific, ifHCOutMulticastPkts=ifHCOutMulticastPkts, ifStackTable=ifStackTable, ifCounterDiscontinuityTime=ifCounterDiscontinuityTime, ifInMulticastPkts=ifInMulticastPkts, ifHCFixedLengthGroup=ifHCFixedLengthGroup, ifTestType=ifTestType, ifCounterDiscontinuityGroup=ifCounterDiscontinuityGroup, ifEntry=ifEntry, ifOutNUcastPkts=ifOutNUcastPkts, ifRcvAddressEntry=ifRcvAddressEntry, linkUp=linkUp, ifHighSpeed=ifHighSpeed, linkDown=linkDown, InterfaceIndexOrZero=InterfaceIndexOrZero, ifGeneralGroup=ifGeneralGroup, ifOutMulticastPkts=ifOutMulticastPkts, ifGeneralInformationGroup=ifGeneralInformationGroup, ifInNUcastPkts=ifInNUcastPkts, ifLinkUpDownTrapEnable=ifLinkUpDownTrapEnable, ifRcvAddressGroup=ifRcvAddressGroup, ifInOctets=ifInOctets, ifOperStatus=ifOperStatus, ifRcvAddressType=ifRcvAddressType, ifInUnknownProtos=ifInUnknownProtos, ifInDiscards=ifInDiscards, ifXTable=ifXTable, interfaces=interfaces, OwnerString=OwnerString, ifAdminStatus=ifAdminStatus, PYSNMP_MODULE_ID=ifMIB, linkUpDownNotificationsGroup=linkUpDownNotificationsGroup, ifTableLastChange=ifTableLastChange, ifStackGroup=ifStackGroup, ifXEntry=ifXEntry, ifStackEntry=ifStackEntry, ifStackGroup2=ifStackGroup2, ifMIBObjects=ifMIBObjects, ifType=ifType, ifTestResult=ifTestResult, ifStackLastChange=ifStackLastChange, ifHCOutOctets=ifHCOutOctets, ifOutErrors=ifOutErrors, ifStackLowerLayer=ifStackLowerLayer, InterfaceIndex=InterfaceIndex, ifTestOwner=ifTestOwner, ifPacketGroup=ifPacketGroup, ifTestId=ifTestId, ifHCOutBroadcastPkts=ifHCOutBroadcastPkts, ifPromiscuousMode=ifPromiscuousMode, ifStackHigherLayer=ifStackHigherLayer, ifHCOutUcastPkts=ifHCOutUcastPkts, ifTestTable=ifTestTable, ifPhysAddress=ifPhysAddress, ifMIB=ifMIB, ifOutQLen=ifOutQLen, ifConnectorPresent=ifConnectorPresent, ifInErrors=ifInErrors, ifOutDiscards=ifOutDiscards, ifCompliance3=ifCompliance3, ifVHCPacketGroup=ifVHCPacketGroup, ifDescr=ifDescr, ifOldObjectsGroup=ifOldObjectsGroup, ifHCInBroadcastPkts=ifHCInBroadcastPkts, ifGroups=ifGroups, ifStackStatus=ifStackStatus, ifOutUcastPkts=ifOutUcastPkts, ifRcvAddressStatus=ifRcvAddressStatus, ifHCPacketGroup=ifHCPacketGroup, ifAlias=ifAlias, ifTestStatus=ifTestStatus, ifRcvAddressAddress=ifRcvAddressAddress, ifCompliances=ifCompliances, ifTestCode=ifTestCode, ifInUcastPkts=ifInUcastPkts, ifTestGroup=ifTestGroup, ifHCInUcastPkts=ifHCInUcastPkts, ifTable=ifTable, ifRcvAddressTable=ifRcvAddressTable)
|
from collections import OrderedDict
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Dict, List, Tuple
import pytest
from datamodel_code_generator import DataTypeManager
from datamodel_code_generator.model import DataModel, DataModelFieldBase
from datamodel_code_generator.model.pydantic import BaseModel
from datamodel_code_generator.parser.base import Parser, relative, sort_data_models
from datamodel_code_generator.reference import snake_to_upper_camel
class A(DataModel):
pass
class B(DataModel):
pass
class C(Parser):
def parse_raw(self, name: str, raw: Dict) -> None:
pass
def parse(self) -> str:
return 'parsed'
def test_parser():
c = C(
data_model_type=D,
data_model_root_type=B,
data_model_field_type=DataModelFieldBase,
base_class='Base',
source='',
)
assert c.data_model_type == D
assert c.data_model_root_type == B
assert c.data_model_field_type == DataModelFieldBase
assert c.base_class == 'Base'
def test_sort_data_models():
reference = [
BaseModel(name='A', reference_classes={'A', 'C'}, fields=[]),
BaseModel(name='B', reference_classes={'B'}, fields=[]),
BaseModel(name='C', reference_classes={'B'}, fields=[]),
]
unresolved, resolved, require_update_action_models = sort_data_models(reference)
expected = OrderedDict()
expected['B'] = reference[1]
expected['C'] = reference[2]
expected['A'] = reference[0]
assert resolved == expected
assert unresolved == []
assert require_update_action_models == ['B', 'A']
def test_sort_data_models_unresolved():
reference = [
BaseModel(name='A', reference_classes=['A', 'C'], fields=[]),
BaseModel(name='B', reference_classes=['B'], fields=[]),
BaseModel(name='C', reference_classes=['B'], fields=[]),
BaseModel(name='D', reference_classes=['A', 'C', 'v'], fields=[]),
BaseModel(name='z', reference_classes=['v'], fields=[]),
]
with pytest.raises(Exception):
sort_data_models(reference)
def test_sort_data_models_unresolved_raise_recursion_error():
reference = [
BaseModel(name='A', reference_classes=['A', 'C'], fields=[]),
BaseModel(name='B', reference_classes=['B'], fields=[]),
BaseModel(name='C', reference_classes=['B'], fields=[]),
BaseModel(name='D', reference_classes=['A', 'C', 'v'], fields=[]),
BaseModel(name='z', reference_classes=['v'], fields=[]),
]
with pytest.raises(Exception):
sort_data_models(reference, recursion_count=100000)
@pytest.mark.parametrize(
'current_module,reference,val',
[
('', 'Foo', ('', '')),
('a', 'a.Foo', ('', '')),
('a', 'a.b.Foo', ('.', 'b')),
('a.b', 'a.Foo', ('.', 'Foo')),
('a.b.c', 'a.Foo', ('..', 'Foo')),
('a.b.c', 'Foo', ('...', 'Foo')),
],
)
def test_relative(current_module: str, reference: str, val: Tuple[str, str]):
assert relative(current_module, reference) == val
@pytest.mark.parametrize(
'word,expected',
[
(
'_hello',
'_Hello',
), # In case a name starts with a underline, we should keep it.
('hello_again', 'HelloAgain'), # regular snake case
('hello__again', 'HelloAgain'), # handles double underscores
(
'hello___again_again',
'HelloAgainAgain',
), # handles double and single underscores
('hello_again_', 'HelloAgain'), # handles trailing underscores
('hello', 'Hello'), # no underscores
('____', '_'), # degenerate case, but this is the current expected behavior
],
)
def test_snake_to_upper_camel(word, expected):
"""Tests the snake to upper camel function."""
actual = snake_to_upper_camel(word)
assert actual == expected
class D(DataModel):
def __init__(
self, filename: str, data: str, name: str, fields: List[DataModelFieldBase]
):
super().__init__(name, fields)
self._data = data
def render(self) -> str:
return self._data
|
import collections
import io
import os
import typing
import torch
import numpy as np
from .operators import CommonGraph, ExtendedOperator, GraphOptimizer, HybridQuantizer
from .operators.op_version import OPVersioner
from .operators.tflite import Tensor
from .operators.torch import OPERATOR_CONVERTER_DICT
from .operators.torch.base import NoTrackOperator
from ..util.converter_util import generate_converter_config
from ..util.util import get_logger
log = get_logger(__name__, 'INFO')
class TFLiteConverter(object):
def __init__(
self,
model: typing.Union[torch.jit.ScriptFunction, torch.jit.ScriptModule, torch.nn.Module],
dummy_input: typing.Union[torch.Tensor, typing.Iterable[torch.Tensor]],
tflite_path: str,
input_transpose: typing.Optional[typing.Union[bool, typing.Iterable[bool]]] = None,
dump_jit_model_path: typing.Optional[str] = None,
dump_dummy_input_path: typing.Optional[str] = None,
dump_config_path: typing.Optional[str] = None,
strict_symmetric_check: bool = False,
preserve_tensors: bool = False,
optimize: int = GraphOptimizer.ALL_OPTIMIZE,
quantize_target_type: str = 'uint8',
hybrid_quantization_from_float: bool = False,
hybrid_per_channel: bool = False,
hybrid_asymmetric_inputs: bool = True,
fuse_quant_dequant: bool = False,
gc_when_reload: bool = False,
) -> None:
""" The TFLiteConverter class
Args:
model (typing.Union[torch.jit.ScriptFunction, torch.jit.ScriptModule, torch.nn.Module]): The input model \
(either traced or non-traced)
dummy_input (typing.Union[torch.Tensor, typing.Iterable[torch.Tensor]]): A viable input to the model
tflite_path (str): Path to use for exporting
input_transpose (typing.Optional[typing.Union[bool, typing.Iterable[bool]]], optional): Whether to \
transpose the input(s). Defaults to None (True for 4d input, False otherwise).
dump_jit_model_path (typing.Optional[str]): The path for dumping the jit model. Defaults to None
dump_dummy_input_path (typing.Optional[str]): The path for dumping the dummy input. Defaults to None
dump_config_path (typing.Optional[str]): The path for dumping the json config. Defaults to None
strict_symmetric_check (bool): Strict symmetric quantization checks. Defaults to False
preserve_tensors (bool): Preserve the copies of the intermediate tensors. Defaults to False
optimize (int): The level of graph optimization. Defaults to `GraphOptimizer.ALL_OPTIMIZE`
quantize_target_type (str): Target type for quantization. Defaults to 'uint8'
hybrid_quantization_from_float (bool): Direct hybrid quantization from a float model. Defaults to False
hybrid_per_channel (bool): Prefer per-channel kernels in hybrid quantization. Defaults to False
hybrid_asymmetric_inputs (bool): Prefer asymmetric inputs while performing hybrid quantization
fuse_quant_dequant (bool): Remove quant and dequant nodes directly connected to i/o nodes. Defaults to False
gc_when_reload (bool): Apply GC when reloading the torchscript into memory
"""
self.model = model
self.lower_model = None
self.graph = None
self.tensor_map = {}
self.tensor_map_copies = {}
self.common_graph = CommonGraph()
if type(dummy_input) in (tuple, list):
self.dummy_input = dummy_input
else:
self.dummy_input = [dummy_input]
self.tflite_path = tflite_path
self.input_transpose = input_transpose
self.strict_symmetric_check = strict_symmetric_check
self.dump_jit_model_path = dump_jit_model_path
self.dump_dummy_input_path = dump_dummy_input_path
self.dump_config_path = dump_config_path
self.preserve_tensors = preserve_tensors
self.optimize = optimize
self.hybrid = hybrid_quantization_from_float
self.hybrid_per_channel = hybrid_per_channel
self.hybrid_asymmetric_inputs = hybrid_asymmetric_inputs
self.fuse_quant_dequant = fuse_quant_dequant
self.gc_when_reload = gc_when_reload
if quantize_target_type == 'uint8':
self.q_type = np.uint8
if self.strict_symmetric_check:
log.warning('Symmetric quantized model with uint8 is unsupported in most backends of TFLite')
if self.hybrid:
if self.hybrid_per_channel:
raise AttributeError('Per-channel kernels support int8 only')
log.warning(
'Unless you are using legacy TFLite (<1.14), please set quantize_target_type to int8 instead'
)
elif quantize_target_type == 'int8':
self.q_type = np.int8
elif quantize_target_type == 'int16':
if self.hybrid:
raise AttributeError('Hybrid kernels support int8 only')
if not self.strict_symmetric_check:
raise AttributeError('Int16 quantization requires strict_symmetric_check=True')
self.q_type = np.int16
else:
raise AttributeError(f'unknown quantize_target_type: {quantize_target_type}, expected: uint8, int8, int16')
if dump_config_path and not dump_jit_model_path:
raise AssertionError("when dump_config_path is set, dump_jit_model_path is required to be set")
self.input_offset = 1
def init_jit_graph(self):
# Multi-GPU modules don't support JIT tracing
if isinstance(self.model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
self.model = self.model.module
if not isinstance(self.model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)):
if hasattr(self.model, 'cpu'):
self.model.cpu()
if hasattr(self.model, 'eval'):
self.model.eval()
with torch.no_grad():
script = torch.jit.trace(self.model, self.dummy_input)
# Remove reference to original model to save memory
self.model = None
# Have to save it once, otherwise something weird happens
if self.dump_jit_model_path is None:
with io.BytesIO() as f:
torch.jit.save(script, f)
f.seek(0)
script = torch.jit.load(f)
else:
jit_model_dir = os.path.abspath(os.path.dirname(self.dump_jit_model_path))
os.makedirs(jit_model_dir, exist_ok=True)
torch.jit.save(script, self.dump_jit_model_path)
if self.gc_when_reload:
import gc
script = None
gc.collect()
script = torch.jit.load(self.dump_jit_model_path)
self.model = script
if isinstance(self.model, torch.jit.ScriptFunction):
self.input_offset = 0
if self.dump_dummy_input_path is not None:
dummy_arrs = list(map(lambda x: x.detach().cpu().numpy(), self.dummy_input))
np.savez(self.dump_dummy_input_path, *dummy_arrs)
if self.dump_config_path is not None:
generate_converter_config(
self.dummy_input,
[],
self.input_transpose,
[],
self.dump_jit_model_path,
self.tflite_path,
self.dump_config_path,
)
def init_lowered_module(self):
assert (
isinstance(self.model, torch.jit.ScriptFunction)
or self.model.training is False
or str(next(self.model.graph.inputs()).type()) == '__torch__.PlaceholderModule'
        ), 'Model is in training mode'
graph = self.model.graph
# Inline everything
torch._C._jit_pass_inline(graph)
# Remove fork/wait nodes
torch._C._jit_pass_inline_fork_wait(graph)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_lower_all_tuples(graph)
        # we now record some ops like ones/zeros
# into a trace where we previously recorded constants
# use constant prop to maintain our current level of onnx support
# without implementing symbolics for all of them
torch._C._jit_pass_constant_propagation(graph)
# _split_tensor_list_constants(graph, graph)
# run dce to eliminate dead parts of the graph that might have been
# left behind by things like symbolic_override
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_canonicalize_graph_fuser_ops(graph)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_peephole(graph, True)
torch._C._jit_pass_fuse_addmm(graph)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_peephole(graph, True)
torch._C._jit_pass_lower_all_tuples(graph)
self.graph = graph
log.debug('Lowered graph:')
log.debug(self.graph)
def init_input_transpose(self):
input_transpose = self.input_transpose
if type(input_transpose) not in (tuple, list):
input_transpose = [input_transpose] * len(self.dummy_input)
for i, t in enumerate(self.dummy_input):
if input_transpose[i] is None:
input_transpose[i] = t.dim() == 4
self.input_transpose = input_transpose
def init_common_graph(self):
graph_inputs = [x.debugName() for x in list(self.graph.inputs())][self.input_offset :]
graph_outputs = [x.debugName() for x in list(self.graph.outputs())]
self.common_graph.inputs.extend(graph_inputs)
self.common_graph.outputs.extend(graph_outputs)
self.common_graph.input_transpose.extend(self.input_transpose)
tensors = []
for i, node in enumerate(graph_inputs):
tensors.append(
Tensor(
self.dummy_input[i],
node,
has_buffer=False,
asymmetric=not self.strict_symmetric_check,
q_type=self.q_type,
)
)
self.common_graph.add_nodes(tensors, ExtendedOperator.INPUT_NODE)
def init_inputs(self):
graph_inputs = [x.debugName() for x in list(self.graph.inputs())]
for i, node in enumerate(graph_inputs):
if self.input_offset > 0 and i == 0:
self.tensor_map[graph_inputs[i]] = self.model
else:
self.tensor_map[graph_inputs[i]] = self.dummy_input[i - self.input_offset]
def unsupported_operations(self, unique=True) -> typing.List[str]:
"""Returns unsupported operations in the graph"""
if self.graph is None:
self.init_lowered_module()
all_nodes = list(self.graph.nodes())
ops = []
for node in all_nodes:
k = node.kind()
converter_type = OPERATOR_CONVERTER_DICT.get(k, None)
if converter_type is None:
ops.append(k)
if unique:
return list(set(ops))
else:
return ops
def init_operations(self):
log.debug('Initialize operators...')
node_queue = collections.deque(self.graph.nodes())
while node_queue:
node = node_queue.popleft()
k = node.kind()
output_tensors = []
converter_type = OPERATOR_CONVERTER_DICT.get(k, NoTrackOperator)
converter = converter_type(node, self.tensor_map, not self.strict_symmetric_check, self.q_type)
            # Don't track the operator if none of its input nodes are tracked, unless it has a custom
            # implementation (e.g. prim::* ops)
if converter_type.run == NoTrackOperator.run and converter_type != NoTrackOperator:
no_track_flag = True
for n in converter.input_names:
if self.common_graph.has_nested_names(n):
nested_names = self.common_graph.get_list_expanded_names(n)
for x in nested_names:
if x in self.common_graph.tensor_map:
no_track_flag = False
break
elif n in self.common_graph.tensor_map:
no_track_flag = False
break
if no_track_flag:
converter_type = NoTrackOperator
converter = converter_type(node, self.tensor_map, not self.strict_symmetric_check, self.q_type)
if k != 'prim::Constant':
log.debug(f'{k} {converter.input_names} -> {converter.output_names} {converter_type.__name__}')
# Don't fetch attrs and schemas for non-tracking nodes
if converter_type != NoTrackOperator:
try:
attrs = converter.fetch_all_attrs(node)
except StopIteration:
attrs = None
args = converter.fetch_annotated_args(node)
else:
attrs = None
args = None
converter.parse(node, attrs, args, self.common_graph)
outputs = converter.output_names
new_nodes = converter.output_nodes
if output_tensors is not None:
output_tensors.extend(converter.get_output_tensors())
if len(new_nodes) > 0:
node_queue.extendleft(reversed(new_nodes))
assert len(output_tensors) == len(outputs)
for t, name in zip(output_tensors, outputs):
self.tensor_map[name] = t
if self.preserve_tensors and isinstance(t, torch.Tensor):
self.tensor_map_copies[name] = t.detach().clone()
def __try_infer_type(self, params):
inferred = torch._C._jit_try_infer_type(params)
if hasattr(inferred, 'type'):
return inferred.type().annotation_str
else:
return str(inferred)
def __unpack_params(self, params):
return NoTrackOperator.unpack_params(None, params)
def convert(self):
"""Converts the model to the TFLite format
Raises:
Exception: If unsupported ops are found, an Exception will be raised
"""
self.init_input_transpose()
self.init_jit_graph()
self.init_lowered_module()
self.init_common_graph()
self.init_inputs()
self.init_operations()
unsupported_ops = self.unsupported_operations()
if len(unsupported_ops) > 0:
log.error(f'Unsupported ops: {", ".join(unsupported_ops)}')
raise Exception("Cannot continue due to fatal error")
else:
optimizer = GraphOptimizer(self.common_graph, self.optimize, self.fuse_quant_dequant)
optimizer.optimize()
if self.hybrid:
quantizer = HybridQuantizer(
self.common_graph, self.hybrid_asymmetric_inputs, self.q_type, self.hybrid_per_channel
)
quantizer.quantize()
optimizer.cleanup_dead_nodes()
versioner = OPVersioner(self.common_graph)
versioner.process()
self.common_graph.convert(self.tflite_path)
log.info(f'Generated model saved to {self.tflite_path}')
def visualize(self, hide_constants=True):
"""Visualize the TinyNeuralNetwork Graph
Args:
hide_constants (bool, optional): Hide the constant nodes in the graph. Defaults to True.
"""
self.common_graph.visualize(hide_constants)
def get_outputs(self):
"""Returns the output of the model, which is evaluated via tracing nodes one by one"""
outputs = []
for name in self.common_graph.outputs:
outputs.append(self.tensor_map[name])
return outputs
def get_value(self, name, default_val=None):
"""Returns the output according to the name of the node. If the name doesn't exist, `default_val` is returned"""
if self.preserve_tensors:
val = self.tensor_map_copies.get(name, default_val)
else:
val = self.tensor_map.get(name, default_val)
type_ = self.__try_infer_type(val)
if type_.endswith('PackedParamsBase'):
return self.__unpack_params(val)
return val
def tensor_names(self) -> typing.List[str]:
"""Returns the all the names of the intermediate tensors
Returns:
typing.List[str]: The names of the intermediate tensors
"""
if self.preserve_tensors:
return list(self.tensor_map_copies.keys())
else:
return list(self.tensor_map.keys())
def inputs_for_tflite(self) -> typing.List[np.ndarray]:
"""Prepare inputs for the TFLite backend
Returns:
typing.List[np.ndarray]: The input tensors
"""
arrs = []
for t, trans in zip(self.dummy_input, self.input_transpose):
arr = t.detach().clone().numpy()
if trans:
arr = np.transpose(arr, (0, 2, 3, 1))
arrs.append(arr)
return arrs
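# A minimal usage sketch of the converter defined above. This is a hedged example:
# the enclosing class is assumed to be exposed as `TFLiteConverter`, and `MyModel`
# plus the output path are placeholders rather than names from this module.
#
#     model = MyModel().eval()
#     dummy_input = torch.randn(1, 3, 224, 224)
#     converter = TFLiteConverter(model, dummy_input, tflite_path='out/model.tflite',
#                                 quantize_target_type='int8', strict_symmetric_check=True)
#     converter.convert()
#
# `convert()` runs the tracing, lowering, graph construction, and optimization steps
# implemented above and writes the resulting model to `tflite_path`.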
|
# Copyright (c) 2013, erpcloud.systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns, data = [], []
columns=get_columns()
data=get_data(filters,columns)
return columns, data
def get_columns():
return [
{
"label": _("Contract No"),
"fieldname": "name",
"fieldtype": "Link",
"options": "PMS Lease Contract",
"width": 155
},
{
"label": _("Party"),
"fieldname": "party_name",
"fieldtype": "Data",
"width": 200
},
{
"label": _("Posting Date"),
"fieldname": "posting_date",
"fieldtype": "Date",
"width": 105
},
{
"label": _("Contract Period In Months"),
"fieldname": "no_of_months",
"fieldtype": "Data",
"width": 220
},
{
"label": _("Start Date"),
"fieldname": "start_date",
"fieldtype": "Date",
"width": 100
},
{
"label": _("End Date"),
"fieldname": "end_date",
"fieldtype": "Date",
"width": 100
},
{
"label": _("Unit"),
"fieldname": "unit",
"fieldtype": "Data",
"width": 100
},
{
"label": _("Unit Type"),
"fieldname": "type",
"fieldtype": "Data",
"width": 110
},
{
"label": _("Floor"),
"fieldname": "floor",
"fieldtype": "Data",
"width": 110
},
{
"label": _("Zone"),
"fieldname": "zone",
"fieldtype": "Data",
"width": 110
},
{
"label": _("Area"),
"fieldname": "area",
"fieldtype": "Data",
"width": 140
},
{
"label": _("Allocation"),
"fieldname": "allocation",
"fieldtype": "Data",
"width": 90
},
{
"label": _("Activity"),
"fieldname": "activity",
"fieldtype": "Data",
"width": 140
},
{
"label": _("Internal Space"),
"fieldname": "internal_space",
"fieldtype": "Float",
"width": 130
},
{
"label": _("External Space"),
"fieldname": "external_space",
"fieldtype": "Float",
"width": 130
},
{
"label": _("Total Space"),
"fieldname": "total_space",
"fieldtype": "Float",
"width": 110
},
{
"label": _("Meter Price"),
"fieldname": "meter_price",
"fieldtype": "Currency",
"width": 160
},
{
"label": _("State"),
"fieldname": "state",
"fieldtype": "Data",
"width": 110
},
{
"label": _("Rent Amount"),
"fieldname": "rent_value_",
"fieldtype": "Currency",
"width": 140
},
{
"label": _("Annual Increase %"),
"fieldname": "annual_increase",
"fieldtype": "Percent",
"width": 160
},
{
"label": _("Annual Increase Type"),
"fieldname": "annual_increase_type",
"fieldtype": "Data",
"width": 160
},
{
"label": _("Insurance Amount"),
"fieldname": "insurance_value",
"fieldtype": "Currency",
"width": 160
},
{
"label": _("Total Payable Amount"),
"fieldname": "total_payable_amount",
"fieldtype": "Currency",
"width": 160
},
{
"label": _("Total Amount Paid"),
"fieldname": "total_amount_paid",
"fieldtype": "Currency",
"width": 160
}
]
def get_data(filters, columns):
item_price_qty_data = []
item_price_qty_data = get_item_price_qty_data(filters)
return item_price_qty_data
def get_item_price_qty_data(filters):
conditions = ""
if filters.get("contract"):
conditions += " and a.name=%(contract)s"
if filters.get("unit"):
conditions += " and a.unit=%(unit)s"
if filters.get("type"):
conditions += " and a.type=%(type)s"
if filters.get("floor"):
conditions += " and a.floor=%(floor)s"
if filters.get("zone"):
conditions += " and a.zone=%(zone)s"
if filters.get("area"):
conditions += " and a.area=%(area)s"
if filters.get("activity"):
conditions += " and a.activity=%(activity)s"
if filters.get("allocation"):
conditions += " and a.allocation=%(allocation)s"
if filters.get("from_date"):
conditions += " and a.posting_date>=%(from_date)s"
if filters.get("to_date"):
conditions += " and a.posting_date<=%(to_date)s"
item_results = frappe.db.sql("""
select
a.name as name,
a.party_name as party_name,
a.posting_date as posting_date,
a.unit as unit,
a.type as type,
a.floor as floor,
a.zone as zone,
a.area as area,
a.activity as activity,
a.allocation as allocation,
a.internal_space as internal_space,
a.external_space as external_space,
(a.internal_space + a.external_space) as total_space,
a.meter_price as meter_price,
a.state as state,
a.no_of_months as no_of_months,
a.rent_value_ as rent_value_,
a.annual_increase as annual_increase,
a.annual_increase_type as annual_increase_type,
a.start_date as start_date,
a.end_date as end_date,
a.total_payable_amount as total_payable_amount,
a.total_amount_paid as total_amount_paid,
a.insurance_value as insurance_value
from `tabPMS Lease Contract` a
where
a.docstatus = 1
{conditions}
""".format(conditions=conditions), filters, as_dict=1)
result = []
if item_results:
for item_dict in item_results:
data = {
'name': item_dict.name,
'party_name': item_dict.party_name,
'unit': item_dict.unit,
'type': item_dict.type,
'floor': item_dict.floor,
'zone': item_dict.zone,
'area': item_dict.area,
'allocation': item_dict.allocation,
'internal_space': item_dict.internal_space,
'external_space': item_dict.external_space,
'total_space': item_dict.total_space,
'meter_price': item_dict.meter_price,
'activity': item_dict.activity,
'state': item_dict.state,
'posting_date': item_dict.posting_date,
'no_of_months': item_dict.no_of_months,
'rent_value_': item_dict.rent_value_,
'annual_increase': item_dict.annual_increase,
'annual_increase_type': item_dict.annual_increase_type,
'start_date': item_dict.start_date,
'end_date': item_dict.end_date,
'total_payable_amount': item_dict.total_payable_amount,
'total_amount_paid': item_dict.total_amount_paid,
'insurance_value': item_dict.insurance_value,
}
result.append(data)
return result
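# A hedged sketch of how this report module is typically invoked; the filter values
# below are hypothetical, and in practice the framework builds `filters` from the
# report's configured filter fields.
#
#     columns, data = execute({
#         "unit": "A-101",
#         "from_date": "2023-01-01",
#         "to_date": "2023-12-31",
#     })
#
# Any of the keys handled in `get_item_price_qty_data` (contract, unit, type, floor,
# zone, area, activity, allocation, from_date, to_date) may be supplied; a missing
# key simply adds no SQL condition.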
|
from middleware.db import repository
from middleware.connection.conn import session
from middleware.db.tables import PokemonMoveAvailability
from pokedex.db.tables import VersionGroup
red_blue_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'red-blue').one()
yellow_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'yellow').one()
gold_silver_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'gold-silver').one()
crystal_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'crystal').one()
ruby_sapphir_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'ruby-sapphire').one()
firered_leafgreen_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'firered-leafgreen').one()
emerald_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'emerald').one()
fire_red_leaf_green_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'firered-leafgreen').one()
diamond_pearl_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'diamond-pearl').one()
platinum_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'platinum').one()
heart_gold_soul_silver_vg = session.query(VersionGroup).filter(
VersionGroup.identifier == 'heartgold-soulsilver').one()
black_white_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'black-white').one()
black2_white2_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'black-2-white-2').one()
xy_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'x-y').one()
oras_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'omega-ruby-alpha-sapphire').one()
sun_moon_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'sun-moon').one()
ultra_sun_ultra_moon_vg = session.query(VersionGroup).filter(
VersionGroup.identifier == 'ultra-sun-ultra-moon').one()
lgpe_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'lets-go-pikachu-lets-go-eevee').one()
sword_shield_vg = session.query(VersionGroup).filter(VersionGroup.identifier == 'sword-shield').one()
def load_basic_move_availabilities():
save_availabilities(red_blue_vg, 1, 151)
save_availabilities(yellow_vg, 1, 151)
save_availabilities(crystal_vg, 1, 251)
save_availabilities(gold_silver_vg, 1, 251)
save_availabilities(fire_red_leaf_green_vg, 1, 386)
save_availabilities(ruby_sapphir_vg, 1, 386)
save_availabilities(emerald_vg, 1, 386)
save_availabilities(diamond_pearl_vg, 1, 493)
save_availabilities(platinum_vg, 1, 493)
save_availabilities(heart_gold_soul_silver_vg, 1, 493)
save_availabilities(black_white_vg, 1, 649)
save_availabilities(black2_white2_vg, 1, 649)
save_availabilities(xy_vg, 1, 721)
save_availabilities(oras_vg, 1, 721)
save_availabilities(sun_moon_vg, 1, 807)
save_availabilities(ultra_sun_ultra_moon_vg, 1, 807)
save_alola_pokemons(sun_moon_vg)
save_alola_pokemons(ultra_sun_ultra_moon_vg)
save_alola_pokemons(sword_shield_vg, True)
save_galar_pokemons(sword_shield_vg)
save_default_gen8_pokemons(sword_shield_vg)
save_availabilities(lgpe_vg, 1, 151)
save_availabilities(lgpe_vg, 808, 809)
save_alola_pokemons(lgpe_vg)
def load_specific_pokemon_move_availabilities():
# gen 3
save_pokemon_move_availabilities_with_forms([ruby_sapphir_vg, emerald_vg, firered_leafgreen_vg],
'deoxys-normal', ['deoxys-attack', 'deoxys-defense', 'deoxys-speed'],
False, True, False, False, True)
# gen4
save_pokemon_move_availabilities_with_forms([diamond_pearl_vg, platinum_vg, heart_gold_soul_silver_vg],
'deoxys-normal', ['deoxys-attack', 'deoxys-defense', 'deoxys-speed'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([diamond_pearl_vg, platinum_vg, heart_gold_soul_silver_vg],
'wormadam-plant', ['wormadam-sandy', 'wormadam-trash'],
False, True, True, False, True)
    save_pokemon_move_availabilities_with_forms([platinum_vg, heart_gold_soul_silver_vg],
                                                'shaymin-land', ['shaymin-sky'],
                                                False, True, False, False, True)
# gen 5
save_pokemon_move_availabilities_with_forms([black_white_vg, black2_white2_vg],
'deoxys-normal', ['deoxys-attack', 'deoxys-defense', 'deoxys-speed'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([black_white_vg, black2_white2_vg],
'wormadam-plant', ['wormadam-sandy', 'wormadam-trash'],
False, True, True, False, True)
save_pokemon_move_availabilities_with_forms([black_white_vg, black2_white2_vg],
'shaymin-land', ['shaymin-sky'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([black2_white2_vg],
'kyurem', ['kyurem-black', 'kyurem-white'], True)
# gen6
# noinspection DuplicatedCode
save_pokemon_move_availabilities_with_forms([xy_vg, oras_vg],
'meowstic-male', ['meowstic-female'],
False, True, False, False, False)
save_pokemon_move_availabilities_with_forms([xy_vg, oras_vg],
'deoxys-normal', ['deoxys-attack', 'deoxys-defense', 'deoxys-speed'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([xy_vg, oras_vg],
'wormadam-plant', ['wormadam-sandy', 'wormadam-trash'],
False, True, True, False, True)
save_pokemon_move_availabilities_with_forms([xy_vg, oras_vg],
'shaymin-land', ['shaymin-sky'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([xy_vg, oras_vg],
'kyurem', ['kyurem-black', 'kyurem-white'], True)
save_pokemon_move_availabilities_with_forms([xy_vg, oras_vg],
'hoopa', ['hoopa-unbound'], False, True, False, False, False)
# gen7
# noinspection DuplicatedCode
save_pokemon_move_availabilities_with_forms([sun_moon_vg, ultra_sun_ultra_moon_vg],
'meowstic-male', ['meowstic-female'],
False, True, False, False, False)
save_pokemon_move_availabilities_with_forms([sun_moon_vg, ultra_sun_ultra_moon_vg],
'deoxys-normal', ['deoxys-attack', 'deoxys-defense', 'deoxys-speed'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([sun_moon_vg, ultra_sun_ultra_moon_vg],
'wormadam-plant', ['wormadam-sandy', 'wormadam-trash'],
False, True, True, False, True)
save_pokemon_move_availabilities_with_forms([sun_moon_vg, ultra_sun_ultra_moon_vg],
'shaymin-land', ['shaymin-sky'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([sun_moon_vg, ultra_sun_ultra_moon_vg],
'kyurem', ['kyurem-black', 'kyurem-white'], True)
save_pokemon_move_availabilities_with_forms([sun_moon_vg, ultra_sun_ultra_moon_vg],
'hoopa', ['hoopa-unbound'], False, True, False, False, False)
save_pokemon_move_availabilities_with_forms([sun_moon_vg],
'lycanroc-midday', ['lycanroc-midnight'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([ultra_sun_ultra_moon_vg],
'lycanroc-midday', ['lycanroc-midnight', 'lycanroc-dusk'],
False, True, False, False, True)
save_pokemon_move_availabilities_with_forms([sun_moon_vg, ultra_sun_ultra_moon_vg],
'thundurus-incarnate', ['thundurus-therian'],
False, False, True, False, False)
save_pokemon_move_availabilities_with_forms([ultra_sun_ultra_moon_vg],
'necrozma', ['necrozma-dusk', 'necrozma-dawn'], True)
# gen 8
save_pokemon_move_availabilities_with_forms([sword_shield_vg],
'meowstic-male', ['meowstic-female'],
False, True, True, False, False)
save_pokemon_move_availabilities_with_forms([sword_shield_vg],
'indeedee-male', ['indeedee-female'],
False, True, True, True, False)
save_pokemon_move_availabilities_with_forms([sword_shield_vg],
'lycanroc-midday', ['lycanroc-midnight', 'lycanroc-dusk'],
False, True, True, False, True)
save_pokemon_move_availabilities_with_forms([sword_shield_vg],
'toxtricity-amped', ['toxtricity-low-key'],
False, True, True, False, True)
save_pokemon_move_availabilities_with_forms([sword_shield_vg],
'urshifu-single-strike', ['urshifu-rapid-strike'])
save_pokemon_move_availabilities_with_forms([sword_shield_vg],
'calyrex', ['calyrex-ice', 'calyrex-shadow'], True)
def save_pokemon_move_availabilities_with_forms(version_groups: list, original_name: str, forms: list,
specific_page_forms=False, level=True, machine=True, egg=True,
tutor=True):
with session.no_autoflush:
for version_group in version_groups:
original_pokemon_availability = repository.find_availability_by_pkm_and_form(
original_name, version_group)
for form in forms:
form_pokemon = repository.find_pokemon_by_identifier(form)
availability = PokemonMoveAvailability()
availability.version_group_id = version_group.id
availability.pokemon_id = form_pokemon.id
availability.has_pokepedia_page = specific_page_forms
availability.is_default = False
availability.level = level
availability.machine = machine
availability.egg = egg
availability.tutor = tutor
session.add(availability)
original_pokemon_availability.forms.append(availability)
session.add(original_pokemon_availability)
session.commit()
def save_availabilities(version_group, start, end):
pokemons = repository.find_default_pokemons_in_national_dex(start, end)
for pokemon in pokemons:
move_availability = PokemonMoveAvailability()
move_availability.version_group_id = version_group.id
move_availability.pokemon_id = pokemon.id
session.add(move_availability)
session.commit()
def save_alola_pokemons(version_group, gen8=False):
excludeds = [
'rattata-alola', 'raticate-alola', 'geodude-alola', 'graveler-alola', 'golem-alola', 'grimer-alola', 'muk-alola'
]
pokemons = repository.find_alola_pokemons()
for pokemon in pokemons:
if gen8:
if pokemon.identifier in excludeds:
continue
move_availability = PokemonMoveAvailability()
move_availability.version_group_id = version_group.id
move_availability.pokemon_id = pokemon.id
move_availability.is_default = False
session.add(move_availability)
session.commit()
def save_galar_pokemons(version_group):
pokemons = repository.find_galar_pokemons()
for pokemon in pokemons:
move_availability = PokemonMoveAvailability()
move_availability.version_group_id = version_group.id
move_availability.pokemon_id = pokemon.id
move_availability.is_default = False
session.add(move_availability)
session.commit()
def save_default_gen8_pokemons(version_group):
pokemons = repository.find_default_gen8_pokemons()
for pokemon in pokemons:
move_availability = PokemonMoveAvailability()
move_availability.version_group_id = version_group.id
move_availability.pokemon_id = pokemon.id
session.add(move_availability)
session.commit()
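# A minimal sketch of the intended call order, inferred from the function definitions
# above (the actual entry point that drives these loaders is not part of this module):
#
#     load_basic_move_availabilities()
#     load_specific_pokemon_move_availabilities()
#
# The first call creates the default per-version-group availability rows; the second
# attaches the alternate-form availabilities to their base Pokemon entries.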
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import datetime
from keystone.common import authorization
from keystone import exception
from keystone.common import controller
from keystone.common import dependency
@dependency.requires('roles_api', 'identity_api')
class BaseControllerV3(controller.V3Controller):
@classmethod
def base_url(cls, context, path=None):
"""Construct a path and pass it to V3Controller.base_url method."""
path = '/OS-ROLES/' + cls.collection_name
return super(BaseControllerV3, cls).base_url(context, path=path)
# CUSTOM API CHECKS
def _check_allowed_to_manage_roles(self, context, protection, role=None, role_id=None):
"""Add a flag for the policy engine if the user is allowed to manage
the requested application.
"""
ref = {}
application_id = None
        if role_id or (role and 'application_id' not in role):
role = self.roles_api.get_role(role_id)
if role:
application_id = role['application_id']
if 'application_id' in context['query_string']:
# List filtering
application_id = context['query_string']['application_id']
if ('environment' in context and
authorization.AUTH_CONTEXT_ENV in context['environment']):
user_id = context['environment'][authorization.AUTH_CONTEXT_ENV]['user_id']
allowed_applications = self.roles_api.list_applications_user_allowed_to_manage_roles(
user_id=user_id, organization_id=None)
ref['is_allowed_to_manage_roles'] = application_id in allowed_applications
else:
ref['is_allowed_to_manage_roles'] = False
self.check_protection(context, protection, ref)
def _check_allowed_to_get_and_assign(self, context, protection, user_id=None,
role_id=None, organization_id=None,
application_id=None):
"""Add a flag for the policy engine if the user is allowed to asign and
remove roles from a user or list application assignments.
"""
user_id = user_id if user_id else context['query_string'].get('user_id')
ref = {}
if ('environment' in context and
authorization.AUTH_CONTEXT_ENV in context['environment']):
if application_id:
req_user = self.identity_api.get_user(
context['environment'][authorization.AUTH_CONTEXT_ENV]['user_id'])
req_project_id = context['environment'][authorization.AUTH_CONTEXT_ENV]['project_id']
if req_project_id == req_user.get('default_project_id'):
# user acting as user
allowed_roles = self.roles_api.list_roles_user_allowed_to_assign(
user_id=req_user['id'], organization_id=None)
else:
# user logged as org
allowed_roles = self.roles_api.list_roles_organization_allowed_to_assign(
organization_id=req_project_id)
if role_id:
# Role must be allowed
ref['is_allowed_to_get_and_assign'] = role_id in list(
itertools.chain(*allowed_roles.values()))
else:
# application must be allowed
ref['is_allowed_to_get_and_assign'] = application_id in allowed_roles.keys()
elif user_id:
ref['is_allowed_to_get_and_assign'] = (
user_id == context['environment'][authorization.AUTH_CONTEXT_ENV]['user_id'])
else:
ref['is_allowed_to_get_and_assign'] = False
self.check_protection(context, protection, ref)
def _check_allowed_to_manage_permissions(self, context, protection, permission=None,
permission_id=None, role_id=None):
"""Add a flag for the policy engine if the user is allowed to manage
the requested application.
"""
ref = {}
application_id = None
        if permission_id or (permission and 'application_id' not in permission):
permission = self.roles_api.get_permission(permission_id)
if permission:
application_id = permission['application_id']
if role_id:
role = self.roles_api.get_role(role_id)
application_id = role['application_id']
if 'application_id' in context['query_string']:
# List filtering
application_id = context['query_string']['application_id']
if ('environment' in context and
authorization.AUTH_CONTEXT_ENV in context['environment']):
user_id = context['environment'][authorization.AUTH_CONTEXT_ENV]['user_id']
allowed_applications = self.roles_api.list_applications_user_allowed_to_manage_roles(
user_id=user_id, organization_id=None)
ref['is_allowed_to_manage_roles'] = application_id in allowed_applications
else:
ref['is_allowed_to_manage_roles'] = False
self.check_protection(context, protection, ref)
def _check_allowed_to_manage_consumer(self, context, protection, consumer_id=None,
consumer=None):
"""Add a flag for the policy engine if the user is allowed to manage
the requested application.
"""
ref = {}
if ('environment' in context and
authorization.AUTH_CONTEXT_ENV in context['environment']):
user_id = context['environment'][authorization.AUTH_CONTEXT_ENV]['user_id']
allowed_applications = self.roles_api.list_applications_user_allowed_to_manage(
user_id=user_id, organization_id=None)
ref['is_allowed_to_manage'] = consumer_id in allowed_applications
else:
ref['is_allowed_to_manage'] = False
self.check_protection(context, protection, ref)
# CONTROLLERS
class RoleCrudV3(BaseControllerV3):
collection_name = 'roles'
member_name = 'role'
@controller.protected(callback=_check_allowed_to_manage_roles)
def list_roles(self, context):
"""Description of the controller logic."""
filters = context['query_string']
ref = self.roles_api.list_roles(**filters)
return RoleCrudV3.wrap_collection(context, ref)
@controller.protected(callback=_check_allowed_to_manage_roles)
def create_role(self, context, role):
ref = self._assign_unique_id(self._normalize_dict(role))
role_ref = self.roles_api.create_role(ref)
return RoleCrudV3.wrap_member(context, role_ref)
@controller.protected(callback=_check_allowed_to_manage_roles)
def get_role(self, context, role_id):
role_ref = self.roles_api.get_role(role_id)
return RoleCrudV3.wrap_member(context, role_ref)
@controller.protected(callback=_check_allowed_to_manage_roles)
def update_role(self, context, role_id, role):
self._require_matching_id(role_id, role)
ref = self.roles_api.update_role(role_id, self._normalize_dict(role))
return RoleCrudV3.wrap_member(context, ref)
@controller.protected(callback=_check_allowed_to_manage_roles)
def delete_role(self, context, role_id):
self.roles_api.delete_role(role_id)
class RoleUserAssignmentV3(BaseControllerV3):
collection_name = 'role_assignments'
member_name = 'role_assignment'
@classmethod
def _add_self_referential_link(cls, context, ref):
pass
@controller.protected(callback=_check_allowed_to_get_and_assign)
def list_role_user_assignments(self, context):
filters = context['query_string']
use_default_org = filters.pop('default_organization', False)
user_id = filters.get('user_id', False)
if use_default_org and user_id:
user = self.identity_api.get_user(user_id)
organization_id = user.get('default_project_id', None)
if not organization_id:
raise exception.ProjectNotFound(
message='This user has no default organization')
filters['organization_id'] = organization_id
ref = self.roles_api.list_role_user_assignments(**filters)
return RoleUserAssignmentV3.wrap_collection(context, ref)
@controller.protected(callback=_check_allowed_to_get_and_assign)
def add_role_to_user(self, context, role_id, user_id,
organization_id, application_id):
self.roles_api.add_role_to_user(role_id, user_id,
organization_id, application_id)
@controller.protected(callback=_check_allowed_to_get_and_assign)
def remove_role_from_user(self, context, role_id, user_id,
organization_id, application_id):
self.roles_api.remove_role_from_user(role_id, user_id,
organization_id, application_id)
@controller.protected(callback=_check_allowed_to_get_and_assign)
def add_role_to_user_default_org(self, context, role_id, user_id,
application_id):
user = self.identity_api.get_user(user_id)
organization_id = user.get('default_project_id', None)
if organization_id:
self.roles_api.add_role_to_user(role_id, user_id,
organization_id, application_id)
else:
raise exception.ProjectNotFound(
message='This user has no default organization')
@controller.protected(callback=_check_allowed_to_get_and_assign)
def remove_role_from_user_default_org(self, context, role_id, user_id,
application_id):
user = self.identity_api.get_user(user_id)
organization_id = user.get('default_project_id', None)
if organization_id:
self.roles_api.remove_role_from_user(role_id, user_id,
organization_id, application_id)
else:
raise exception.ProjectNotFound(
message='This user has no default organization')
class RoleOrganizationAssignmentV3(BaseControllerV3):
collection_name = 'role_assignments'
member_name = 'role_assignment'
@classmethod
def _add_self_referential_link(cls, context, ref):
pass
@controller.protected(callback=_check_allowed_to_get_and_assign)
def list_role_organization_assignments(self, context):
filters = context['query_string']
ref = self.roles_api.list_role_organization_assignments(**filters)
return RoleOrganizationAssignmentV3.wrap_collection(context, ref)
@controller.protected(callback=_check_allowed_to_get_and_assign)
def add_role_to_organization(self, context, role_id,
organization_id, application_id):
self.roles_api.add_role_to_organization(role_id,
organization_id,
application_id)
#@controller.protected(callback=_check_allowed_to_get_and_assign)
def remove_role_from_organization(self, context, role_id,
organization_id, application_id):
self.roles_api.remove_role_from_organization(role_id,
organization_id,
application_id)
class AllowedActionsControllerV3(BaseControllerV3):
@controller.protected()
def list_roles_user_allowed_to_assign(self, context, user_id,
organization_id):
ref = self.roles_api.list_roles_user_allowed_to_assign(
user_id, organization_id)
response = {
'allowed_roles': ref
}
return response
@controller.protected()
def list_roles_organization_allowed_to_assign(self, context,
organization_id):
ref = self.roles_api.list_roles_organization_allowed_to_assign(
organization_id)
response = {
'allowed_roles': ref
}
return response
@controller.protected()
def list_applications_user_allowed_to_manage(self, context, user_id,
organization_id):
ref = self.roles_api.list_applications_user_allowed_to_manage(
user_id, organization_id)
response = {
'allowed_applications': ref
}
return response
@controller.protected()
def list_applications_organization_allowed_to_manage(self, context,
organization_id):
ref = self.roles_api.list_applications_organization_allowed_to_manage(
organization_id)
response = {
'allowed_applications': ref
}
return response
@controller.protected()
def list_applications_user_allowed_to_manage_roles(self, context, user_id,
organization_id):
ref = self.roles_api.list_applications_user_allowed_to_manage_roles(
user_id, organization_id)
response = {
'allowed_applications': ref
}
return response
@controller.protected()
def list_applications_organization_allowed_to_manage_roles(self, context,
organization_id):
ref = self.roles_api.list_applications_organization_allowed_to_manage_roles(
organization_id)
response = {
'allowed_applications': ref
}
return response
class PermissionCrudV3(BaseControllerV3):
collection_name = 'permissions'
member_name = 'permission'
@controller.protected(callback=_check_allowed_to_manage_permissions)
def list_permissions(self, context):
"""Description of the controller logic."""
filters = context['query_string']
ref = self.roles_api.list_permissions(**filters)
return PermissionCrudV3.wrap_collection(context, ref)
@controller.protected(callback=_check_allowed_to_manage_permissions)
def create_permission(self, context, permission):
ref = self._assign_unique_id(self._normalize_dict(permission))
permission_ref = self.roles_api.create_permission(ref)
return PermissionCrudV3.wrap_member(context, permission_ref)
@controller.protected(callback=_check_allowed_to_manage_permissions)
def get_permission(self, context, permission_id):
permission_ref = self.roles_api.get_permission(permission_id)
return PermissionCrudV3.wrap_member(context, permission_ref)
@controller.protected(callback=_check_allowed_to_manage_permissions)
def update_permission(self, context, permission_id, permission):
self._require_matching_id(permission_id, permission)
ref = self.roles_api.update_permission(
permission_id, self._normalize_dict(permission))
return PermissionCrudV3.wrap_member(context, ref)
@controller.protected(callback=_check_allowed_to_manage_permissions)
def delete_permission(self, context, permission_id):
self.roles_api.delete_permission(permission_id)
@controller.protected(callback=_check_allowed_to_manage_permissions)
def list_permissions_for_role(self, context, role_id):
ref = self.roles_api.list_permissions_for_role(role_id)
return PermissionCrudV3.wrap_collection(context, ref)
@controller.protected(callback=_check_allowed_to_manage_permissions)
def add_permission_to_role(self, context, role_id, permission_id):
self.roles_api.add_permission_to_role(role_id, permission_id)
@controller.protected(callback=_check_allowed_to_manage_permissions)
def remove_permission_from_role(self, context, role_id, permission_id):
self.roles_api.remove_permission_from_role(role_id, permission_id)
@dependency.requires('identity_api', 'oauth2_api')
class FiwareApiControllerV3(BaseControllerV3):
#@controller.protected()
def authorized_organizations(self, context, token_id):
""" Returns all the organizations in which the user has a role
from the application that got the OAuth2.0 token.
"""
# TODO(garcianavalon) check if token is valid, use user_id to filter in get
token = self.oauth2_api.get_access_token(token_id)
user = self.identity_api.get_user(token['authorizing_user_id'])
application_id = token['consumer_id']
organizations = self.roles_api.get_authorized_organizations(
user, application_id, remove_default_organization=True)
return {
'organizations': organizations
}
# @controller.protected()
def validate_oauth2_token(self, context, token_id):
""" Return a list of the roles and permissions of the user associated
with this token.
See https://github.com/ging/fi-ware-idm/wiki/Using-the-FI-LAB-instance\
#get-user-information-and-roles
"""
# TODO(garcianavalon) check if token is valid, use user_id to filter in get
token = self.oauth2_api.get_access_token(token_id)
application_id = token['consumer_id']
application = self.oauth2_api.get_consumer(application_id)
if not token['authorizing_user_id']:
# Client Credentials Grant
# We validate the token but no user info is provided
return {
}
if not token['valid'] or datetime.datetime.strptime(token['expires_at'], '%Y-%m-%d %H:%M:%S') < datetime.datetime.today():
raise exception.Unauthorized
user = self.identity_api.get_user(token['authorizing_user_id'])
organizations = self.roles_api.get_authorized_organizations(
user, application_id)
# remove the default organization and extract its roles
user_roles = []
user_organization = next((org for org in organizations
if org['id'] == user['default_project_id']), None)
if user_organization:
organizations.remove(user_organization)
# extract the user-scoped roles
user_roles = user_organization.pop('roles')
def _get_name(user):
name = user.get('username')
if not name:
name = user['name']
return name
response_body = {
'id':user['id'],
'email': user['name'],
'displayName': _get_name(user),
'roles': user_roles,
'organizations': organizations,
'app_id': application_id,
'isGravatarEnabled': user['use_gravatar'] if 'use_gravatar' in user else False
}
if 'ac_domain' in application:
response_body['app_azf_domain'] = application['ac_domain']
return response_body
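    # For reference, a hedged sketch of the payload shape that validate_oauth2_token
    # builds above; all field values are illustrative only.
    #
    #     {
    #         "id": "user-uuid",
    #         "email": "user@example.com",
    #         "displayName": "username",
    #         "roles": [...],                 # roles scoped to the user's default organization
    #         "organizations": [...],         # each with its own 'roles' list
    #         "app_id": "consumer-uuid",
    #         "isGravatarEnabled": False,
    #         "app_azf_domain": "...",        # only when the consumer defines 'ac_domain'
    #     }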
@dependency.requires('oauth2_api')
class ExtendedPermissionsConsumerCrudV3(BaseControllerV3):
"""This class is ment to extend the basic consumer with callbacks that use
the internal permission from this extensions.
"""
collection_name = 'consumers'
member_name = 'consumer'
@controller.protected()
def list_consumers(self, context):
ref = self.oauth2_api.list_consumers()
return ExtendedPermissionsConsumerCrudV3.wrap_collection(context, ref)
@controller.protected()
def create_consumer(self, context, consumer):
ref = self._assign_unique_id(self._normalize_dict(consumer))
consumer_ref = self.oauth2_api.create_consumer(ref)
return ExtendedPermissionsConsumerCrudV3.wrap_member(context, consumer_ref)
@controller.protected(callback=_check_allowed_to_manage_consumer)
def get_consumer(self, context, consumer_id):
consumer_ref = self.oauth2_api.get_consumer_with_secret(consumer_id)
return ExtendedPermissionsConsumerCrudV3.wrap_member(context, consumer_ref)
@controller.protected(callback=_check_allowed_to_manage_consumer)
def update_consumer(self, context, consumer_id, consumer):
self._require_matching_id(consumer_id, consumer)
ref = self._normalize_dict(consumer)
self._validate_consumer_ref(ref)
ref = self.oauth2_api.update_consumer(consumer_id, ref)
return ExtendedPermissionsConsumerCrudV3.wrap_member(context, ref)
def _validate_consumer_ref(self, consumer):
if 'secret' in consumer:
msg = 'Cannot change consumer secret'
raise exception.ValidationError(message=msg)
@controller.protected(callback=_check_allowed_to_manage_consumer)
def delete_consumer(self, context, consumer_id):
self.oauth2_api.delete_consumer(consumer_id)
|
"""
NFS exports configuration
=========================
NFSExports and NFSExportsD provide a parsed output of the content of an exports
file as defined in ``man exports(5)``. The content is parsed into a
dictionary, where the key is the export path and the value is another
dictionary, where the key is the hostname and the value is the option list,
parsed into an actual list.
The default (``"-"``) hostname is not specially handled, nor are wildcards.
If an export path is defined on multiple lines, the host definitions are merged.
A host that is redefined for the same path keeps its first definition; the
conflicting redefinition is recorded in the ``ignored_exports`` member
(``ignored_lines`` is kept as a synonym).
All raw lines are kept in ``raw_lines``, which is a ``dict`` where the key is
the export path and the value is the stripped raw line.
Parsers included in this module are:
NFSExports - file ``nfs_exports``
---------------------------------
NFSExportsD - files in the ``nfs_exports.d`` directory
------------------------------------------------------
Sample content of the ``/etc/exports`` file::
/home/utcs/shared/ro @group(ro,sync) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(rw,sync,no_root_squash)
/home/insights/shared/rw @group(rw,sync) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(ro,sync,no_root_squash)
/home/insights/shared/special/all/mail @group(rw,sync,no_root_squash)
/home/insights/ins/special/all/config @group(ro,sync,no_root_squash) ins1.example.com(rw,sync,no_root_squash)
#/home/insights ins1.example.com(rw,sync,no_root_squash)
/home/example @group(rw,sync,root_squash) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(rw,sync,no_root_squash)
# A duplicate host for this exported path
/home/example ins2.example.com(rw,sync,no_root_squash)
Examples:
>>> type(exports)
<class 'insights.parsers.nfs_exports.NFSExports'>
>>> type(exports.data) == type({})
True
>>> exports.raw_lines['/home/insights/shared/rw'] # List of lines that define this path
['/home/insights/shared/rw @group(rw,sync) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(ro,sync,no_root_squash)']
>>> exports.raw_lines['/home/example'] # Lines are stored even if they contain duplicate hosts
['/home/example @group(rw,sync,root_squash) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(rw,sync,no_root_squash)', '/home/example ins2.example.com(rw,sync,no_root_squash)']
>>> exports.ignored_exports
{'/home/example': {'ins2.example.com': ['rw', 'sync', 'no_root_squash']}}
>>> sorted(list(exports.all_options()))
['no_root_squash', 'ro', 'root_squash', 'rw', 'sync']
>>> sorted(list(exports.export_paths()))
['/home/example', '/home/insights/ins/special/all/config', '/home/insights/shared/rw', '/home/insights/shared/special/all/mail', '/home/utcs/shared/ro']
"""
from itertools import chain
from .. import Parser, parser
from . import get_active_lines
from insights.specs import Specs
from insights.util import deprecated
class NFSExportsBase(Parser):
"""
Class to parse ``/etc/exports`` and ``/etc/exports.d/*.exports``.
Exports are stored keyed on the path of the export, and then the host
definition. The flags are stored as a list. NFS allows the same path
to be listed on multiple lines and in multiple files, but an exported
path can only have one definition for a given host.
Attributes:
data (dict): Key is export path, value is a dict, where the key is the
client host and the value is a list of options.
ignored_exports (dict): A dictionary of exported paths that have host
definitions that conflicted with a previous definition.
ignored_lines (dict): A synonym for the above `ignored_exports`
dictionary, for historical reasons.
raw_lines (dict of lists): The list of the raw lines that define each
exported path, including any lines that may have ignored exports.
"""
def _parse_host(self, content):
if "(" in content:
host, options = content.split("(")
options = options.rstrip(")").split(",")
else:
host, options = content, []
return host, options
def _parse_line(self, line):
split = [i.strip() for i in line.split()]
path, hosts = split[0], dict(self._parse_host(s) for s in split[1:])
return path, hosts
def parse_content(self, content):
# Exports can be duplicated, but the path-host tuple cannot: the
# first read will be stored and all later path-host tuples cause
# `exportfs` to generate a warning when setting up the export.
self.data = {}
self.ignored_exports = {}
self.ignored_lines = self.ignored_exports
self.raw_lines = {}
for line in get_active_lines(content):
path, hosts = self._parse_line(line)
if path not in self.data:
# New path, just add the hosts.
self.data[path] = hosts
self.raw_lines[path] = [line]
else:
# Add to raw lines even if some (or all) hosts are ignored.
self.raw_lines[path].append(line)
# Have to check each path-host tuple
for host, flags in hosts.items():
if host not in self.data[path]:
# Only add if it doesn't already exist.
self.data[path][host] = flags
else:
if path not in self.ignored_exports:
self.ignored_exports[path] = {host: flags}
else:
self.ignored_exports[path][host] = flags
def export_paths(self):
"""Returns the set of all export paths as strings"""
return set(self.data.keys())
def all_options(self):
"""Returns the set of all options used in all export entries"""
items = chain.from_iterable(hosts.values() for hosts in self.data.values())
return set(chain.from_iterable(items))
def __iter__(self):
return iter(self.data.items())
@staticmethod
def reconstitute(path, d):
"""
.. warning::
This function is deprecated. Please use the `raw_lines` dictionary
property of the parser instance instead, as this contains the actual
lines from the exports file.
'Reconstitute' a line from its parsed value. The original lines are
        not used for this. The hosts in d are listed in the dictionary's
        iteration order, and the options are listed in the order originally given.
Arguments:
path (str): The exported path
d (dict): The hosts definition of the exported path
Returns:
str: A line simulating the definition of that exported path to
those hosts.
"""
deprecated(
NFSExportsBase.reconstitute,
'Please use the `raw_lines` dictionary property of the parser instance'
)
return " ".join([path] + ["%s(%s)" % (host, ",".join(options))
for host, options in d.items()])
@parser(Specs.nfs_exports)
class NFSExports(NFSExportsBase):
"""Subclass to attach ``nfs_exports`` spec to"""
pass
@parser(Specs.nfs_exports_d)
class NFSExportsD(NFSExportsBase):
"""Subclass to attach ``nfs_exports.d`` spec to"""
pass
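# A small illustration of the resulting data structure, based on the first sample
# line of the /etc/exports content shown in the module docstring:
#
#     exports.data['/home/utcs/shared/ro'] == {
#         '@group': ['ro', 'sync'],
#         'ins1.example.com': ['rw', 'sync', 'no_root_squash'],
#         'ins2.example.com': ['rw', 'sync', 'no_root_squash'],
#     }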
|
from __future__ import unicode_literals, print_function
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
env = Environment(undefined=StrictUndefined)
env.loader = FileSystemLoader(".")
vrf_list = [
    {"vrf_name": "blue1", "ipv4_enabled": True, "ipv6_enabled": True, "red": "100:1"},
    {"vrf_name": "blue2", "ipv4_enabled": True, "ipv6_enabled": True, "red": "200:2"},
]
vrf_vars = {"vrf_list": vrf_list}
my_template = "bgp_template.j2"
j2_template = env.get_template(my_template)
output = j2_template.render(**vrf_vars)
print(output)
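# The referenced bgp_template.j2 is not included here. A hypothetical template that
# would render against the variables above could look like this (illustrative only):
#
#     {% for vrf in vrf_list %}
#     vrf context {{ vrf.vrf_name }}
#       rd {{ vrf.red }}
#     {% if vrf.ipv4_enabled %}  address-family ipv4 unicast{% endif %}
#     {% if vrf.ipv6_enabled %}  address-family ipv6 unicast{% endif %}
#     {% endfor %}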
|
__author__ = 'mattjmorris'
from .dynamo import Dynamo
from boto.dynamodb2.table import Table
from datetime import datetime, date, timedelta
from pandas import DataFrame
import re
DAY_STR_FORMAT = "%Y-%m-%d"
DAY_STR_RE = re.compile(r'^(\d{4})-(\d{2})-(\d{2})$')
SECOND_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
SECOND_STR_RE = re.compile(r'^(\d{4})-(\d{2})-(\d{2})\s(\d{2}:\d{2}:\d{2})$')
class DDBRuns(Dynamo):
@classmethod
def from_test_mode(cls, access_key=None, secret=None):
"""
Use this for getting an instance of this class that uses test tables.
"""
instance = cls(access_key, secret)
instance.table = Table('test_runs', connection=instance.connection)
return instance
def __init__(self, access_key=None, secret=None):
"""
When called directly (as should be done for production code), sets table to the production 'runs' table.
"""
super(DDBRuns, self).__init__(access_key, secret)
self.table = Table('runs', connection=self.connection)
def save_new_run(self, dt_str=None, start_date_str=None, end_date_str=None):
"""
dt_str = datetime of run. Defaults to now.
start_date_str = the start date for look-back of query performance data processing. * No default
end_date_str = the end date for query performance data processing. Defaults to today.
"""
assert start_date_str, "start_date_str is required when saving a new run to runs table."
assert DAY_STR_RE.match(start_date_str)
if end_date_str:
assert DAY_STR_RE.match(end_date_str)
if dt_str:
assert SECOND_STR_RE.match(dt_str)
dt_str = dt_str or datetime.now().strftime(SECOND_STR_FORMAT)
end_date_str = end_date_str or datetime.now().strftime(DAY_STR_FORMAT)
return self.table.put_item(data={'dt': dt_str, 'start': start_date_str, 'end': end_date_str})
def most_recent_start_date_str(self):
"""
:return: a string representing most recent start date from db
"""
df = self.get_runs_df()
if df.empty:
return None
else:
# should already be sorted, but just in case...
            df.sort_values(by='dt', ascending=True, inplace=True)
return df.iloc[len(df)-1]['start']
def most_recent_end_date_str(self):
"""
:return: a string representing most recent end date from db
"""
df = self.get_runs_df()
if df.empty:
return None
else:
# should already be sorted, but just in case...
            df.sort_values(by='dt', ascending=True, inplace=True)
return df.iloc[len(df)-1]['end']
def get_runs_df(self):
"""
Returns all table as dataframe, sorted with most recent entry on bottom (ascending order)
"""
df = DataFrame([{k: v for k, v in list(r.items())} for r in self.table.scan()])
if df.empty:
return df
else:
            df.sort_values(by='dt', ascending=True, inplace=True)
# force df to have columns in this order
return df[['dt', 'start', 'end']]
def modify_throughput(self, requested_read, requested_write, table=None):
table = table or self.table
return super(DDBRuns, self).modify_throughput(requested_read, requested_write, table)
def truncate_table(self):
"""
WARNING! Only use for test mode table
"""
assert self.table.table_name == 'test_runs', "Will only truncate test table. To truncate production table, run code manually"
with self.table.batch_write() as batch:
for item in self.table.scan():
batch.delete_item(dt=item['dt'])
def thors_start_end_date_strings(self, new_run=True, days_ago_start=30):
if new_run:
if days_ago_start is not None:
print(days_ago_start)
start_date_str = self._days_ago_str(days_ago_start)
else:
start_date_str = self.most_recent_end_date_str()
end_date_str = date.today().strftime(DAY_STR_FORMAT)
else:
start_date_str = self.most_recent_start_date_str()
end_date_str = self.most_recent_end_date_str()
assert start_date_str, "Start date string is None, please check the database since we are not doing a new run"
assert end_date_str, "End date string is None, please check the database since we are not doing a new run"
return start_date_str, end_date_str
def _days_ago_str(self, num_days_ago):
return (date.today() - timedelta(days=num_days_ago)).strftime(DAY_STR_FORMAT)
def start_end_date_strings(self, new_run=True, days_ago_start=30):
if new_run:
start_date_str = self.most_recent_end_date_str() or self._days_ago_str(days_ago_start)
end_date_str = date.today().strftime(DAY_STR_FORMAT)
else:
start_date_str = self.most_recent_start_date_str()
end_date_str = self.most_recent_end_date_str()
return start_date_str, end_date_str
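# A minimal usage sketch (assumes valid AWS credentials and the DynamoDB tables
# referenced above already exist; the date values are illustrative):
#
#     runs = DDBRuns.from_test_mode()
#     runs.save_new_run(start_date_str='2015-01-01')
#     start, end = runs.start_end_date_strings(new_run=True, days_ago_start=30)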
|
# -*- coding: utf-8 -*-
from .aliases import *
from .helper import Helper
from ..consts.lessons import LESSON_ID_MAX_LENGTH, LESSON_TITLE_MAX_LENGTH, LESSON_BODY_MAX_LENGTH, LESSON_DESC_MAX_LENGTH
from .topics import Topics
from wcics.utils.time import get_time
class lessons(dbmodel, Helper):
id = dbcol(dbint, primary_key = True)
lid = dbcol(dbstr(LESSON_ID_MAX_LENGTH), unique = True, nullable = False)
oid = dbcol(dbint, dbforkey('organizations.id', ondelete = "CASCADE", onupdate = "CASCADE"))
title = dbcol(dbstr(LESSON_TITLE_MAX_LENGTH), nullable = False)
desc = dbcol(dbstr(LESSON_DESC_MAX_LENGTH), nullable = False)
body = dbcol(dbstr(LESSON_BODY_MAX_LENGTH), nullable = False)
create_time = dbcol(dbint, default = get_time, nullable = False)
def has_author(self, uid):
return LessonAuthors.query.filter_by(lid = self.id, uid = uid).count() > 0
@property
def authors(self):
from .users import Users
# importing inside the function to avoid a circular import at the top
        # the alternative (using a private method and keyed access to the name directly) is worse
return Users.query.join(LessonAuthors).filter(LessonAuthors.lid == self.id).all()
@property
def author_ids(self):
return [uid for (uid,) in db.session.query(LessonAuthors.uid).filter_by(lid = self.id).all()]
@property
def topics(self):
return LessonTopics.query.filter_by(lid = self.id).all()
class lesson_topics(dbmodel, Helper):
lid = dbcol(dbint, dbforkey(lessons.id, onupdate = "CASCADE", ondelete = "CASCADE"), primary_key = True)
tid = dbcol(dbint, dbforkey(Topics.id, onupdate = "CASCADE", ondelete = "CASCADE"), primary_key = True)
class lesson_authors(dbmodel, Helper):
lid = dbcol(dbint, dbforkey(lessons.id, onupdate = "CASCADE", ondelete = "CASCADE"), primary_key = True)
uid = dbcol(dbint, dbforkey('users.id', onupdate = "CASCADE", ondelete = "CASCADE"), primary_key = True)
oid = dbcol(dbint, dbforkey('organizations.id', onupdate = "CASCADE", ondelete = "CASCADE"), nullable = False)
__table_args__ = (db.ForeignKeyConstraint(("uid", "oid"), ("organization_users.uid", "organization_users.oid")), )
Lessons = lessons
LessonTopics = lesson_topics
LessonAuthors = lesson_authors
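# A hedged usage sketch of the relationships defined above (assumes an application
# context with the database session configured; the lesson id is illustrative):
#
#     lesson = Lessons.query.filter_by(lid='intro-dp').first()
#     lesson.authors      # Users joined through LessonAuthors
#     lesson.author_ids   # plain list of user ids
#     lesson.topics       # LessonTopics rows for this lesson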
|
# coding: utf-8
"""
=====================================
Feature Extraction for Classification
=====================================
Extract acoustic features from labeled data for
training an environment or speech classifier.
To see how soundpy implements this, see `soundpy.builtin.envclassifier_feats`.
"""
###############################################################################################
#
#####################################################################
import os, sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parparentdir = os.path.dirname(parentdir)
packagedir = os.path.dirname(parparentdir)
sys.path.insert(0, packagedir)
import matplotlib.pyplot as plt
import soundpy as sp
import IPython.display as ipd
package_dir = '../../../'
os.chdir(package_dir)
sp_dir = package_dir
######################################################
# Prepare for Extraction: Data Organization
# -----------------------------------------
######################################################
# I will use a sample speech commands data set:
##########################################################
# Designate path relevant for accessing audiodata
data_dir = '/home/airos/Projects/Data/sound/speech_commands_small_section/'
######################################################
# Choose Feature Type
# ~~~~~~~~~~~~~~~~~~~
# We can extract 'mfcc', 'fbank', 'powspec', and 'stft'.
# If you are working with speech, I suggest 'fbank', 'powspec', or 'stft'.
feature_type = 'fbank'
######################################################
# Set Duration of Audio
# ~~~~~~~~~~~~~~~~~~~~~
# How much audio, in seconds, to use from each audio file.
# The example noise and speech files are only 1 second long
dur_sec = 1
#############################################################
# Built-In Functionality - soundpy extracts the features for you
# ----------------------------------------------------------------------------
############################################################
# Define which data to use and which features to extract.
# Everything else is based on defaults. A feature folder with
# the feature data will be created in the current working directory.
# (Although, you can set this under the parameter `data_features_dir`)
# `visualize` saves periodic images of the features extracted.
# This is useful if you want to know what's going on during the process.
extraction_dir = sp.envclassifier_feats(data_dir,
feature_type=feature_type,
dur_sec=dur_sec,
visualize=True);
################################################################
# The extracted features, extraction settings applied, and
# which audio files were assigned to which datasets
# will be saved in the following directory:
extraction_dir
############################################################
# And that's it!
|
import os.path
import sys
import pytest
from shapely.geometry import LineString
from shapely.geometry import Point
import conftest
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from interpoint import models
class TestPtOnEdge:
def test_is_point(self, fp_pt1):
"""
        Test with Point and LineString geometries
"""
ptonedge = models.PtOnEdge(fp_pt1)
assert ptonedge.is_point()
ptonedge.gdf['geometry'] = LineString([(0, 0), (1, 1)])
assert not ptonedge.is_point()
def test_is_numeric_m(self, fp_pt1):
"""
        Test numeric and string values in the longitudinal-distance field
"""
ptonedge = models.PtOnEdge(fp_pt1)
fld_m = ptonedge.gdf.columns[0]
assert ptonedge.is_numeric(fld_m)
ptonedge.gdf[fld_m] = [c for c in 'abcde']
assert not ptonedge.is_numeric(fld_m)
def test_is_numeric_z(self, fp_pt1):
"""
        Test numeric and string values in the elevation field
"""
ptonedge = models.PtOnEdge(fp_pt1)
fld_z = ptonedge.gdf.columns[1]
assert ptonedge.is_numeric(fld_z)
ptonedge.gdf[fld_z] = [c for c in 'abcde']
assert not ptonedge.is_numeric(fld_z)
def test_fieldname(self, fp_pt1):
"""
        Test column renaming and ascending order of the longitudinal distance
"""
ptonedge = models.PtOnEdge(fp_pt1)
ptonedge.fieldname(*conftest.COLS_MZ)
list_columns = ptonedge.gdf.columns.tolist()
assert list_columns == ['m', 'z', 'geometry']
assert ptonedge.gdf['m'].is_monotonic # type: ignore
@pytest.mark.parametrize(('idx', 'm', 'z', 'x', 'y'), [
(0, 0, 10, 0, 0),
(15, 150, 11.5, 150, 150),
(40, 400, 14, 400, 400)
])
def test_interpolate_lengthwide(self, fp_pt1, idx, m, z, x, y):
"""
        Test that there are no NA values and that values match expectations
"""
pitch = 10.0
ptonedge = models.PtOnEdge(fp_pt1)
ptonedge.fieldname(*conftest.COLS_MZ)
ptonedge.interpolate_lengthwide(pitch=pitch)
assert ptonedge.gdf.isnull().values.sum() == 0 # type: ignore
list_mzxyg = ptonedge.gdf.iloc[idx].tolist() # type: ignore
assert list_mzxyg == [m, z, x, y, Point(x, y, z)]
class TestPtAll:
@pytest.mark.parametrize(('idx', 'i', 'x', 'y', 'm', 'z'), [
(0, 0, 0.0, 0.0, 0.0, 10.0),
(100, 2, 180.0, 200.0, 180.0, 11.8),
(450, 10, 400.0, 500.0, 400.0, 14.0)
])
def test_interpolate_crosswide(
self, ptall: models.PtAll, idx, i, x, y, m, z
):
"""
        Test that there are no NA values and that values match expectations
"""
assert ptall.gdf.isnull().values.sum() == 0
list_ixymzg = ptall.gdf.iloc[idx].tolist() # type: ignore
assert list_ixymzg == [i, x, y, m, z, Point(x, y, z)]
def test_output_shp(self, ptall: models.PtAll, fn='ptall.shp'):
"""
        Test shapefile output
"""
fp = os.path.join(conftest.TMPFOLD, fn)
ptall.output_shp(fp)
assert os.path.isfile(fp)
conftest.delfiles(fn)
def test_is_equal_projected_crs(fp_pt1, fp_pt2):
"""
    Test CRS handling: matching projected CRS, undefined CRS, geographic CRS, and mismatched CRS
"""
ptonedge1 = models.PtOnEdge(fp_pt1)
ptonedge2 = models.PtOnEdge(fp_pt2)
assert models.is_equal_projected_crs(ptonedge1, ptonedge2)
ptonedge1.gdf.crs = None
assert not models.is_equal_projected_crs(ptonedge1, ptonedge2)
ptonedge1.gdf.crs = 'EPSG:4612'
assert not models.is_equal_projected_crs(ptonedge1, ptonedge2)
ptonedge1.gdf.crs = 'EPSG:2443'
ptonedge2.gdf.crs = 'EPSG:2444'
assert not models.is_equal_projected_crs(ptonedge1, ptonedge2)
def test_is_equal_m(fp_pt1, fp_pt2):
"""
    Test matching and mismatching longitudinal distances between two PtOnEdge objects
"""
ptonedge1 = models.PtOnEdge(fp_pt1)
ptonedge2 = models.PtOnEdge(fp_pt2)
ptonedge1.fieldname(*conftest.COLS_MZ)
ptonedge2.fieldname(*conftest.COLS_MZ)
assert models.is_equal_m(ptonedge1, ptonedge2)
ptonedge1.gdf['m'] = ptonedge1.gdf['m'] + 1 # type: ignore
assert not models.is_equal_m(ptonedge1, ptonedge2)
|
#! /usr/bin/python
import time
from datetime import datetime
import serial
from xbee import XBee, ZigBee
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port and enable flow control
ser = serial.Serial(PORT, BAUD_RATE, bytesize=8, parity='N', stopbits=1, timeout=None, xonxoff=1, rtscts=1, dsrdtr=1)
# Create API object
xbee = ZigBee(ser,escaped=True)
#DEST_ADDR_LONG = "\x00\x13\xA2\x00\x40\x9C\x91\xA5"
#DEST_ADDR_LONG = "\x00\x13\xA2\x00\x40\xC5\x5A\x84"
# Router
DEST_ADDR_LONG = "\x00\x13\xA2\x00\x40\xC5\x5B\x05"
# discover the short 16-bit address of the destination node
xbee.send("tx",data="000\n",dest_addr_long=DEST_ADDR_LONG,dest_addr="\xff\xfe")
response = xbee.wait_read_frame()
shot_addr = response["dest_addr"]
# Continuously send bursts of packets and time each burst
while True:
try:
print "send data"
tstart = datetime.now()
xbee.send("tx",data="321\n",dest_addr_long=DEST_ADDR_LONG,dest_addr=shot_addr)
xbee.send("tx",data="322\n",dest_addr_long=DEST_ADDR_LONG,dest_addr=shot_addr)
xbee.send("tx",data="323\n",dest_addr_long=DEST_ADDR_LONG,dest_addr=shot_addr)
xbee.send("tx",data="324\n",dest_addr_long=DEST_ADDR_LONG,dest_addr=shot_addr)
xbee.send("tx",data="325\n",dest_addr_long=DEST_ADDR_LONG,dest_addr=shot_addr)
tend = datetime.now()
print tend - tstart
time.sleep(1)
except KeyboardInterrupt:
break
ser.close()
|
# Generated by Django 3.0.8 on 2020-08-08 15:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Shop', '0003_like'),
('Main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street_address', models.CharField(max_length=100)),
('apartment_address', models.CharField(max_length=100)),
('country', django_countries.fields.CountryField(max_length=2)),
('zip', models.CharField(max_length=100)),
('address_type', models.CharField(choices=[('B', 'Billing'), ('S', 'Shipping')], max_length=1)),
('default', models.BooleanField(default=False)),
('usr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Addresses',
},
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stripe_charge_id', models.CharField(max_length=50)),
('amount', models.FloatField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('usr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ordered', models.BooleanField(default=False)),
('quantity', models.IntegerField(default=1)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Shop.Item')),
('usr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ref_code', models.CharField(blank=True, max_length=20, null=True)),
('start_date', models.DateTimeField(auto_now_add=True)),
('ordered_date', models.DateTimeField()),
('ordered', models.BooleanField(default=False)),
('being_delivered', models.BooleanField(default=False)),
('received', models.BooleanField(default=False)),
('refund_requested', models.BooleanField(default=False)),
('refund_granted', models.BooleanField(default=False)),
('billing_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='billing_address', to='Main.Address')),
('items', models.ManyToManyField(to='Main.OrderItem')),
('payment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='Main.Payment')),
('shipping_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='shipping_address', to='Main.Address')),
('usr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
from math import cos, pi
from typing import Callable, List
from src.common.model.line_segment import LineSegment
from src.common.model.numerical_integrator import NumericalIntegrator
def get_mohler_roots(n: int) -> List[float]:
return [cos((2 * k - 1) / (2 * n) * pi) for k in range(1, n + 1)]
def get_mohler_coefficients(n: int) -> List[float]:
return [pi / n] * n
class MohlerMethod(NumericalIntegrator):
def integrate(self, *, f: Callable, segment: LineSegment, n: int, **kwargs) -> float:
roots = get_mohler_roots(n)
coefficients = get_mohler_coefficients(n)
return sum([coefficient * f(root) for coefficient, root in zip(coefficients, roots)])
@property
def accuracy_degree(self) -> int:
raise NotImplementedError
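# A minimal self-check sketch (added for illustration, not part of the original
# module): the Mehler / Chebyshev-Gauss rule above approximates
# integral_{-1}^{1} f(x) / sqrt(1 - x^2) dx, so with f(x) = x**2 the exact value
# is pi / 2. Only the helper functions defined above are used, so no LineSegment
# has to be constructed; `segment` is part of the interface but unused by this rule.
if __name__ == '__main__':
    n = 6
    approx = sum(c * r * r for c, r in zip(get_mohler_coefficients(n), get_mohler_roots(n)))
    print(approx, pi / 2)  # both values should be ~1.5707963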
|
# Circuit pseudocode
# Data structures
struct op:
# operation data
tx_type: # type of transaction, see the list: https://docs.google.com/spreadsheets/d/1ejK1MJfVehcwjgjVDFD3E2k1EZ7auqbG_y0DKidS9nA/edit#gid=0
chunk: # op chunk number (0..3)
pubdata_chunk: # current chunk of the pubdata (always 8 bytes)
args: # arguments for the operation
# Merkle branches
lhs: # left Merkle branch data
rhs: # right Merkle branch data
clear_account: # bool: instruction to clear the account in the current branch
clear_subaccount: # bool: instruction to clear the subaccount in the current branch
# precomputed witness:
a: # depends on the optype, used for range checks
b: # depends on the optype, used for range checks
new_root: # new state root after the operation is applied
account_path: # Merkle path witness for the account in the current branch
subtree_path: # Merkle path witness for the subtree in the current branch
struct cur: # current Merkle branch data
struct computed:
last_chunk: # bool: whether the current chunk is the last one in sequence
pubdata: # pubdata accumulated over all chunks
range_checked: # bool: ensures that a >= b
new_pubkey_hash: # hash of the new pubkey, truncated to 20 bytes (used only for deposits)
# Circuit functions
def circuit:
running_hash := initial_hash
current_root := last_state_root
prev.lhs := { 0, ... }
prev.rhs := { 0, ... }
prev.chunk := 0
prev.new_root := 0
for op in operations:
        # enforce correct bit length for every input in the witness
# TODO: create a macro gadget to recursively iterate over struct member annotations (ZKS-119).
for x in op:
verify_bitlength(x)
# check and prepare data
verify_correct_chunking(op, computed)
accumulate_sha256(op.pubdata_chunk)
accumulate_pubdata(op, computed)
# prepare Merkle branch
cur := select_branch(op, computed)
cur.cosigner_pubkey_hash := hash(cur.cosigner_pubkey)
# check initial Merkle paths, before applying the operation
op.clear_account := False
op.clear_subaccount := False
state_root := check_account_data(op, cur, computed, check_intersection = False)
enforce state_root == current_root
# check validity and perform state updates for the current branch by modifying `cur` struct
execute_op(op, cur, computed)
# check final Merkle paths after applying the operation
new_root := check_account_data(op, cur, computed, check_intersection = True)
# NOTE: this is checked separately for each branch side, and we already enforced
# that `op.new_root` remains unchanged for both by enforcing that it is shared by all chunks
enforce new_root == op.new_root
# update global state root on the last op chunk
if computed.last_chunk:
current_root = new_root
# update `prev` references
# TODO: need a gadget to copy struct members one by one (ZKS-119).
prev.rhs = op.rhs
prev.lhs = op.lhs
prev.args = op.args
prev.new_root = op.new_root
prev.chunk = op.chunk
# final checks after the loop end
enforce current_root == new_state_root
enforce running_hash == pubdata_hash
enforce last_chunk # any operation should close with the last chunk
# make sure that operation chunks are passed correctly
def verify_correct_chunking(op, computed):
# enforce chunk sequence correctness
enforce (op.chunk == 0) or (op.chunk == prev.chunk + 1) # ensure that chunks come in sequence
max_chunks := switch op.tx_type
deposit => 4,
        transfer_to_new => 1,
transfer => 2,
# ...and so on
enforce op.chunk < max_chunks # 4 constraints
computed.last_chunk = op.chunk == max_chunks-1 # flag to mark the last op chunk
# enforce that all chunks share the same witness:
# - `op.args` for the common arguments of the operation
# - `op.lhs` and `op.rhs` for left and right Merkle branches
# - `new_root` of the state after the operation is applied
correct_inputs :=
op.chunk == 0 # skip check for the first chunk
or (
prev.args == op.args and
prev.lhs == op.lhs and
prev.rhs == op.rhs and
prev.new_root == op.new_root
) # TODO: need a gadget for logical equality which works with structs (ZKS-119).
enforce correct_inputs
# accumulate pubdata from multiple chunks
def accumulate_pubdata(op, computed):
computed.pubdata =
if op.chunk == 0:
op.pubdata_chunk # initialize from the first chunk
else:
computed.pubdata << 8 + op.pubdata_chunk
# determine the Merkle branch side (0 for LHS, 1 for RHS) and set `cur` for the current Merkle branch
def select_branch(op, computed):
op.current_side := LHS if op.tx_type == 'deposit' else op.chunk
# TODO: need a gadget for conditional swap applied to each struct member (ZKS-119).
cur := op.lhs if current_side == LHS else op.rhs
return cur
def check_account_data(op, cur, computed, check_intersection):
# leaf data for account and balance leaves
subaccount_data := (
cur.subaccount_balance,
cur.subaccount_nonce,
cur.creation_nonce,
cur.cosigner_pubkey_hash,
cur.cosigner_balance,
cur.subaccount_token)
balance_data := cur.balance
# subaccount emptiness check and clearing
cur.subaccount_is_empty := subaccount_data == EMPTY_SUBACCOUNT
subaccount_data = EMPTY_SUBACCOUNT if clear_subaccount else subaccount_data
# subtree Merkle checks
balances_root := merkle_root(token, op.balances_path, balance_data)
subaccounts_root := merkle_root(token, op.balances_path, subaccount_data)
subtree_root := hash(balances_root, subaccounts_root)
# account data
account_data := hash(cur.owner_pub_key, cur.subtree_root, cur.account_nonce)
# account emptiness check and clearing
cur.account_is_empty := account_data == EMPTY_ACCOUNT
account_data = EMPTY_ACCOUNT if clear_account else account_data
# final state Merkle root verification with conditional intersection check
intersection_path := intersection(op.account_path, cur.account, lhs.account, rhs.account,
lhs.intersection_hash, rhs.intersection_hash)
path_witness := intersection_path if check_intersection else op.account_path
state_root := merkle_root(cur.account, path_witness, account_data)
return state_root
# verify operation and execute state updates
def execute_op(op, cur, computed):
# universal range check
computed.range_checked := op.a >= op.b
# unpack floating point values and hashes
op.args.amount := unpack(op.args.amount_packed)
op.args.fee := unpack(op.args.fee_packed)
# some operations require tighter amount packing (with less precision)
computed.compact_amount_correct := op.args.amount == op.args.compact_amount * 256
# new pubkey hash for deposits
computed.new_pubkey_hash := hash(cur.new_pubkey)
# signature check
# NOTE: signature check must always be valid, but msg and signer can be phony
enforce check_sig(cur.sig_msg, cur.signer_pubkey)
# execute operations
op_valid := False
op_valid = op_valid or op.tx_type == 'noop'
op_valid = op_valid or transfer_to_new(op, cur, computed)
op_valid = op_valid or deposit(op, cur, computed)
op_valid = op_valid or close_account(op, cur, computed)
op_valid = op_valid or withdraw(op, cur, computed)
op_valid = op_valid or escalation(op, cur, computed)
op_valid = op_valid or create_subaccount(op, cur, computed)
op_valid = op_valid or close_subaccount(op, cur, computed)
op_valid = op_valid or fill_orders(op, cur, computed)
# `op` MUST be one of the operations and MUST be valid
enforce op_valid
def transfer_to_new(op, cur, computed):
# transfer_to_new validation is split into lhs and rhs; pubdata is combined from both branches
lhs_valid :=
op.tx_type == 'transfer_to_new'
# here we process the first chunk
and op.chunk == 0
        # sender authorized the spending and the recipient
and lhs.sig_msg == hash('transfer_to_new', lhs.account, lhs.token, lhs.account_nonce, op.args.amount_packed,
op.args.fee_packed, cur.new_pubkey)
# sender is account owner
and lhs.signer_pubkey == cur.owner_pub_key
# sender has enough balance: we checked above that `op.a >= op.b`
        # NOTE: no need to check overflow for `amount + fee` because their bitlengths are enforced
and computed.range_checked and (op.a == cur.balance) and (op.b == (op.args.amount + op.args.fee) )
# NOTE: updating the state is done by modifying data in the `cur` branch
if lhs_valid:
cur.leaf_balance = cur.leaf_balance - (op.args.amount + op.args.fee)
cur.account_nonce = cur.account_nonce + 1
rhs_valid :=
op.tx_type == 'transfer_to_new'
# here we process the second (last) chunk
and op.chunk == 1
# compact amount is passed to pubdata for this operation
and computed.compact_amount_correct
        # pubdata contains correct data from both branches, so we verify it against `lhs` and `rhs`
and pubdata == (op.tx_type, lhs.account, lhs.token, lhs.compact_amount, cur.new_pubkey_hash, rhs.account, rhs.fee)
# new account branch is empty
and cur.account_is_empty
        # sender signed the same recipient pubkey whose hash was passed to the public data
and lhs.new_pubkey == rhs.new_pubkey
if rhs_valid:
cur.leaf_balance = op.args.amount
return lhs_valid or rhs_valid
def deposit(op, cur, computed):
ignore_pubdata := not last_chunk
tx_valid :=
op.tx_type == 'deposit'
and (ignore_pubdata or pubdata == (cur.account, cur.token, args.compact_amount, cur.new_pubkey_hash, args.fee))
and cur.is_account_empty
and computed.compact_amount_correct
and computed.range_checked and (op.a == op.args.amount) and (op.b == op.args.fee)
if tx_valid:
cur.leaf_balance = op.args.amount - op.args.fee
return tx_valid
def close_account(op, cur, computed):
tx_valid :=
op.tx_type == 'close_account'
and pubdata == (cur.account, cur.subtree_root)
and cur.sig_msg == ('close_account', cur.account, cur.leaf_index, cur.account_nonce, cur.amount, cur.fee)
and cur.signer_pubkey == cur.owner_pub_key
if tx_valid:
op.clear_account = True
return tx_valid
def no_nonce_overflow(nonce):
    nonce_overflow := nonce == 0x10000 - 1 # nonce is 2 bytes long
return not nonce_overflow
def withdraw(op, cur, computed):
tx_valid :=
op.tx_type == 'withdraw'
and computed.compact_amount_correct
and pubdata == (op.tx_type, cur.account, cur.token, op.args.amount, op.args.fee)
and computed.range_checked and (op.a == cur.balance) and (op.b == (op.args.amount + op.args.fee) )
and cur.sig_msg == ('withdraw', cur.account, cur.token, cur.account_nonce, cur.amount, cur.fee)
and cur.signer_pubkey == cur.owner_pub_key
and no_nonce_overflow(cur.leaf_nonce)
if tx_valid:
cur.balance = cur.balance - (op.args.amount + op.args.fee)
cur.account_nonce = cur.leaf_nonce + 1
return tx_valid
def escalation(op, cur, computed):
tx_valid :=
op.tx_type == 'escalation'
and pubdata == (op.tx_type, cur.account, cur.subaccount, cur.creation_nonce, cur.leaf_nonce)
and cur.sig_msg == ('escalation', cur.account, cur.subaccount, cur.creation_nonce)
        and (cur.signer_pubkey == cur.owner_pub_key or cur.signer_pubkey == cur.cosigner_pubkey)
if tx_valid:
        op.clear_subaccount = True
return tx_valid
def transfer(op, cur, computed):
lhs_valid :=
op.tx_type == 'transfer'
and op.chunk == 0
and lhs.sig_msg == ('transfer', lhs.account, lhs.token, lhs.account_nonce, op.args.amount_packed,
op.args.fee_packed, rhs.account_pubkey)
and lhs.signer_pubkey == cur.owner_pub_key
and computed.range_checked and (op.a == cur.balance) and (op.b == (op.args.amount + op.args.fee) )
and no_nonce_overflow(cur.account_nonce)
if lhs_valid:
cur.balance = cur.balance - (op.args.amount + op.args.fee)
cur.account_nonce = cur.account_nonce + 1
rhs_valid :=
op.tx_type == 'transfer'
and op.chunk == 1
and not cur.account_is_empty
and pubdata == (op.tx_type, lhs.account, lhs.token, op.args.amount, rhs.account, op.args.fee)
and computed.range_checked and (op.a == (cur.balance + op.args.amount) ) and (op.b == cur.balance )
if rhs_valid:
cur.balance = cur.balance + op.args.amount
return lhs_valid or rhs_valid
# Subaccount operations
def create_subaccount(op, cur, computed):
    # On the LHS we have the cosigner; we only use it for an overflow check
    lhs_valid :=
op.tx_type == 'create_subaccount'
and op.chunk == 0
and computed.range_checked and (op.a == rhs.balance) and (op.b == (op.args.amount + op.args.fee) )
# We process everything else on the RHS
rhs_valid :=
op.tx_type == 'create_subaccount'
and op.chunk == 1
and cur.sig_msg == (
'create_subaccount',
cur.account, # cur = rhs
lhs.account, # co-signer account on the lhs
cur.token,
cur.account_nonce,
op.args.amount_packed,
op.args.fee_packed )
and cur.signer_pubkey == cur.owner_pub_key
and cur.subaccount_is_empty
and pubdata == (op.tx_type, lhs.account, lhs.leaf_index, op.args.amount, rhs.account, op.args.fee)
and computed.range_checked and (op.a == (cur.subaccount_balance + op.args.amount) ) and (op.b == cur.subaccount_balance)
and no_nonce_overflow(cur.account_nonce)
if rhs_valid:
# initialize subaccount
cur.subaccount_balance = cur.subaccount_balance + op.args.amount
cur.creation_nonce = cur.account_nonce
cur.cosigner_pubkey = lhs.account_pubkey
cur.subaccount_token = cur.token
# update main account
cur.balance = cur.balance - (op.args.amount + op.args.fee)
cur.account_nonce = cur.account_nonce + 1
return lhs_valid or rhs_valid
def close_subaccount(op, cur, computed):
# tbd: similar to create_subaccount()
def fill_orders(op, cur, computed):
# tbd
|
#!/bin/env python3
"ngugen -- generate nginx.unit config files, safely and sanely via a tiny DSL;"
if True:
from collections import OrderedDict # To make things look 'right';
import json
import os
import re
__author__ = "mr.wooK@gmail.com"
__license__ = "MIT License"
__copyright__ = "2021 (c)"
DEBUG = False
"""
# Routes
{ "match": { "uri": [ "/v1" ]}, "action": { "pass": "applications/error_v1" } },
{ "match": { "uri": [ "/search", "/esearch", "/search/v0", "/search/v1" ] } }
{ "match": { "uri": [ "/dummy/v0" ] }, "action": { "pass": "applications/dummy_v0" } },
{ "match": { "uri": [ "/dummy" ] }, "action": { "pass": "applications/dummy_app" } },
{ "action": { "pass": "applications/search" } },
Applications
"error_v1": {
"type": "python3.8",
"user": "nobody",
"path": "/var/www/unit/applications/error_server",
"module": "error_server",
"working_directory" : "/tmp",
"processes" : {
"max": 3,
"spare": 2
},
"environment": {
"aws_access_key_id":"x",
"aws_secret_access_key":"y"
}
},
,
"dummy_app": {
"type": "python3.8",
"working_directory": "/var/app/error_service",
"path": "/var/www/unit/applications/error_server",
"module": "dummy_server"
},
"dummy_v0": {
"type": "python3.8",
"working_directory": "/var/app/error_service",
"path": "/var/www/unit/applications/error_server",
"module": "dummy_server"
},
____________________________________________________________________________
% ngugen <conf_file>
: show -- show file as conf
: jq -- show via jq
: q
: write <conf_file>.json
: !param spec (as below)
: update <conf_file>
"""
class Ngugen():
ASSIGNMENT = re.compile(r'(?P<lhs>[\w\.\"]+)\s*=\s*(?P<rhs>.*)')
INCLUDE = re.compile(r'^include\s+(?P<filename>[\w\.\/]+)')
LISTENERS = re.compile(r'^listeners\s+(?P<domains>.*):(?P<port>\d+)\s+(?P<processor>.*)')
ROUTES = re.compile(r'^routes\s+(?P<matcher>\w+)\s*(?P<processor>[a-z]+)\s*(?P<application>\w+)\s*(?P<targets>.*)')
def __init__(self, cfg_file = None):
self._fn = cfg_file
self._globals = dict(applications=dict(), listeners=dict(), routes=dict(),
settings=dict(), isolation=dict(), extras=dict())
self._listeners = {}
self._applications = {}
self._isolation = {}
self._routes = [ ]
self._settings = {}
self._extras = {}
self._top = { "global" : self._globals, "apps" : self._applications,
"applications" : self._applications,
"listeners" : self._listeners, "routes" : self._routes,
"settings": self._settings, "isolation" : self._isolation,
"extras" : self._extras }
self._sequence = [ 'listeners', 'routes', 'applications',
'settings', 'isolation', 'extras' ]
if cfg_file:
self.load(cfg_file)
def parse_line(self, ln):
ln = ln.strip()
match = Ngugen.ASSIGNMENT.match(ln)
if match:
gd = match.groupdict()
return self._assign(gd['lhs'], gd['rhs'])
match = Ngugen.INCLUDE.match(ln)
if match:
gd = match.groupdict()
return self._include(gd['filename'])
match = Ngugen.LISTENERS.match(ln)
if match:
gd = match.groupdict()
return self._listener(gd['domains'], gd['port'], gd['processor'])
match = Ngugen.ROUTES.match(ln)
if match:
gd = match.groupdict()
if gd['processor'] == "default":
targets = ""
else:
targets = gd['targets']
return self._routing(gd['matcher'], gd['processor'], gd['application'], targets)
return False
def load(self, fn):
ifd = open(fn, 'r')
ibuf = [ln[:-1].strip() for ln in ifd.readlines()]
ifd.close()
ibuf = [ln for ln in ibuf if ln]
ibuf = [ln for ln in ibuf if not ln[0] in [ '#', ';' ] ]
self._ibuf = ibuf
load_ok = True
for ln in self._ibuf:
self.debug(f"--{ln}--")
rc = self.parse_line(ln)
if not rc:
print(f"Parse failed for {ln}")
load_ok = False
return load_ok
def _assign(self, lhs, rhs):
self.debug(f"_assign: {lhs} {rhs}")
lhs, rhs = lhs.strip(), rhs.strip()
if lhs != lhs.lower():
print("WARNING: assignment: left hand side {lhs} is not all lower case")
if '"' in lhs:
lhs_split = self._assignment_quoted(lhs)
if not lhs_split:
raise ValueError(f"Bad quotes in assignment in {lhs}")
else:
lhs_split = lhs.split('.')
if len(lhs_split) < 2:
raise ValueError(f"Bad assignment destination in {lhs}")
group = lhs_split.pop(0).lower()
if group not in self._top:
raise ValueError(f"Unknown assignment group {group}")
domain = self._top[group]
while lhs_split:
pointer = lhs_split.pop(0)
if lhs_split and (pointer in domain):
domain = domain[pointer]
continue
# if more things in vector but pointer not established in domain, establish it;
if lhs_split and (pointer not in domain):
domain[pointer] = dict()
domain = domain[pointer]
continue
# Check for terminal pointer, and make assignment
if (not lhs_split) and pointer:
domain[pointer] = rhs
return True
# if not lhs_split, should have done assignment by now?
raise ValueError(f"Termination issues in assignment for {lhs} = {rhs}")
def _assignment_quoted(self, lhs):
lhs_split = [ ]
while lhs:
if not lhs.startswith('"'):
if '.' in lhs:
dot = lhs.index('.')
lhs_split.append(lhs[:dot])
lhs = lhs[dot + 1:]
continue
# Terminal word
lhs_split.append(lhs)
return lhs_split
# lhs must start with '"' by now;
if lhs.count('"') & 1:
raise ValueError(f"Odd number of quotes in {lhs}")
q0 = 0 # Skip the first quote...
q1 = lhs.index('"', q0 +1 )
# q1 = lhs[q0+1:lhs.index('"') + 1] # Get everything between the quotes...
field = lhs[q0 +1:q1]
lhs = lhs[len(field) + 3:] # Lose dot + what's been parsed before next iteration;
lhs_split.append(field)
continue
return lhs_split
def debug(self, txt):
if DEBUG:
print(txt)
return True
def _include(self, fn):
self.debug(f"_include {fn}")
        self.load(fn)
return True
def _listener(self, domains, port, processor):
"""
Listeners are a dict of key-value pairs where the key is
"hostnames:portnums and the value is a dictionary of pass options;
"""
group = self._top['listeners']
domain_port = f"{domains}:{port}"
action, where = re.split(r'\s+', processor)
group[domain_port] = { action : where }
return True
def _routing(self, matcher, processor, app, targets):
"""
routes are a list of dicts, where match provides a predicate condition
(ie: uri against a list of potential matches), with a final action case that provides
the default route if none of the prior matches succeeds;
"""
routes = self._top['routes']
matcher, processor = matcher.lower(), processor.lower()
if matcher == "match_uri":
targets = targets.replace(",", " ")
uri_list = re.split(r'\s+', targets)
pass_dict = { processor : app }
            route = OrderedDict(match=dict(uri=uri_list), action=pass_dict)
routes.append(route)
return True
elif matcher == "default":
route = dict(action={ processor : app })
routes.append(route)
return True
else:
raise ValueError(f"Unknown processor type {processor}")
return True
def save(self, fn):
if os.path.isfile(fn):
os.rename(fn, f"{fn}~")
ofd = open(fn, 'w')
top = dict()
final = dict()
for section in self._sequence:
user_globals = self._globals[section]
specified = self._top[section]
if section == 'routes':
top[section] = specified
continue
elif section == 'extras':
top.update(self._top[section])
continue
elif not self._top[section]:
continue
elif section == 'applications':
if not self._top[section]:
continue
top[section] = dict()
subsections = list(self._top[section].keys())
subsections.sort()
for sub in subsections:
per_app = self._top[section][sub]
use = { **user_globals, **per_app }
top[section][sub] = use
continue
else:
top[section] = { **user_globals, **specified}
continue
output = json.dumps(top, indent=2)
ofd.write(f"{output}\n")
ofd.close()
return True
if __name__ == "__main__":
import sys
args = sys.argv[:]
pname = args.pop(0)
if not args:
print(f"No filename on command line, that's all folks!")
sys.exit(1)
ifn = args.pop(0)
if not os.path.isfile(ifn):
print(f"File {ifn} not found, ttfn!")
sys.exit(1)
if args:
ofn = args.pop(0)
else:
ofn = os.path.splitext(ifn)[0] + ".json"
ngugen = Ngugen(ifn)
ngugen.save(ofn)
print(f"Wrote {ofn}")
|
import datetime
import json
import os
import typing
import gpxpy # type: ignore
import pint # type: ignore
import s2sphere # type: ignore
import polyline # type: ignore
from stravaviz.exceptions import TrackLoadError
from stravaviz.units import Units
class Track:
"""Create and maintain info about a given activity track (corresponding to one GPX file).
Attributes:
file_names: Basename of a given file passed in load_gpx.
polylines: Lines interpolated between each coordinate.
start_time: Activity start time.
end_time: Activity end time.
length: Length of the track (2-dimensional).
        special: True if the track is special, else False.
Methods:
load_gpx: Load a GPX file into the current track.
bbox: Compute the border box of the track.
append: Append other track to current track.
load_cache: Load track from cached json data.
store_cache: Cache the current track.
"""
def __init__(self) -> None:
self.file_names: typing.List[str] = []
self.polylines: typing.List[typing.List[s2sphere.LatLng]] = []
self.elevations = []
self._start_time: typing.Optional[datetime.datetime] = None
self._end_time: typing.Optional[datetime.datetime] = None
# Don't use Units().meter here, as this constructor is called from
# within a thread (which would create a second unit registry!)
self._length_meters = 0.0
self.special = False
def load_gpx(self, file_name: str) -> None:
"""Load the GPX file into self.
Args:
            file_name: GPX file to be loaded.
Raises:
TrackLoadError: An error occurred while parsing the GPX file (empty or bad format).
PermissionError: An error occurred while opening the GPX file.
"""
try:
self.file_names = [os.path.basename(file_name)]
# Handle empty gpx files
# (for example, treadmill runs pulled via garmin-connect-export)
if os.path.getsize(file_name) == 0:
raise TrackLoadError("Empty GPX file")
with open(file_name, "r") as file:
self._load_gpx_data(gpxpy.parse(file))
except TrackLoadError as e:
raise e
except gpxpy.gpx.GPXXMLSyntaxException as e:
raise TrackLoadError("Failed to parse GPX.") from e
except PermissionError as e:
raise TrackLoadError("Cannot load GPX (bad permissions)") from e
except Exception as e:
raise TrackLoadError("Something went wrong when loading GPX.") from e
def has_time(self) -> bool:
return self._start_time is not None and self._end_time is not None
def start_time(self) -> datetime.datetime:
assert self._start_time is not None
return self._start_time
def set_start_time(self, value: datetime.datetime) -> None:
self._start_time = value
def end_time(self) -> datetime.datetime:
assert self._end_time is not None
return self._end_time
def set_end_time(self, value: datetime.datetime) -> None:
self._end_time = value
@property
def length_meters(self) -> float:
return self._length_meters
@length_meters.setter
def length_meters(self, value: float) -> None:
self._length_meters = value
def length(self) -> pint.quantity.Quantity:
return self._length_meters * Units().meter
def bbox(self) -> s2sphere.LatLngRect:
"""Compute the smallest rectangle that contains the entire track (border box)."""
bbox = s2sphere.LatLngRect()
for line in self.polylines:
for latlng in line:
bbox = bbox.union(s2sphere.LatLngRect.from_point(latlng.normalized()))
return bbox
def _load_gpx_data(self, gpx: gpxpy.gpx.GPX) -> None:
self._start_time, self._end_time = gpx.get_time_bounds()
if not self.has_time():
raise TrackLoadError("Track has no start or end time.")
self._length_meters = gpx.length_2d()
if self._length_meters <= 0:
raise TrackLoadError("Track is empty.")
gpx.simplify()
for t in gpx.tracks:
for s in t.segments:
line = [s2sphere.LatLng.from_degrees(p.latitude, p.longitude) for p in s.points]
self.polylines.append(line)
elevation_line = [p.elevation for p in s.points]
                if any(e is None for e in elevation_line):  # missing elevation data
raise TrackLoadError("Track has invalid elevations.")
self.elevations.append(elevation_line)
def append(self, other: "Track") -> None:
"""Append other track to self."""
self._end_time = other.end_time()
self.polylines.extend(other.polylines)
self._length_meters += other.length_meters
self.file_names.extend(other.file_names)
self.special = self.special or other.special
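if __name__ == "__main__":
    # Minimal usage sketch (assumes the imports at the top of this module resolve
    # and that a GPX file path is passed on the command line).
    import sys
    demo = Track()
    demo.load_gpx(sys.argv[1])
    print(demo.file_names, demo.length_meters, demo.bbox())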
|
import click
import pkgutil
import os
import os.path as osp
from . import (
CONFIG_DIR,
)
@click.command()
@click.option("--config", default=None, type=click.Path(exists=True))
def cli(config):
# make the bimker configuration dir
os.makedirs(CONFIG_DIR)
## Generate or link to the config file
config_path = osp.join(CONFIG_DIR, 'config.py')
if config is None:
# generate the default config.py file
config_str = pkgutil.get_data(__name__,
'profile_config/config.py')
with open(config_path, 'wb') as wf:
wf.write(config_str)
else:
os.symlink(config, config_path)
# then generate the default env.sh file
env_str = pkgutil.get_data(__name__,
'env_script/env.sh')
env_path = osp.join(CONFIG_DIR, 'env.sh')
with open(env_path, 'wb') as wf:
wf.write(env_str)
if __name__ == "__main__":
cli()
|
# optimizer
optimizer = dict(type='AdamW', lr=1e-4, betas=(0.95, 0.99), weight_decay=0.01,)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
warmup='linear',
warmup_iters=1600 * 8,
warmup_ratio=1.0 / 100,
min_lr_ratio=1e-8,
    by_epoch=False)  # iteration-based schedule: warmup and annealing are counted in iterations, not epochs
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=1600 * 24)
checkpoint_config = dict(by_epoch=False, max_keep_ckpts=10, interval=1600)
evaluation = dict(by_epoch=False,
start=0,
interval=1600,
pre_eval=True,
rule='less',
save_best='abs_rel_all',
greater_keys=("a1_all", "a2_all", "a3_all"),
less_keys=("abs_rel_all", "rmse_all"))
|
bl_info = {"name": "Icicle Generator",
"author": "Eoin Brennan (Mayeoin Bread)",
"version": (2, 1),
"blender": (2, 7, 4),
"location": "View3D > Add > Mesh",
"description": "Adds a linear string of icicles of different sizes",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Add Mesh"}
import bpy
import bmesh
from mathutils import Vector
from math import pi, sin, cos, tan, asin, acos, atan
from bpy.props import FloatProperty, IntProperty
import random
class IcicleGenerator(bpy.types.Operator):
"""Icicle Generator"""
bl_idname = "mesh.icicle_gen"
bl_label = "Icicle Generator"
bl_options = {"REGISTER", "UNDO"}
##
# User input
##
# Maximum radius
maxR = FloatProperty(name="Max R",
description="Maximum radius of a cone",
default=0.15,
min=0.01,
max=1.0,
unit="LENGTH")
# Minimum radius
minR = FloatProperty(name="Min R",
description="Minimum radius of a cone",
default=0.025,
min=0.01,
max=1.0,
unit="LENGTH")
# Maximum depth
maxD = FloatProperty(name="Max D",
description="Maximum depth (height) of cone",
default=2.0,
min=0.2,
max=2.0,
unit="LENGTH")
# Minimum depth
minD = FloatProperty(name="Min D",
description="Minimum depth (height) of cone",
default=1.5,
min=0.2,
max=2.0,
unit="LENGTH")
# Number of verts at base of cone
verts = IntProperty(name="Vertices", description="Number of vertices", default=8, min=3, max=24)
# Number of iterations before giving up trying to add cones
# Prevents crashes and freezes
# Obviously, the more iterations, the more time spent calculating.
# Max value (10,000) is safe but can be slow,
# 2000 to 5000 should be adequate for 95% of cases
its = IntProperty(name="Iterations", description="Number of iterations before giving up, prevents freezing/crashing", default=2000, min=1, max=10000)
##
# Main function
##
def execute(self, context):
rad = self.maxR
radM = self.minR
depth = self.maxD
minD = self.minD
##
# Add cone function
##
def add_cone(x, y, z, randrad, rd):
ac = bpy.ops.mesh.primitive_cone_add
ac(
vertices=self.verts,
radius1=randrad,
radius2=0.0,
depth=rd,
end_fill_type='NGON',
view_align=False,
location=(x, y, z),
rotation=(pi, 0.0, 0.0))
##
# Add icicle function
##
def add_icicles(rad, radM, depth, minD):
pos1 = Vector((0.0, 0.0, 0.0))
pos2 = Vector((0.0, 0.0, 0.0))
pos = 0
obj = bpy.context.object
bm = bmesh.from_edit_mesh(obj.data)
wm = obj.matrix_world
# Vectors for selected verts
for v in bm.verts:
if v.select:
if pos == 0:
p1 = v.co
pos = 1
elif pos == 1:
p2 = v.co
pos = 2
else:
p5 = v.co
# Set first to left most vert on X-axis...
if(p1.x > p2.x):
pos1 = p2
pos2 = p1
# Or bottom-most on Y-axis if X-axis not used
elif(p1.x == p2.x):
if(p1.y > p2.y):
pos1 = p2
pos2 = p1
else:
pos1 = p1
pos2 = p2
else:
pos1 = p1
pos2 = p2
# World matrix for positioning
pos1 = pos1 * wm
pos2 = pos2 * wm
# X values not equal, working on X-Y-Z planes
if pos1.x != pos2.x:
# Get the angle of the line
if(pos2.y != pos1.y):
angle = atan((pos2.x - pos1.x) / (pos2.y - pos1.y))
print("Angle:", angle)
else:
angle = pi / 2
# Total length of line, neglect Z-value (Z only affects height)
xLength = (((pos2.x - pos1.x)**2) + ((pos2.y - pos1.y)**2))**0.5
                # Slopes of the lines
ySlope = (pos2.y - pos1.y) / (pos2.x - pos1.x)
zSlope = (pos2.z - pos1.z) / (pos2.x - pos1.x)
# Fixes positioning error with some angles
if (angle < 0):
i = pos2.x
j = pos2.y
k = pos2.z
else:
i = pos1.x
j = pos1.y
k = pos1.z
l = 0.0
# Z and Y axis' intercepts
zInt = k - (zSlope * i)
yInt = j - (ySlope * i)
                # Equal values, therefore radius should be that size
if(radM == rad):
randrad = rad
# Otherwise randomise it
else:
randrad = (rad - radM) * random.random()
# Depth, as with radius above
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
# Get user iterations
iterations = self.its
# Counter for iterations
c = 0
while(l < xLength) and (c < iterations):
if(radM == rad):
rr = randrad
else:
rr = randrad + radM
if(depth == minD):
dd = rd
else:
dd = rd + minD
# Icicles generally taller than wider, check if true
if(dd > rr):
# If the new icicle won't exceed line length
# Fix for overshooting lines
if(l + rr + rr <= xLength):
# Using sine/cosine of angle keeps icicles consistently spaced
i = i + (rr) * sin(angle)
j = j + (rr) * cos(angle)
l = l + rr
# Add a cone in new position
add_cone(i, j, (i * zSlope) + (zInt - (dd) / 2), rr, dd)
# Add another radius to i & j to prevent overlap
i = i + (rr) * sin(angle)
j = j + (rr) * cos(angle)
l = l + rr
# New values for rad and depth
if(radM == rad):
randrad = rad
else:
randrad = (rad - radM) * random.random()
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
# If overshoot, try find smaller cone
else:
if(radM == rad):
randrad = rad
else:
randrad = (rad - radM) * random.random()
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
# If wider than taller, try find taller than wider
else:
if(radM == rad):
randrad = rad
else:
randrad = (rad - radM) * random.random()
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
# Increase iterations by 1
c = c + 1
# if(c >= iterations):
# print("Too many iterations, please try different values")
# print("Try increasing gaps between min and max values")
# If X values equal, then just working in Y-Z plane,
# Provided Y values not equal
elif (pos1.x == pos2.x) and (pos1.y != pos2.y):
# Absolute length of Y line
xLength = ((pos2.y - pos1.y)**2)**0.5
i = pos1.x
j = pos1.y
k = pos1.z
l = 0.0
# Z-slope and intercept
zSlope = (pos2.z - pos1.z) / (pos2.y - pos1.y)
zInt = k - (zSlope * j)
# Same as above for X-Y-Z plane, just X values don't change
if(radM == rad):
randrad = rad
else:
randrad = (rad - radM) * random.random()
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
iterations = self.its
c = 0
while(l < xLength) and (c < iterations):
if(radM == rad):
rr = randrad
else:
rr = randrad + radM
if(depth == minD):
dd = rd
else:
dd = rd + minD
if(dd > rr):
if(l + rr + rr <= xLength):
j = j + (rr)
l = l + (rr)
add_cone(i, j, (i * zSlope) + (zInt - (dd) / 2), rr, dd)
j = j + (rr)
l = l + (rr)
if(radM == rad):
randrad = rad
else:
randrad = (rad - radM) * random.random()
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
else:
if(radM == rad):
randrad = rad
else:
randrad = (rad - radM) * random.random()
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
else:
if(radM == rad):
randrad = rad
else:
randrad = (rad - radM) * random.random()
if(depth == minD):
rd = depth
else:
rd = (depth - minD) * random.random()
c = c + 1
# Otherwise X and Y values the same, so either verts are on top of each other
            # Or it's a vertical line. Either way, we don't like it
else:
print("Cannot work on vertical lines")
##
# Run function
##
def runIt(rad, radM, depth, minD):
# Check that min values are less than max values
if(rad >= radM) and (depth >= minD):
obj = bpy.context.object
if obj.mode == 'EDIT':
# List of initial edges
oEdge = []
bm = bmesh.from_edit_mesh(obj.data)
for e in bm.edges:
if e.select:
# Append selected edges to list
oEdge.append(e.index)
# For every initially selected edge, add cones
for e in oEdge:
bpy.ops.mesh.select_all(action='DESELECT')
bm.edges.ensure_lookup_table()
bm.edges[e].select = True
add_icicles(rad, radM, depth, minD)
else:
print("Object not in edit mode")
# Run the function
obj = bpy.context.object
if obj.type == 'MESH':
runIt(rad, radM, depth, minD)
else:
print("Only works on meshes")
return {'FINISHED'}
# Add to menu and register/unregister stuff
def menu_func(self, context):
self.layout.operator(IcicleGenerator.bl_idname, text="Icicle", icon="PLUGIN")
def register():
bpy.utils.register_class(IcicleGenerator)
bpy.types.INFO_MT_mesh_add.append(menu_func)
def unregister():
bpy.utils.unregister_class(IcicleGenerator)
bpy.types.INFO_MT_mesh_add.remove(menu_func)
if __name__ == "__main__":
register()
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 Nathan Juraj Michlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import logging
from dataclasses import dataclass
from numbers import Number
from typing import Any
from typing import Dict
from typing import final
from typing import Sequence
from typing import Tuple
from typing import Union
import torch
from disent.frameworks.ae._ae_mixin import _AeAndVaeMixin
from disent.frameworks.helper.util import detach_all
from disent.model import AutoEncoder
from disent.util.iters import map_all
# ========================================================================= #
# framework_vae #
# ========================================================================= #
class Ae(_AeAndVaeMixin):
"""
Basic Auto Encoder
------------------
    See the docs for the VAE; although the AE is simpler, the VAE docs
cover the concepts needed to get started with writing Auto-Encoder
sub-classes.
Like the VAE, the AE is also written such that you can change the
number of required input observations that should be fed through the
network in parallel with `REQUIRED_OBS`. Various hooks are also made
available to add functionality and access the internal data.
- HOOKS:
* `hook_ae_intercept_zs`
* `hook_ae_compute_ave_aug_loss` (NB: not the same as `hook_compute_ave_aug_loss` from VAEs)
- OVERRIDES:
* `compute_ave_recon_loss`
"""
# override
REQUIRED_Z_MULTIPLIER = 1
REQUIRED_OBS = 1
@dataclass
class cfg(_AeAndVaeMixin.cfg):
pass
def __init__(self, model: AutoEncoder, cfg: cfg = None, batch_augment=None):
super().__init__(cfg=cfg, batch_augment=batch_augment)
# initialise the auto-encoder mixin (recon handler, model, enc, dec, etc.)
self._init_ae_mixin(model=model)
# --------------------------------------------------------------------- #
# AE Training Step -- Overridable #
# --------------------------------------------------------------------- #
@final
def do_training_step(self, batch, batch_idx):
xs, xs_targ = self._get_xs_and_targs(batch, batch_idx)
# FORWARD
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- #
# latent variables
zs = map_all(self.encode, xs)
# [HOOK] intercept latent variables
zs, logs_intercept_zs = self.hook_ae_intercept_zs(zs)
# reconstruct without the final activation
xs_partial_recon = map_all(self.decode_partial, detach_all(zs, if_=self.cfg.detach_decoder))
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- #
# LOSS
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- #
# compute all the recon losses
recon_loss, logs_recon = self.compute_ave_recon_loss(xs_partial_recon, xs_targ)
# [HOOK] augment loss
aug_loss, logs_aug = self.hook_ae_compute_ave_aug_loss(zs=zs, xs_partial_recon=xs_partial_recon, xs_targ=xs_targ)
# compute combined loss
loss = 0
if not self.cfg.disable_rec_loss: loss += recon_loss
if not self.cfg.disable_aug_loss: loss += aug_loss
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- #
# log general
self.log_dict({
**logs_intercept_zs,
**logs_recon,
**logs_aug,
})
# log progress bar
self.log_dict({
'recon_loss': float(recon_loss),
'aug_loss': float(aug_loss),
}, prog_bar=True)
# return values
return loss
# --------------------------------------------------------------------- #
# Overrideable #
# --------------------------------------------------------------------- #
def hook_ae_intercept_zs(self, zs: Sequence[torch.Tensor]) -> Tuple[Sequence[torch.Tensor], Dict[str, Any]]:
return zs, {}
def hook_ae_compute_ave_aug_loss(self, zs: Sequence[torch.Tensor], xs_partial_recon: Sequence[torch.Tensor], xs_targ: Sequence[torch.Tensor]) -> Tuple[Union[torch.Tensor, Number], Dict[str, Any]]:
return 0, {}
def compute_ave_recon_loss(self, xs_partial_recon: Sequence[torch.Tensor], xs_targ: Sequence[torch.Tensor]) -> Tuple[Union[torch.Tensor, Number], Dict[str, Any]]:
# compute reconstruction loss
pixel_loss = self.recon_handler.compute_ave_loss_from_partial(xs_partial_recon, xs_targ)
# return logs
return pixel_loss, {
'pixel_loss': pixel_loss
}
# --------------------------------------------------------------------- #
# AE Model Utility Functions (Visualisation) #
# --------------------------------------------------------------------- #
@final
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""Get the deterministic latent representation (useful for visualisation)"""
return self._model.encode(x)
@final
def decode(self, z: torch.Tensor) -> torch.Tensor:
"""Decode latent vector z into reconstruction x_recon (useful for visualisation)"""
return self.recon_handler.activate(self._model.decode(z))
@final
def forward(self, batch: torch.Tensor) -> torch.Tensor:
"""Feed through the full deterministic model (useful for visualisation)"""
return self.decode(self.encode(batch))
# --------------------------------------------------------------------- #
# AE Model Utility Functions (Training) #
# --------------------------------------------------------------------- #
@final
def decode_partial(self, z: torch.Tensor) -> torch.Tensor:
"""Decode latent vector z into partial reconstructions that exclude the final activation if there is one."""
return self._model.decode(z)
# ========================================================================= #
# END #
# ========================================================================= #
|
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapi.admin_permission import AdminPermission
from networkapi.auth import has_perm
from networkapi.infrastructure.xml_utils import dumps_networkapi
import logging
from networkapi.rest import RestResource, UserNotAuthorizedError
from networkapi.equipamento.models import Marca, Modelo, EquipamentoError, MarcaNotFoundError
from networkapi.exception import InvalidValueError
from networkapi.util import is_valid_int_greater_zero_param
class ModelGetByBrandResource(RestResource):
log = logging.getLogger('ModelGetByBrandResource')
def handle_get(self, request, user, *args, **kwargs):
"""Treat requests GET to list all the Model by Brand.
URL: model/brand/<id_brand>/
"""
try:
self.log.info("GET to list all the Model by Brand")
# User permission
if not has_perm(user, AdminPermission.BRAND_MANAGEMENT, AdminPermission.READ_OPERATION):
self.log.error(
u'User does not have permission to perform the operation.')
raise UserNotAuthorizedError(None)
id_brand = kwargs.get('id_brand')
# Valid ID Brand
if not is_valid_int_greater_zero_param(id_brand):
self.log.error(
u'The id_brand parameter is not a valid value: %s.', id_brand)
                raise InvalidValueError(None, 'id_brand', id_brand)
# Find Brand by ID to check if it exist
Marca.get_by_pk(id_brand)
model_list = []
for model in Modelo.get_by_brand(id_brand):
model_map = dict()
model_map['id'] = model.id
model_map['nome'] = model.nome
model_map['id_marca'] = model.marca.id
model_map['nome_marca'] = model.marca.nome
model_list.append(model_map)
return self.response(dumps_networkapi({'model': model_list}))
except InvalidValueError, e:
return self.response_error(269, e.param, e.value)
except UserNotAuthorizedError:
return self.not_authorized()
except MarcaNotFoundError:
return self.response_error(167, id_brand)
except EquipamentoError:
return self.response_error(1)
|
# https://opensource.com/article/18/6/tornado-framework
# https://stackoverflow.com/questions/6131915/web-sockets-tornado-notify-client-on-database-update
# __init__.py
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import define, options
from tornado.web import Application
define('port', default=9000, help='port to listen on')
def main():
"""Construct and serve the tornado application."""
app = Application()
http_server = HTTPServer(app)
http_server.listen(options.port)
print('Listening on http://localhost:%i' % options.port)
IOLoop.current().start()
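if __name__ == '__main__':
    # Minimal runner sketch: parse --port from the command line and serve the
    # (currently handler-less) Application defined above; real handlers are
    # presumably registered elsewhere in the package.
    options.parse_command_line()
    main()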
|
from os import path
import subprocess
from venv import EnvBuilder
projects_dir = path.join(path.abspath(path.dirname(__name__)), 'projects')
class PipEnv:
def __init__(self, name, libs):
self.env = EnvBuilder(clear=True, with_pip=True)
if not path.isdir(f"{projects_dir}/{name}"):
self.env.create(f"{projects_dir}/{name}")
self.pip = f"{projects_dir}/{name}/bin/pip"
def do_pip(self, cmd: list, **kwargs):
subprocess.run([self.pip] + cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
def install(self, name, *args):
self.do_pip(["install", name] + list(args))
print("Complete")
def uninstall(self, name, *args):
self.do_pip(["uninstall", name, "-y"] + list(args))
print("Complete")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 19:30:32 2021
"""
GREETING = {
    'en' : '''Hi! I'm Clockwork Fox
I'm here to greet every new member of your groups, and I can also help you with some events you may want to run with friends.
If you want to know how to do some of these things, you can use /help
''',
'es' : '''¡Hola! Soy Clockwork Fox
Estoy acá para dar la bienvenida a los nuevos miembros de tus grupos, también puedo ayudarte con algunos eventos que quieras hacer con amigos.
Si quieres saber como hacer algunas cosas puedes usar /help
'''
}
EVENT = {
    'en' : '''Alright! Here\'s the new #CLOCKWORK_EVENT\n<i>Make sure you have started me in private, otherwise you won't receive the notification</i>\n\n''',
'es' : '''¡Bien! Acá está el nuevo #CLOCKWORK_EVENT\n<i>Asegúrate de haberme iniciado en privado, de otra forma no recibirás notificaciones</i>\n\n''',
'en_btn' : "I\'m in!",
'es_btn' : "¡Cuenta conmigo!",
'en_error' : "Sorry, there\'s an event running",
'es_error' : "Lo siento, ya hay un evento en marcha"
}
LANG = {
'en' : "Alright, how sould I speak?",
'es' : "Bien, ¿cómo debería hablar?"
}
HELP = {
'en' : '''Here\'s some help
<b>/new_event <i>"event text"</i></b> - I'll start a new event; only one event per group is allowed at a time.
<b>/new_raffle <i>"raffle text" "number of participants"</i></b> - I'll start a raffle.
<b>NOTE</b>: if you want to cancel any of these you can use <b><i>/cancel_event</i></b> or <b><i>/cancel_raffle</i></b>''',
#With the command /language you\'ll be able to change my language''',
'es' : '''Acá un poco de ayuda
<b>/new_event <i>"texto del evento"</i></b> - Iniciaré un evento, solo se permite un evento por grupo a la vez.
<b>/new_raffle <i>"texto del sorteo" "número de participantes"</i></b> - Iniciaré un sorteo.
<b>NOTA</b>: si quieres cancelar alguno de estos puedes usar el comando <b><i>/cancel_event</i></b> o <b><i>/cancel_raffle</i></b>'''
#Con el comando /language podrás cambiar mi idioma'''
}
RAFFLE = {
'en' : '''\n#CLOCKWORK_RAFFLE Press the button below to participate!''',
'es' : '''\n#CLOCKWORK_RAFFLE ¡Presiona el botón de abajo para participar!'''
}
NOTIFICATION = {
'en' : '''There is a <b>new</b> participant on the raffle, we have now: ''',
'es' : '''Hay un <b>nuevo</b> participante en el sorteo, tenemos ahora: '''
}
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import lmfit
import scipy.stats
import scipy.optimize
minimize = lmfit.minimize
from fitter import Fitter
# To use different defaults, change these three import statements.
from kid_readout.analysis.khalil import delayed_generic_s21 as default_model
from kid_readout.analysis.khalil import delayed_generic_guess as default_guess
from kid_readout.analysis.khalil import generic_functions as default_functions
from kid_readout.analysis.khalil import bifurcation_s21, bifurcation_guess
def fit_resonator(freq, s21, mask= None, errors=None, weight_by_errors=True, min_a = 0.08, fstat_thresh = 0.999,
delay_estimate = None, verbose=False):
if delay_estimate is not None:
def my_default_guess(f,data):
params = default_guess(f,data)
params['delay'].value = delay_estimate
return params
else:
my_default_guess = default_guess
rr = Resonator(freq, s21, mask=mask, errors=errors, weight_by_errors=weight_by_errors,guess=my_default_guess)
if delay_estimate is not None:
def my_bifurcation_guess(f,data):
params = bifurcation_guess(f,data)
params['delay'].value = delay_estimate
return params
else:
my_bifurcation_guess = bifurcation_guess
bif = Resonator(freq, s21, mask=mask, errors=errors, weight_by_errors=weight_by_errors,
guess = my_bifurcation_guess, model = bifurcation_s21)
fval = scipy.stats.f_value(np.sum(np.abs(rr.residual())**2),
np.sum(np.abs(bif.residual())**2),
rr.result.nfree, bif.result.nfree)
fstat = scipy.stats.distributions.f.cdf(fval,rr.result.nfree,bif.result.nfree)
aval = bif.result.params['a'].value
aerr = bif.result.params['a'].stderr
reasons = []
if aval <= aerr:
prefer_bif = False
reasons.append("Error on bifurcation parameter exceeds fitted value")
else:
if aval < min_a:
prefer_bif = False
reasons.append("Bifurcation parameter %f is less than minimum required %f" % (aval,min_a))
else:
#not sure this is working right, so leave it out for now.
if False:#fstat < fstat_thresh:
prefer_bif = False
reasons.append("F-statistic %f is less than threshold %f" % (fstat,fstat_thresh))
else:
prefer_bif = True
if verbose and not prefer_bif:
print "Not using bifurcation model because:",(','.join(reasons))
return rr,bif,prefer_bif
def fit_best_resonator(*args,**kwargs):
rr,bif,prefer_bif = fit_resonator(*args,**kwargs)
return (rr,bif)[prefer_bif]
class Resonator(Fitter):
"""
This class represents a single resonator. All of the
model-dependent behavior is contained in functions that are
supplied to the class. There is a little bit of Python magic that
allows for easy access to the fit parameters and functions of only
the fit parameters.
The idea is that, given sweep data f and s21,
r = Resonator(f, s21)
should just work. Modify the import statements to change the
defaults.
"""
def __init__(self, f, data, model=default_model, guess=default_guess, functions=default_functions,
mask=None, errors=None, weight_by_errors=True):
"""
Instantiate a resonator using our current best model.
Parameter model is a function S_21(params, f) that returns the
modeled values of S_21.
Parameter guess is a function guess(f, data) that returns a
good-enough initial guess at all of the fit parameters.
Parameter functions is a dictionary that maps keys that are
valid Python variables to functions that take a Parameters
object as their only argument.
Parameter mask is a boolean array of the same length as f and
data; only points f[mask] and data[mask] are used to fit the
data. The default is to use all data. Use this to exclude
glitches or resonances other than the desired one.
"""
if not np.iscomplexobj(data):
raise TypeError("Resonator data should always be complex, but got real values")
if errors is not None:
if not np.iscomplexobj(errors):
errors = errors*(1+1j) # ensure errors is complex
super(Resonator,self).__init__(f,data,model=model,guess=guess,functions=functions,mask=mask,
errors=errors,weight_by_errors=weight_by_errors)
if self.x_data.max() < 1e6:
self.freq_units_MHz = True
else:
self.freq_units_MHz = False
self.freq_data = self.x_data
self.s21_data = self.y_data
def get_normalization(self, freq, remove_amplitude = True, remove_delay = True, remove_phase = True):
"""
return the complex factor that removes the arbitrary amplitude, cable delay, and phase from the resonator fit
freq : float or array of floats
frequency in same units as the model was built with, at which normalization should be computed
remove_amplitude : bool, default True
include arbitrary amplitude correction
remove_delay : bool, default True
include cable delay correction
remove_phase : bool, default True
include arbitrary phase offset correction
"""
normalization = 1.0
if remove_amplitude:
normalization *= 1.0/self.A_mag
if remove_phase:
phi = self.phi + self.A_phase
else:
phi = 0
if remove_delay:
delay = self.delay
else:
delay = 0
normalization *= np.exp(1j*(2*np.pi*(freq-self.f_phi)*delay - phi))
return normalization
def normalize(self, freq, s21_raw, remove_amplitude = True, remove_delay = True, remove_phase = True):
"""
        Normalize s21 data, removing arbitrary amplitude, delay, and phase terms
freq : float or array of floats
frequency in same units as the model was built with, at which normalization should be computed
s21_raw : complex or array of complex
raw s21 data which should be normalized
"""
normalization = self.get_normalization(freq, remove_amplitude=remove_amplitude, remove_delay=remove_delay,
remove_phase= remove_phase)
return s21_raw*normalization
def normalized_model(self,freq,remove_amplitude = True, remove_delay = True, remove_phase = True):
"""
        Evaluate the model, removing arbitrary amplitude, delay, and phase terms
freq : float or array of floats
frequency in same units as the model was built with, at which normalized model should be evaluated
"""
return self.normalize(freq, self.model(x=freq),remove_amplitude=remove_amplitude, remove_delay=remove_delay,
remove_phase= remove_phase)
def approx_normalized_gradient(self,freq):
"""
Calculate the approximate gradient of the normalized model dS21/df at the given frequency.
The units will be S21 / Hz
freq : float or array of floats
frequency in same units as the model was built with, at which normalized gradient should be evaluated
"""
if self.freq_units_MHz:
df = 1e-6 # 1 Hz
else:
df = 1.0
f1 = freq+df
y = self.normalized_model(freq)
y1 = self.normalized_model(f1)
gradient = y1-y # division by 1 Hz is implied.
return gradient
def project_s21_to_delta_freq(self,freq,s21,use_data_mean=True,s21_already_normalized=False):
"""
Project s21 data onto the orthogonal vectors tangent and perpendicular to the resonance circle at the
measurement frequency
This results in complex data with the real part projected along the frequency direction (in Hz) and the
imaginary part projected along the dissipation direction (also in pseudo-Hz).
freq : float
frequency in same units as the model was built with, at which the S21 data was measured.
s21 : complex or array of complex
Raw S21 data measured at the indicated frequency
use_data_mean : bool, default True
if true, center the data on the mean of the data before projecting.
if false, center the data on the value of the model evaluated at the measurement frequency.
s21_already_normalized : bool, default False
if true, the s21 data has already been normalized
if false, first normalize the s21 data
"""
if s21_already_normalized:
normalized_s21 = s21
else:
normalized_s21 = self.normalize(freq,s21)
if use_data_mean:
mean_ = normalized_s21.mean()
else:
mean_ = self.normalized_model(freq)
gradient = self.approx_normalized_gradient(freq)
delta_freq = (normalized_s21-mean_)/gradient
return delta_freq
def convert_s21_to_freq_fluctuation(self,freq,s21):
"""
Use formula in Phil's LTD paper to convert S21 data to frequency fluctuations.
The result of this is the same as Re(S21/(dS21/df)), so the same as self.project_s21_to_delta_freq().real
freq : float
frequency in same units as the model was built with, at which the S21 data was measured.
s21 : complex or array of complex
Raw S21 data measured at the indicated frequency
"""
normalized_s21 = self.normalize(freq,s21)
gradient = self.approx_normalized_gradient(freq)
# using notation from Phil's LTD paper
I = normalized_s21.real
Q = normalized_s21.imag
dIdf = gradient.real
dQdf = gradient.imag
ef = (I*dIdf + Q*dQdf)/(dIdf**2 + dQdf**2)
return ef
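# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how fit_best_resonator might be called, assuming the
# kid_readout/khalil imports above succeed. The synthetic S21 trace and the
# parameter values below are illustrative only, not a verified test case.
if __name__ == "__main__":
    f0 = 150e6  # assumed resonance frequency, Hz
    freq = np.linspace(f0 - 50e3, f0 + 50e3, 501)
    Q, Qe = 20000.0, 30000.0
    s21 = 1 - (Q / Qe) / (1 + 2j * Q * (freq - f0) / f0)
    best = fit_best_resonator(freq, s21, delay_estimate=0.0)
    print(best.result.params)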
|
import tweepy
from keys import consumer_key, consumer_secret, access_token, access_secret
from scrape import auth, api, get_tweets
from create import markov, dict_maker
auth.set_access_token(access_token, access_secret)
#Dictionaries for specific accounts are created here
playstation = dict_maker("PlayStation")
lil_nas_x = dict_maker("LilNasX")
mark_hamill = dict_maker("HamillHimself")
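# --- Hedged usage sketch (not part of the original script) ---
# One way the bot might post a generated tweet. The markov() call signature is
# an assumption here (a chain dictionary in, a generated string out);
# api.update_status() is a standard tweepy call.
# tweet_text = markov(playstation)      # hypothetical signature
# api.update_status(tweet_text[:280])   # keep within the tweet length limit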
|
import logging
from typing import Optional
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT
from homeassistant.const import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_POWER_FACTOR,
DEVICE_CLASS_TIMESTAMP,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
#from homeassistant.components.sensor import (
# STATE_CLASS_MEASUREMENT,
# STATE_CLASS_TOTAL_INCREASING
#)
# For now, let's not force the newer version, we'll use the same constants
# but it'll be optional.
# TODO: Force the usage of new HA
STATE_CLASS_MEASUREMENT = "measurement"
STATE_CLASS_TOTAL_INCREASING = 'total_increasing'
from homeassistant.helpers.entity import Entity
from gehomesdk import ErdCode, ErdCodeType, ErdCodeClass, ErdMeasurementUnits
from .ge_erd_entity import GeErdEntity
from ...devices import ApplianceApi
_LOGGER = logging.getLogger(__name__)
class GeErdSensor(GeErdEntity, Entity):
"""GE Entity for sensors"""
def __init__(
self,
api: ApplianceApi,
erd_code: ErdCodeType,
erd_override: str = None,
icon_override: str = None,
device_class_override: str = None,
state_class_override: str = None,
uom_override: str = None,
):
super().__init__(api, erd_code, erd_override, icon_override, device_class_override)
self._uom_override = uom_override
self._state_class_override = state_class_override
@property
def state(self) -> Optional[str]:
try:
value = self.appliance.get_erd_value(self.erd_code)
except KeyError:
return None
# TODO: perhaps enhance so that there's a list of variables available
# for the stringify function to consume...
return self._stringify(value, temp_units=self._temp_units)
@property
def unit_of_measurement(self) -> Optional[str]:
return self._get_uom()
@property
def state_class(self) -> Optional[str]:
return self._get_state_class()
@property
def _temp_units(self) -> Optional[str]:
if self._measurement_system == ErdMeasurementUnits.METRIC:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
def _get_uom(self):
"""Select appropriate units"""
#if we have an override, just use it
if self._uom_override:
return self._uom_override
if (
self.erd_code_class
in [ErdCodeClass.RAW_TEMPERATURE, ErdCodeClass.NON_ZERO_TEMPERATURE]
or self.device_class == DEVICE_CLASS_TEMPERATURE
):
return self._temp_units
if (
self.erd_code_class == ErdCodeClass.BATTERY
or self.device_class == DEVICE_CLASS_BATTERY
):
return "%"
if self.erd_code_class == ErdCodeClass.PERCENTAGE:
return "%"
if self.device_class == DEVICE_CLASS_POWER_FACTOR:
return "%"
if self.erd_code_class == ErdCodeClass.FLOW_RATE:
if self._measurement_system == ErdMeasurementUnits.METRIC:
return "lpm"
return "gpm"
if self.erd_code_class == ErdCodeClass.LIQUID_VOLUME:
if self._measurement_system == ErdMeasurementUnits.METRIC:
return "l"
return "g"
return None
def _get_device_class(self) -> Optional[str]:
if self._device_class_override:
return self._device_class_override
if self.erd_code_class in [
ErdCodeClass.RAW_TEMPERATURE,
ErdCodeClass.NON_ZERO_TEMPERATURE,
]:
return DEVICE_CLASS_TEMPERATURE
if self.erd_code_class == ErdCodeClass.BATTERY:
return DEVICE_CLASS_BATTERY
if self.erd_code_class == ErdCodeClass.POWER:
return DEVICE_CLASS_POWER
if self.erd_code_class == ErdCodeClass.ENERGY:
return DEVICE_CLASS_ENERGY
return None
def _get_state_class(self) -> Optional[str]:
if self._state_class_override:
return self._state_class_override
if self.device_class in [DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_ENERGY]:
return STATE_CLASS_MEASUREMENT
if self.erd_code_class in [ErdCodeClass.FLOW_RATE, ErdCodeClass.PERCENTAGE]:
return STATE_CLASS_MEASUREMENT
if self.erd_code_class in [ErdCodeClass.LIQUID_VOLUME]:
return STATE_CLASS_TOTAL_INCREASING
return None
def _get_icon(self):
if self.erd_code_class == ErdCodeClass.DOOR:
if self.state.lower().endswith("open"):
return "mdi:door-open"
if self.state.lower().endswith("closed"):
return "mdi:door-closed"
return super()._get_icon()
async def set_value(self, value):
"""Sets the ERD value, assumes that the data type is correct"""
try:
await self.appliance.async_set_erd_value(self.erd_code, value)
        except Exception:
            _LOGGER.warning(f"Could not set {self.name} to {value}")
|
import pytest
import sdk_cmd
import sdk_install
import sdk_hosts
import sdk_plan
import sdk_utils
import retrying
from security import transport_encryption
from tests import config
DEFAULT_JOURNAL_NODE_TLS_PORT = 8481
DEFAULT_NAME_NODE_TLS_PORT = 9003
DEFAULT_DATA_NODE_TLS_PORT = 9006
@pytest.fixture(scope='module')
def service_account(configure_security):
"""
Sets up a service account for use with TLS.
"""
try:
name = config.SERVICE_NAME
service_account_info = transport_encryption.setup_service_account(name)
yield service_account_info
finally:
transport_encryption.cleanup_service_account(config.SERVICE_NAME,
service_account_info)
@pytest.fixture(scope='module')
def hdfs_service_tls(service_account):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
sdk_install.install(
config.PACKAGE_NAME,
service_name=config.SERVICE_NAME,
expected_running_tasks=config.DEFAULT_TASK_COUNT,
additional_options={
"service": {
"service_account": service_account["name"],
"service_account_secret": service_account["secret"],
"security": {
"transport_encryption": {
"enabled": True
}
}
}
},
timeout_seconds=30 * 60)
sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
yield service_account
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
def test_healthy(hdfs_service_tls):
config.check_healthy(service_name=config.SERVICE_NAME)
@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.data_integrity
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
def test_write_and_read_data_over_tls(hdfs_service_tls):
test_filename = "test_data_tls" # must be unique among tests in this suite
config.write_data_to_hdfs(config.SERVICE_NAME, test_filename)
config.read_data_from_hdfs(config.SERVICE_NAME, test_filename)
@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.parametrize("node_type,port", [
('journal', DEFAULT_JOURNAL_NODE_TLS_PORT),
('name', DEFAULT_NAME_NODE_TLS_PORT),
('data', DEFAULT_DATA_NODE_TLS_PORT),
])
def test_verify_https_ports(node_type, port, hdfs_service_tls):
"""
    Verify that the HTTPS port is open for the name, journal, and data node types.
"""
host = sdk_hosts.autoip_host(
config.SERVICE_NAME, "{}-0-node".format(node_type), port)
@retrying.retry(
wait_fixed=1000,
stop_max_delay=config.DEFAULT_HDFS_TIMEOUT*1000,
retry_on_result=lambda res: not res)
def fn():
exit_status, output = sdk_cmd.master_ssh(_curl_https_get_code(host))
return exit_status and output == '200'
assert fn()
def _curl_https_get_code(host):
"""
Create a curl command for a given host that outputs HTTP status code.
"""
return (
'/opt/mesosphere/bin/curl '
'-s -o /dev/null -w "%{{http_code}}" '
'https://{host}'
).format(host=host)
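# --- Hedged illustration (not part of the original test) ---
# For any resolved <host>, _curl_https_get_code() builds a command of the form
#   /opt/mesosphere/bin/curl -s -o /dev/null -w "%{http_code}" https://<host>
# which prints only the HTTP status code, so the retried check above just
# compares the output to '200'.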
|
import numpy as np
import pandas as pd
import os
import pickle
import argparse
from utils.data_utils import *
from tqdm import tqdm
import itertools
if __name__ == "__main__":
"""
Generate dataframe where each row represents patient admission
"""
parser = argparse.ArgumentParser(description="Process Mimic-iii CSV Files")
parser.add_argument(
"-p", "--path", default=None, type=str, help="path to mimic-iii csvs"
)
parser.add_argument(
"-s", "--save", default=None, type=str, help="path to dump output"
)
parser.add_argument(
"-min-adm",
"--min_admission",
default=1,
type=int,
help="minimum number of admissions for each patient",
)
args = parser.parse_args()
# format date time
df_adm = pd.read_csv(os.path.join(args.path, "ADMISSIONS.csv"))
df_adm.ADMITTIME = pd.to_datetime(
df_adm.ADMITTIME, format="%Y-%m-%d %H:%M:%S", errors="coerce"
)
df_adm.DISCHTIME = pd.to_datetime(
df_adm.DISCHTIME, format="%Y-%m-%d %H:%M:%S", errors="coerce"
)
df_adm.DEATHTIME = pd.to_datetime(
df_adm.DEATHTIME, format="%Y-%m-%d %H:%M:%S", errors="coerce"
)
df_adm = df_adm.sort_values(["SUBJECT_ID", "ADMITTIME"])
df_adm = df_adm.reset_index(drop=True)
# one task in the paper is to predict re-admission within 30 days
df_adm["NEXT_ADMITTIME"] = df_adm.groupby("SUBJECT_ID").ADMITTIME.shift(periods=-1)
df_adm["NEXT_ADMISSION_TYPE"] = df_adm.groupby("SUBJECT_ID").ADMISSION_TYPE.shift(
periods=-1
)
rows = df_adm.NEXT_ADMISSION_TYPE == "ELECTIVE"
df_adm.loc[rows, "NEXT_ADMITTIME"] = pd.NaT
df_adm.loc[rows, "NEXT_ADMISSION_TYPE"] = np.NaN
df_adm = df_adm.sort_values(["SUBJECT_ID", "ADMITTIME"])
    # When we filter out the "ELECTIVE" admissions,
    # we need to correct the next admit time
    # for these admissions, since there might
    # be an 'EMERGENCY' next admit after an "ELECTIVE" one
df_adm[["NEXT_ADMITTIME", "NEXT_ADMISSION_TYPE"]] = df_adm.groupby(["SUBJECT_ID"])[
["NEXT_ADMITTIME", "NEXT_ADMISSION_TYPE"]
].fillna(method="bfill")
df_adm["DAYS_NEXT_ADMIT"] = (
df_adm.NEXT_ADMITTIME - df_adm.DISCHTIME
).dt.total_seconds() / (24 * 60 * 60)
df_adm["readmission_label"] = (df_adm.DAYS_NEXT_ADMIT < 30).astype("int")
### filter out newborn and death
df_adm = df_adm[df_adm["ADMISSION_TYPE"] != "NEWBORN"]
df_adm["DURATION"] = (
df_adm["DISCHTIME"] - df_adm["ADMITTIME"]
).dt.total_seconds() / (24 * 60 * 60)
# remove patients with admissions < min_adm
if args.min_admission > 1:
df_adm = remove_min_admissions(df_adm, min_admits=args.min_admission)
# Adding clinical codes to dataset
# add diagnoses
code = "ICD9_CODE"
diagnoses = read_icd_diagnoses_table(args.path)
diagnoses = filter_codes(diagnoses, code=code, min_=10)
# add procedures
procedures = read_icd_procedures_table(args.path)
procedures = filter_codes(procedures, code=code, min_=10)
with open("vocab/icd-map.pkl", "rb") as f:
dic_icd = pickle.load(f)
with open("vocab/proc-map.pkl", "rb") as f:
dic_proc = pickle.load(f)
diagnoses['ICD9_SHORT'] = diagnoses['ICD9_CODE'].apply(lambda x: dic_icd[x])
# adding a constant to procedure code mapping to avoid conflicts
mapping_shift = max(dic_icd.values())
procedures['ICD9_CODE'] = procedures['ICD9_CODE'].astype(str)
procedures['PROC_SHORT'] = procedures['ICD9_CODE'].apply(lambda x: dic_proc[x]+mapping_shift
if x in dic_proc.keys() else None)
procedures.dropna(inplace=True)
diagnoses = group_by_return_col_list(
diagnoses, ["SUBJECT_ID", "HADM_ID"], 'ICD9_SHORT')
procedures = group_by_return_col_list(
procedures, ["SUBJECT_ID", "HADM_ID"], 'PROC_SHORT'
)
# ICU info
patients = read_patients_table(args.path)
stays = read_icustays_table(args.path)
stays = stays.merge(patients, how='inner', left_on=['SUBJECT_ID'], right_on=["SUBJECT_ID"])
cols = ["SUBJECT_ID", "HADM_ID"]
stays = stays.merge(diagnoses, how="inner", left_on=cols, right_on=cols)
stays = stays.merge(procedures, how="inner", left_on=cols, right_on=cols)
stays = add_age_to_icustays(stays)
df_adm = pd.merge(
df_adm, stays, on=["SUBJECT_ID", "HADM_ID"], how="inner"
)
df_adm["ADMITTIME_C"] = df_adm.ADMITTIME.apply(
lambda x: str(x).split(" ")[0]
)
df_adm["ADMITTIME_C"] = pd.to_datetime(
df_adm.ADMITTIME_C, format="%Y-%m-%d", errors="coerce"
)
df_adm = compute_time_delta(df_adm)
# only retain the first row for each HADM ID
df = df_adm.groupby("HADM_ID").first()
# remove organ donor admissions
if "DIAGNOSIS" in df.columns:
REMOVE_DIAGNOSIS = ~(
(df["DIAGNOSIS"] == "ORGAN DONOR ACCOUNT")
| (df["DIAGNOSIS"] == "ORGAN DONOR")
| (df["DIAGNOSIS"] == "DONOR ACCOUNT")
)
df = df[REMOVE_DIAGNOSIS]
# begin demographic info processing
demographic_cols = {
"AGE": [],
"GENDER": [],
"LAST_CAREUNIT": [],
"MARITAL_STATUS": [],
"ETHNICITY": [],
"DISCHARGE_LOCATION": [],
}
df["MARITAL_STATUS"], demographic_cols["MARITAL_STATUS"] = pd.factorize(
df["MARITAL_STATUS"]
)
df["ETHNICITY"], demographic_cols["ETHNICITY"] = pd.factorize(df["ETHNICITY"])
df["DISCHARGE_LOCATION"], demographic_cols["DISCHARGE_LOCATION"] = pd.factorize(
df["DISCHARGE_LOCATION"]
)
df["LAST_CAREUNIT"], demographic_cols["LAST_CAREUNIT"] = pd.factorize(
df["LAST_CAREUNIT"]
)
df["GENDER"], demographic_cols["GENDER"] = pd.factorize(df["GENDER"])
los_bins = [1, 2, 3, 4, 5, 6, 7, 8, 14, float("inf")]
los_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# LOS: Length of stay
df["LOS"] = pd.cut(df["LOS"], bins=los_bins, labels=los_labels)
data = {}
pids = list(set(df["SUBJECT_ID"]))
for i, pid in enumerate(tqdm(pids)):
pid_df = df[df["SUBJECT_ID"] == pid]
pid_df = pid_df.sort_values("ADMITTIME").reset_index()
# change key from subject ID to the integer number
data[i] = []
time = 0
tmp = pid_df.iloc[0]
        # first part of admit data, which is the same regardless of visits
admit_data = {}
# one-hot encoding for MARITAL, LAST_CAREUNIT and ETHNICITY
demographics = [tmp["AGE"], tmp["GENDER"]]
marital_status = np.zeros(
(demographic_cols["MARITAL_STATUS"].size,), dtype=int
)
marital_status[tmp["MARITAL_STATUS"]] = 1
demographics += list(marital_status)
icu_unit = np.zeros(
(demographic_cols["LAST_CAREUNIT"].size,), dtype=int
)
icu_unit[tmp["LAST_CAREUNIT"]] = 1
demographics += list(icu_unit)
ethnicity = np.zeros((demographic_cols["ETHNICITY"].size,), dtype=int)
ethnicity[tmp["ETHNICITY"]] = 1
demographics += list(ethnicity)
admit_data["demographics"] = demographics
admit_data["readmission"] = tmp["readmission_label"]
admit_data["mortality"] = tmp["DEATHTIME"] == tmp["DEATHTIME"]
data[i].append(admit_data)
for _, r in pid_df.iterrows():
admit_data = {}
# gather the medical codes for each visit
admit_data["diagnoses"] = r['ICD9_SHORT']
admit_data["procedures"] = r['PROC_SHORT']
admit_data["los"] = r["LOS"]
time += r["TIMEDELTA"]
admit_data["timedelta"] = time
data[i].append(admit_data)
pids = list(data.keys())
data_info = {}
data_info["num_patients"] = len(pids)
num_icd9_codes = max(dic_icd.values()) + 1 # mapping start with 0
num_proc_codes = max(dic_proc.values()) + 1
data_info["num_icd9_codes"] = num_icd9_codes
data_info["num_proc_codes"] = num_proc_codes
data_info["num_med_codes"] = 0
data_info["num_cpt_codes"] = 0
data_info["demographics_shape"] = len(data[pids[0]][0]["demographics"])
data_info["demographic_cols"] = demographic_cols
if not os.path.isdir(args.save):
os.makedirs(args.save)
with open(os.path.join(args.save, "data_no_grouped.pkl"), "wb") as handle:
data_dict = {}
data_dict["info"] = data_info
data_dict["data"] = data
pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
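# --- Hedged usage note (not part of the original script) ---
# The pickle written above can be loaded back with, for example:
#   with open(os.path.join(save_dir, "data_no_grouped.pkl"), "rb") as handle:
#       data_dict = pickle.load(handle)
#   info, data = data_dict["info"], data_dict["data"]
# where data[i] is a list whose first element holds demographics/labels and the
# remaining elements hold per-admission codes, length of stay and time deltas.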
|
import sys
import paho.mqtt.subscribe as subscribe
broker = sys.argv[1]
topic = sys.argv[2]
def callback(client, userdata, message):
print("message received ", str(message.payload.decode("utf-8")))
try:
subscribe.callback(callback, topic,
hostname=broker).loop_forever()
except KeyboardInterrupt:
exit(0)
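# --- Hedged usage note (not part of the original script) ---
# Example invocation (file name, broker and topic are placeholders):
#   python mqtt_subscribe.py test.mosquitto.org sensors/temperature
# subscribe.callback() connects, subscribes to the topic and dispatches each
# incoming message to callback() until the process is interrupted.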
|
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
class DataLoader:
def __init__ (self, train_dataframe, validation_dataframe, image_dir):
        self.train_dataframe = train_dataframe
self.validation_dataframe = validation_dataframe
self.image_dir = image_dir
return
def CreateImageGenerator(self):
        #Normalize the image and perform angle rotation as a data augmentation technique
self.train_image_generator = ImageDataGenerator(
rescale = 1.0/255.0,
rotation_range= 30,
fill_mode = 'nearest'
)
self.validation_image_generator = ImageDataGenerator(
rescale = 1.0/255.0
)
def CreateDataLoader(self):
self.train_data_loader = self.train_image_generator.flow_from_dataframe(
dataframe=self.train_dataframe,
directory = self.image_dir,
x_col = "filename",
y_col = "finding",
class_mode="binary",
batch_size=2, #images per batch
shuffle=True,
target_size= (416,416)
)
        self.validation_data_loader = self.validation_image_generator.flow_from_dataframe(
            dataframe=self.validation_dataframe,
directory = self.image_dir,
x_col = "filename",
y_col = "finding",
class_mode="binary",
batch_size=1, #images per batch
shuffle=True,
target_size= (416,416)
)
return
if __name__ == '__main__':
pass
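# --- Hedged usage sketch (not part of the original module) ---
# The CSV file names, the "filename"/"finding" columns and the image directory
# below are assumptions; adjust them to the actual dataset layout.
# train_df = pd.read_csv("train_labels.csv")       # hypothetical file
# val_df = pd.read_csv("validation_labels.csv")    # hypothetical file
# loader = DataLoader(train_df, val_df, image_dir="images/")
# loader.CreateImageGenerator()
# loader.CreateDataLoader()
# images, labels = next(loader.train_data_loader)  # one batch of 2 images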
|
import re
import nltk
from nltk.stem.porter import PorterStemmer
def data_cleaning(text):
# Remove symbols and punctuations & apply lower() to string
formatted_text = re.sub(r"[^\w\s]", " ", text).lower()
# Remove stopwords
stopwords = set(nltk.corpus.stopwords.words('english'))
    words = [i for i in formatted_text.split() if i not in stopwords]
# Stemming tokens
word_stem = [PorterStemmer().stem(word) for word in words]
return " ".join(word_stem)
|
import traceback
from io import BytesIO
from PIL import Image
from ocr import ocr
import numpy as np
import json
import os
def validBegin(s):
return s.startswith("62") or s.startswith("60") or s.startswith("34") or s.startswith("35") or s.startswith("37") or s.startswith("51") or s.startswith("52") or s.startswith("53") or s.startswith("54") or s.startswith("55") or s.startswith("9") or s.startswith("43") or s.startswith("48") or s.startswith("42")
def filterLong(results):
res = ""
for cs in results:
trimed = "".join([c for c in cs if c.isdigit()])
l = len(trimed) # 16-19
if len(trimed) > len(res):
res = trimed
return res
def filter(results):
res = ""
for cs in results:
trimed = "".join([c for c in cs if c.isdigit()])
l = len(trimed) # 16-19
if l < 16 or l > 19:
continue
while trimed != "" and not validBegin(trimed):
trimed = trimed[1:]
if len(trimed) == 0:
continue
if len(trimed) >= 19:
trimed = trimed[0:19]
elif len(trimed) >= 16:
pass
else:
continue
if len(trimed) > len(res):
res = trimed
return res if res != "" else filterLong(results)
def get_result(img):
if os.getenv("IN_DEMO"):
return "00000000000000000"
image = np.array(img.convert('RGB'))
result, _ = ocr(image)
return filter(map(lambda x: x[1], result.values()))
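# --- Hedged illustration (not part of the original module) ---
# filter() keeps only the digits of each OCR line, requires 16-19 of them, and
# trims leading digits until the number starts with a known card prefix.
# Running this file still needs the ocr dependency imported above.
if __name__ == "__main__":
    sample_lines = ["Card No. 6222 0212 3456 7890", "valid thru 12/25"]
    print(filter(sample_lines))  # -> "6222021234567890"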
|
PHASES = ["build", "test"]
CUDA_VERSIONS = [
None, # cpu build
"92",
"101",
"102",
]
STANDARD_PYTHON_VERSIONS = [
"3.6",
"3.7",
"3.8"
]
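# --- Hedged illustration (not part of the original config) ---
# One way these lists might be expanded into a build matrix; the tuple layout
# is an assumption, not the project's actual CI generator.
if __name__ == "__main__":
    import itertools
    matrix = list(itertools.product(PHASES, CUDA_VERSIONS, STANDARD_PYTHON_VERSIONS))
    print(len(matrix))  # 2 phases * 4 CUDA variants * 3 Python versions = 24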
|
from dataclasses import dataclass
from bindings.gmd.dq_conceptual_consistency_type import DqConceptualConsistencyType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class DqConceptualConsistency(DqConceptualConsistencyType):
class Meta:
name = "DQ_ConceptualConsistency"
namespace = "http://www.isotc211.org/2005/gmd"
|
#!/usr/bin/env python
"""Geoname Annotator"""
from __future__ import absolute_import
import math
import re
import sqlite3
from collections import defaultdict
from .annotator import Annotator, AnnoTier, AnnoSpan
from .ngram_annotator import NgramAnnotator
from .ne_annotator import NEAnnotator
from geopy.distance import great_circle
from .maximum_weight_interval_set import Interval, find_maximum_weight_interval_set
from .get_database_connection import get_database_connection
from . import geoname_classifier
import logging
from six.moves import zip
logging.basicConfig(level=logging.ERROR, format='%(asctime)s %(message)s')
logger = logging.getLogger(__name__)
blocklist = set([
'January', 'February', 'March', 'April', 'May', 'June', 'July',
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday',
'August', 'September', 'October', 'November', 'December',
'North', 'East', 'West', 'South',
'Northeast', 'Southeast', 'Northwest', 'Southwest',
'Eastern', 'Western', 'Southern', 'Northern',
'About', 'Many', 'See', 'Also', 'As', 'In', 'About', 'Health', 'Some',
'International', 'City', 'World', 'Federal', 'Federal District', 'The city',
'British', 'Russian',
'Valley', 'University', 'Center', 'Central',
    # These locations could be legitimate,
    # but they are rarely referred to in a context
    # where their location is relevant.
'National Institutes of Health',
'Centers for Disease Control',
'Ministry of Health and Sanitation',
'1',
])
# Containment levels indicate which properties must match when determining
# whether a geoname of a given containment level contains another geoname.
# The admin codes generally correspond to states, provinces and cities.
CONTAINMENT_LEVELS = [
'country_code',
'admin1_code',
'admin2_code',
'admin3_code',
'admin4_code'
]
def location_contains(loc_outer, loc_inner):
"""
Do a comparison to see if the first geoname contains the second.
It returns an integer to indicate the level of containment.
    0 indicates no containment. Sibling locations and identical locations
    have 0 containment. The level of containment is determined by the specificity
    of the outer location, e.g. USA would be a smaller number than Texas.
In order for containment to be detected the outer location must have a
ADM* or PCL* feature code, which is most countries, states, and districts.
"""
# Test the country code in advance for efficiency. The country code must match for
# any level of containment.
if loc_outer.country_code != loc_inner.country_code or loc_outer.country_code == '':
return 0
feature_code = loc_outer.feature_code
if feature_code == 'ADM1':
outer_feature_level = 2
elif feature_code == 'ADM2':
outer_feature_level = 3
elif feature_code == 'ADM3':
outer_feature_level = 4
elif feature_code == 'ADM4':
outer_feature_level = 5
elif re.match("^PCL.", feature_code):
outer_feature_level = 1
else:
return 0
for prop in CONTAINMENT_LEVELS[1:outer_feature_level]:
if loc_outer[prop] == '':
return 0
if loc_outer[prop] != loc_inner[prop]:
return 0
if loc_outer.geonameid == loc_inner.geonameid:
return 0
return outer_feature_level
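# Hedged worked example (values are illustrative, not real geoname rows):
# an outer record with feature_code "PCLI" (a country) maps to level 1, so the
# property loop above checks nothing beyond the country code and returns 1 for
# any inner geoname sharing that country code; two ADM1 records in the same
# country with different admin1_code values return 0, as do identical geonameids.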
class GeoSpan(AnnoSpan):
def __init__(self, start, end, doc, geoname):
super(GeoSpan, self).__init__(
start,
end,
doc,
metadata={
'geoname': geoname
})
self.geoname = geoname
self.label = geoname.name
def to_dict(self):
result = super(GeoSpan, self).to_dict()
result['geoname'] = self.geoname.to_dict()
return result
GEONAME_ATTRS = [
'geonameid',
'name',
'feature_code',
'country_code',
'admin1_code',
'admin2_code',
'admin3_code',
'admin4_code',
'longitude',
'latitude',
'population',
'asciiname',
'names_used',
'name_count']
ADMINNAME_ATTRS = [
'country_name',
'admin1_name',
'admin2_name',
'admin3_name']
class GeonameRow(object):
__slots__ = GEONAME_ATTRS + ADMINNAME_ATTRS + [
'alternate_locations',
'spans',
'parents',
'score',
'lat_long',
'high_confidence']
def __init__(self, sqlite3_row):
for key in sqlite3_row.keys():
if key in GEONAME_ATTRS:
setattr(self, key, sqlite3_row[key])
self.lat_long = (self.latitude, self.longitude,)
self.alternate_locations = set()
self.spans = set()
self.parents = set()
self.score = None
def add_spans(self, span_text_to_spans):
for name in self.names_used.split(';'):
for span in span_text_to_spans[name.lower().strip()]:
self.spans.add(span)
def __hash__(self):
return id(self)
def __repr__(self):
return self.name
def __getitem__(self, key):
return getattr(self, key)
def to_dict(self):
result = {}
for key in GEONAME_ATTRS:
result[key] = self[key]
for key in ADMINNAME_ATTRS:
if hasattr(self, key):
result[key] = self[key]
result['parents'] = [p.to_dict() for p in self.parents]
result['score'] = self.score
return result
class GeonameFeatures(object):
"""
    This represents the aspects of a candidate geoname that are used to
determine whether it is being referenced.
"""
# The feature name array is used to maintain the order of the
# values in the feature vector.
feature_names = [
'log_population',
'name_count',
'num_spans',
'max_span_length',
'cannonical_name_used',
'loc_NE_portion',
'other_NE_portion',
'noun_portion',
'other_pos_portion',
'num_tokens',
'ambiguity',
'PPL_feature_code',
'ADM_feature_code',
'CONT_feature_code',
'other_feature_code',
'combined_span_parents',
# contextual features
'close_locations',
'very_close_locations',
'containing_locations',
'max_containment_level',
# high_confidence indicates the base feature set received a high score.
        # It is a useful feature for preventing high confidence geonames
# from receiving low final scores when they lack contextual cues -
# for example, when they are the only location mentioned.
'high_confidence',
]
def __init__(self, geoname, spans_to_nes, span_to_tokens):
self.geoname = geoname
# The set of geonames that are mentioned in proximity to the spans
# corresponding to this feature.
# This will be populated by the add_contextual_features function.
self.nearby_mentions = set()
d = {}
d['log_population'] = math.log(geoname.population + 1)
# Geonames with lots of alternate names
# tend to be the ones most commonly referred to.
d['name_count'] = geoname.name_count
d['num_spans'] = len(geoname.spans)
d['max_span_length'] = max([
len(span.text) for span in geoname.spans])
def cannonical_name_match(span, geoname):
first_leaf = next(span.iterate_leaf_base_spans(), None)
if first_leaf:
span_text = first_leaf.text
else:
span_text = span.text
span_in_name = span_text in geoname.name or span_text in geoname.asciiname
return (float(len(span_text)) if span_in_name else 0) / len(geoname.name)
d['cannonical_name_used'] = max([
cannonical_name_match(span, geoname)
for span in geoname.spans
])
loc_NEs_overlap = 0
other_NEs_overlap = 0
total_spans = len(geoname.spans)
for span in geoname.spans:
for ne_span in spans_to_nes[span]:
if ne_span.label == 'GPE' or ne_span.label == 'LOC':
loc_NEs_overlap += 1
else:
other_NEs_overlap += 1
d['loc_NE_portion'] = float(loc_NEs_overlap) / total_spans
d['other_NE_portion'] = float(other_NEs_overlap) / total_spans
noun_pos_tags = 0
other_pos_tags = 0
pos_tags = 0
for span in geoname.spans:
for token_span in span_to_tokens[span]:
token = token_span.token
pos_tags += 1
if token.tag_.startswith("NN") or token.tag_ == "FW":
noun_pos_tags += 1
else:
other_pos_tags += 1
d['combined_span_parents'] = len(geoname.parents)
d['noun_portion'] = float(noun_pos_tags) / pos_tags
d['other_pos_portion'] = float(other_pos_tags) / pos_tags
d['num_tokens'] = pos_tags
d['ambiguity'] = len(geoname.alternate_locations)
feature_code = geoname.feature_code
if feature_code.startswith('PPL'):
d['PPL_feature_code'] = 1
elif feature_code.startswith('ADM'):
d['ADM_feature_code'] = 1
elif feature_code.startswith('CONT'):
d['CONT_feature_code'] = 1
else:
d['other_feature_code'] = 1
self._values = [0] * len(self.feature_names)
self.set_values(d)
def set_value(self, feature_name, value):
self._values[self.feature_names.index(feature_name)] = value
def set_values(self, value_dict):
for idx, name in enumerate(self.feature_names):
if name in value_dict:
self._values[idx] = value_dict[name]
def set_contextual_features(self):
"""
GeonameFeatures are initialized with only values that can be extracted
from the geoname database and span. This extends the GeonameFeature
with values that require information from nearby_mentions.
"""
geoname = self.geoname
close_locations = 0
very_close_locations = 0
containing_locations = 0
max_containment_level = 0
for recently_mentioned_geoname in self.nearby_mentions:
if recently_mentioned_geoname == geoname:
continue
containment_level = max(
location_contains(geoname, recently_mentioned_geoname),
location_contains(recently_mentioned_geoname, geoname))
if containment_level > 0:
containing_locations += 1
if containment_level > max_containment_level:
max_containment_level = containment_level
distance = great_circle(
recently_mentioned_geoname.lat_long, geoname.lat_long
).kilometers
if distance < 400:
close_locations += 1
if distance < 100:
very_close_locations += 1
self.set_values(dict(
close_locations=close_locations,
very_close_locations=very_close_locations,
containing_locations=containing_locations,
max_containment_level=max_containment_level))
def to_dict(self):
return {
key: value
for key, value in zip(self.feature_names, self._values)}
def values(self):
return self._values
class GeonameAnnotator(Annotator):
def __init__(self, custom_classifier=None):
self.connection = get_database_connection()
self.connection.row_factory = sqlite3.Row
if custom_classifier:
self.geoname_classifier = custom_classifier
else:
self.geoname_classifier = geoname_classifier
def get_candidate_geonames(self, doc):
"""
        Returns an array of geoname dicts corresponding to locations that the
document may refer to.
The dicts are extended with lists of associated AnnoSpans.
"""
if 'ngrams' not in doc.tiers:
doc.add_tiers(NgramAnnotator())
logger.info('Ngrams annotated')
if 'nes' not in doc.tiers:
doc.add_tiers(NEAnnotator())
logger.info('Named entities annotated')
def is_possible_geoname(text):
if text in blocklist:
return False
# We can rule out a few FPs and make the query much faster
# by only looking at capitalized names.
if text[0] != text[0].upper():
return False
if len(text) < 3 and text != text.upper():
return False
return True
all_ngrams = list(set([span.text.lower()
for span in doc.tiers['ngrams'].spans
if is_possible_geoname(span.text)
]))
logger.info('%s ngrams extracted' % len(all_ngrams))
cursor = self.connection.cursor()
geoname_results = list(cursor.execute('''
SELECT
geonames.*,
count AS name_count,
group_concat(alternatename, ";") AS names_used
FROM geonames
JOIN alternatename_counts USING ( geonameid )
JOIN alternatenames USING ( geonameid )
WHERE alternatename_lemmatized IN
(''' + ','.join('?' for x in all_ngrams) + ''')
GROUP BY geonameid''', all_ngrams))
logger.info('%s geonames fetched' % len(geoname_results))
geoname_results = [GeonameRow(g) for g in geoname_results]
# Associate spans with the geonames.
# This is done up front so span information can be used in the scoring
# function
span_text_to_spans = defaultdict(list)
for span in doc.tiers['ngrams'].spans:
if is_possible_geoname(span.text):
span_text_to_spans[span.text.lower()].append(span)
candidate_geonames = []
for geoname in geoname_results:
geoname.add_spans(span_text_to_spans)
# In rare cases geonames may have no matching spans because
# sqlite unicode equivalency rules match geonames that use different
            # characters than the document spans used to query them.
# These geonames are ignored.
if len(geoname.spans) > 0:
candidate_geonames.append(geoname)
# Add combined spans to locations that are adjacent to a span linked to
# an administrative division. e.g. Seattle, WA
span_to_geonames = defaultdict(list)
for geoname in candidate_geonames:
for span in geoname.spans:
span_to_geonames[span].append(geoname)
geoname_spans = span_to_geonames.keys()
combined_spans = AnnoTier(geoname_spans).chains(at_least=2, at_most=4, max_dist=4)
for combined_span in combined_spans:
leaf_spans = combined_span.iterate_leaf_base_spans()
first_spans = next(leaf_spans)
potential_geonames = {geoname: set()
for geoname in span_to_geonames[first_spans]}
for leaf_span in leaf_spans:
leaf_span_geonames = span_to_geonames[leaf_span]
next_potential_geonames = defaultdict(set)
for potential_geoname, prev_containing_geonames in potential_geonames.items():
containing_geonames = [
containing_geoname
for containing_geoname in leaf_span_geonames
if location_contains(containing_geoname, potential_geoname) > 0]
if len(containing_geonames) > 0:
next_potential_geonames[potential_geoname] |= prev_containing_geonames | set(containing_geonames)
potential_geonames = next_potential_geonames
for geoname, containing_geonames in potential_geonames.items():
geoname.spans.add(combined_span)
geoname.parents |= containing_geonames
# Replace individual spans with combined spans.
span_to_geonames = defaultdict(list)
for geoname in candidate_geonames:
geoname.spans = set(AnnoTier(geoname.spans).optimal_span_set().spans)
for span in geoname.spans:
span_to_geonames[span].append(geoname)
# Find locations with overlapping spans
        # Note that it is possible for two valid locations to have
# overlapping names. For example, Harare Province has
# Harare as an alternate name, so the city Harare is very
# likely to be an alternate location that competes with it.
for span, geonames in span_to_geonames.items():
geoname_set = set(geonames)
for geoname in geonames:
geoname.alternate_locations |= geoname_set
for geoname in candidate_geonames:
geoname.alternate_locations -= set([geoname])
logger.info('%s alternative locations found' % sum([
len(geoname.alternate_locations)
for geoname in candidate_geonames]))
logger.info('%s candidate locations prepared' %
len(candidate_geonames))
return candidate_geonames
def extract_features(self, geonames, doc):
spans_to_nes = {}
span_to_tokens = {}
geospan_tier = AnnoTier(
set([span for geoname in geonames for span in geoname.spans]))
for span, ne_spans in geospan_tier.group_spans_by_containing_span(
doc.tiers['nes'], allow_partial_containment=True):
spans_to_nes[span] = ne_spans
for span, token_spans in geospan_tier.group_spans_by_containing_span(
doc.tiers['spacy.tokens']):
span_to_tokens[span] = token_spans
return [GeonameFeatures(geoname, spans_to_nes, span_to_tokens)
for geoname in geonames]
def add_contextual_features(self, features):
"""
Extend a list of features with values that are based on the geonames
mentioned nearby.
"""
logger.info('adding contextual features')
span_to_features = defaultdict(list)
for feature in features:
for span in feature.geoname.spans:
span_to_features[span].append(feature)
geoname_span_tier = AnnoTier(list(span_to_features.keys()))
def feature_generator(filter_fun=lambda x: True):
for span in geoname_span_tier.spans:
for feature in span_to_features[span]:
if filter_fun(feature):
yield span.start, feature
# Create iterators that will cycle through all the spans returning the span
# offset and the associated feature.
all_feature_span_iter = feature_generator()
resolved_feature_span_iter = feature_generator(
lambda x: x.geoname.high_confidence)
# boolean indicators of whether the corresponding iterator has reached
# its end.
afs_iter_end = False
rfs_iter_end = False
        # The starting index of the current feature span or resolved
# feature span.
f_start = 0
rf_start = 0
# A ring buffer containing the recently mentioned resolved geoname
# features.
rf_buffer = []
rf_buffer_idx = 0
BUFFER_SIZE = 10
# The number of characters to lookahead searching for nearby mentions.
LOOKAHEAD_OFFSET = 50
# Fill the buffer to capacity with initially mentioned resolved
# features.
while len(rf_buffer) < BUFFER_SIZE:
try:
rf_start, feature = next(resolved_feature_span_iter)
rf_buffer.append(feature.geoname)
except StopIteration:
rfs_iter_end = True
break
# Iterate over all the feature spans and add the resolved features
# in the ring buffer to the nearby_mentions set.
while not afs_iter_end:
while rfs_iter_end or f_start < rf_start - LOOKAHEAD_OFFSET:
try:
f_start, feature = next(all_feature_span_iter)
except StopIteration:
afs_iter_end = True
break
feature.nearby_mentions.update(rf_buffer)
try:
rf_start, resolved_feature = next(resolved_feature_span_iter)
rf_buffer[rf_buffer_idx %
BUFFER_SIZE] = resolved_feature.geoname
rf_buffer_idx += 1
except StopIteration:
rfs_iter_end = True
for feature in features:
feature.set_contextual_features()
# deprecated
def cull_geospans(self, geo_spans):
print("The cull geospans function has been deprecated.")
mwis = find_maximum_weight_interval_set([
Interval(
geo_span.start,
geo_span.end,
# If the size is equal the score is used as a tie breaker.
geo_span.size() + geo_span.geoname.score,
geo_span
)
for geo_span in geo_spans
])
retained_spans = [interval.corresponding_object for interval in mwis]
return retained_spans
def annotate(self, doc):
logger.info('geoannotator started')
candidate_geonames = self.get_candidate_geonames(doc)
features = self.extract_features(candidate_geonames, doc)
if len(features) == 0:
doc.tiers['geonames'] = AnnoTier([])
return doc
scores = self.geoname_classifier.predict_proba_base([
list(f.values()) for f in features])
for geoname, feature, score in zip(candidate_geonames, features, scores):
geoname.high_confidence = float(
score[1]) > self.geoname_classifier.HIGH_CONFIDENCE_THRESHOLD
feature.set_value('high_confidence', geoname.high_confidence)
has_high_confidence_features = any(
[geoname.high_confidence for geoname in candidate_geonames])
if has_high_confidence_features:
self.add_contextual_features(features)
scores = self.geoname_classifier.predict_proba_contextual([
list(f.values()) for f in features])
for geoname, score in zip(candidate_geonames, scores):
geoname.score = float(score[1])
culled_geonames = [geoname
for geoname in candidate_geonames
if geoname.score > self.geoname_classifier.GEONAME_SCORE_THRESHOLD]
cursor = self.connection.cursor()
for geoname in culled_geonames:
geoname_results = list(cursor.execute('''
SELECT
cc.name,
a1.name,
a2.name,
a3.name
FROM adminnames a3
JOIN adminnames a2 ON (
a2.country_code = a3.country_code AND
a2.admin1_code = a3.admin1_code AND
a2.admin2_code = a3.admin2_code AND
a2.admin3_code = "" )
JOIN adminnames a1 ON (
a1.country_code = a3.country_code AND
a1.admin1_code = a3.admin1_code AND
a1.admin2_code = "" AND
a1.admin3_code = "" )
JOIN adminnames cc ON (
cc.country_code = a3.country_code AND
cc.admin1_code = "00" AND
cc.admin2_code = "" AND
cc.admin3_code = "" )
WHERE (a3.country_code = ? AND a3.admin1_code = ? AND a3.admin2_code = ? AND a3.admin3_code = ?)
''', (
geoname.country_code or "",
geoname.admin1_code or "",
geoname.admin2_code or "",
geoname.admin3_code or "",)))
for result in geoname_results:
prev_val = None
for idx, attr in enumerate(['country_name', 'admin1_name', 'admin2_name', 'admin3_name']):
val = result[idx]
if val == prev_val:
# Names are repeated for admin levels beyond that of
# the geoname.
break
setattr(geoname, attr, val)
prev_val = val
logger.info('admin names added')
geo_spans = []
for geoname in culled_geonames:
for span in geoname.spans:
geo_span = GeoSpan(
span.start, span.end, doc, geoname)
geo_spans.append(geo_span)
culled_geospans = AnnoTier(geo_spans).optimal_span_set(prefer=lambda x: (x.size(), x.geoname.score,))
logger.info('overlapping geospans removed')
return {'geonames': culled_geospans}
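# --- Hedged usage sketch (not part of the original module) ---
# Typical use assumes the surrounding package supplies an annotated-document
# class; the AnnoDoc name below is an assumption:
# doc = AnnoDoc("Cases were reported in Harare, Zimbabwe last week.")
# doc.add_tiers(GeonameAnnotator())
# for span in doc.tiers['geonames'].spans:
#     print(span.geoname.name, span.geoname.score)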
|
"""Processing profile in admin site page
"""
from django.contrib import admin
from .models import Profile
admin.site.register(Profile)
|
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_new_contact(app):
app.contact.add_new_contact(Contact(first_name="Fname", middle_name="Mname", last_name="Lname", nickname="Nickname", title="Test_title", company="Test_company", address="Test_address", tel_home="123456", tel_mobile="11223344", tel_work="11224433", tel_fax="11225566", email="test@mail.com", email2="test2@mail.com", email3="test3@mail.com", homepage="www.test.com", address2="second_address", phone2="123465798", notes="test_notes"))
def test_add_new_empty_contact(app):
app.contact.add_new_contact(Contact(first_name="", middle_name="", last_name="", nickname="", title="", company="", address="", tel_home="", tel_mobile="", tel_work="", tel_fax="", email="", email2="", email3="", homepage="", address2="", phone2="", notes=""))
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import UserChangeForms, UserCreationForms
from .models import User
# Register your models here.
@admin.register(User)
class UserAdmin(UserAdmin): # type: ignore
form = UserChangeForms
add_form = UserCreationForms
model = User
fieldsets = UserAdmin.fieldsets + (
("campos personalizados", {"fields": ("bio",)}),
)
|
from __future__ import division
from typing import List, Optional
import numpy as np
import matplotlib.pyplot as plt
import random
import cv2
from PIL import Image, ImageDraw, ImageFont
import pickle
from pathlib import Path
import scipy.signal as ssig
import scipy.stats as sstat
import math
def sample_weighted(p_dict):
ps = list(p_dict.keys())
return p_dict[np.random.choice(ps, p=ps)]
def move_bb(bbs, t):
"""
Translate the bounding-boxes in by t_x,t_y.
BB : 2x4xn
T : 2-long np.array
"""
return bbs + t[:, None, None]
def crop_safe(arr, rect, bbs=[], pad=0):
"""
ARR : arr to crop
RECT: (x,y,w,h) : area to crop to
BBS : nx4 xywh format bounding-boxes
PAD : percentage to pad
Does safe cropping. Returns the cropped rectangle and
the adjusted bounding-boxes
"""
rect = np.array(rect)
rect[:2] -= pad
rect[2:] += 2*pad
x1, y1 = max(0, rect[0]), max(0, rect[1])
x2, y2 = [min(arr.shape[0], rect[0]+rect[2]),
min(arr.shape[1], rect[1]+rect[3])]
arr = arr[y1:y2, x1:x2]
if len(bbs) > 0:
for i in range(len(bbs)):
bbs[i, 0] -= x1
bbs[i, 1] -= y1
return arr, bbs
else:
return arr
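# Hedged worked example (illustrative numbers): with rect = [10, 20, 30, 40] and
# pad = 2, the padded rect becomes [8, 18, 34, 44]; the crop is then clipped to
# the array bounds and any bounding boxes are shifted by (-x1, -y1) so they stay
# expressed in the cropped image's coordinate frame.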
class BaselineState(object):
A = [0.50, 0.05]
def __init__(self, a) -> None:
self.a = a
def curve(self, x):
return self.a*x**2
def differential(self, x):
return 2*self.a*x
@staticmethod
def get_sample():
"""
        Returns a BaselineState with a randomly sampled curvature coefficient a.
"""
sgn = 1.0
if np.random.rand() < 0.5:
sgn = -1
a = BaselineState.A[1]*np.random.randn() + sgn*BaselineState.A[0]
return BaselineState(a)
class RenderFont(object):
"""
Outputs a rasterized font sample.
    Output is a binary mask matrix cropped closely with the font.
Also, outputs ground-truth bounding boxes and text string
"""
def __init__(self, font_dir: Path, font_model_path: Path, text_path: Path):
# distribution over the type of text:
# whether to get a single word, paragraph or a line:
self.p_text = {1.0: 'WORD',
0.0: 'LINE',
0.0: 'PARA'}
# TEXT PLACEMENT PARAMETERS:
self.f_shrink = 0.90
self.max_shrink_trials = 5 # 0.9^5 ~= 0.6
# the minimum number of characters that should fit in a mask
# to define the maximum font height.
self.min_nchar = 2
self.min_font_h = 48 # px : 0.6*12 ~ 7px <= actual minimum height
self.max_font_h = 320 # px
self.p_flat = 0.10
# curved baseline:
self.p_curved = 1.0
self.baselinestate = BaselineState.get_sample()
# text-source : gets english text:
self.text_source = TextSource(min_nchar=self.min_nchar,
fn=text_path)
# get font-state object:
self.font_state = FontState(font_dir, font_model_path)
def render_multiline(self, font: ImageFont.FreeTypeFont, text):
"""
        renders multiline TEXT onto a new grayscale image using the given
        PIL FONT.
        A new line in text is denoted by \n, no other characters are
        escaped. Other forms of white-space should be converted to spaces.
        returns the rendered (cropped) image, the joined words and the character bounding boxes.
"""
# get the number of lines
lines = text.split('\n')
line_max_length = lines[np.argmax([len(l) for l in lines])]
LINE_W, LINE_H = font.getsize(line_max_length)
fsize = (round(2.0*LINE_W), round(1.25*LINE_H*len(lines)))
image = Image.new('L', fsize, color='black')
draw = ImageDraw.Draw(image)
char_bb = []
space_w = font.getsize('O')[0]
x, y = 0, 0
for line in lines:
x = 0 # carriage-return
for ch in line: # render each character
if ch.isspace(): # just shift
x += space_w
else:
# render the character
draw.text((x, y), ch, fill='white', font=font)
ch_size = font.getsize(ch)
char_bb.append((x, y, ch_size[0], ch_size[1]))
x += ch_size[0]
y += LINE_H # line-feed
crop_box_x = min([box[0] for box in char_bb])
crop_box_y = min([box[1] for box in char_bb])
crop_box_w = max([box[0]+box[2] for box in char_bb])
crop_box_h = max([box[1]+box[3] for box in char_bb])
crop_box = (crop_box_x, crop_box_y, crop_box_w, crop_box_h)
# debug = image.convert('RGB')
# draw = ImageDraw.Draw(debug)
# for (x, y, w, h) in char_bb:
# draw.rectangle((x, y, x+w, y+h), outline=(255, 0, 0))
# draw.rectangle(crop_box, outline=(0, 255, 0))
# debug.show()
words = ' '.join(text.split())
image = np.array(image.crop(crop_box))
char_bb = np.array(char_bb)
return image, words, char_bb
def render_curved(self, font: ImageFont.FreeTypeFont, word_text): # add lang
"""
use curved baseline for rendering word
"""
def draw_char(font: ImageFont.FreeTypeFont, ch: str, rotation: float):
offsetx, offsety, w, h = list(font.getbbox(ch))
ch_image = Image.new('RGBA', (w, h), (0, 0, 0, 0))
draw = ImageDraw.Draw(ch_image)
draw.text((0, 0), ch, font=font, fill=(255, 255, 255, 255))
ch_image = ch_image.crop((offsetx, offsety, w, h))
ch_image = ch_image.rotate(rotation, Image.BICUBIC, expand=True)
return ch_image
wl = len(word_text)
isword = len(word_text.split()) == 1
if not isword or wl > 10 or np.random.rand() > self.p_curved:
return self.render_multiline(font, word_text)
word_bound = font.getbbox(word_text)
fsize = (round(2.0*word_bound[2]), round(3*word_bound[3]))
image = Image.new('L', fsize, color='black')
# baseline state
mid_idx = wl//2
BS = BaselineState.get_sample()
curve = [BS.curve(i-mid_idx) for i in range(wl)]
curve[mid_idx] = -np.sum(curve) / (wl-1)
rots = [math.degrees(math.atan(BS.differential(i-mid_idx)/(font.size/2)))
for i in range(wl)]
# pillow
size = image.size
ch_image = draw_char(font, word_text[mid_idx], rots[mid_idx])
x = int((size[0] - ch_image.size[0]) / 2)
y = int((size[1] - ch_image.size[1]) / 2 - curve[mid_idx])
image.paste(ch_image, (x, y), mask=ch_image)
mid_ch_bb = (x, y, ch_image.size[0], ch_image.size[1])
char_bb = []
last_bb = mid_ch_bb
for i in range(wl):
# skip the middle character
if i == mid_idx:
last_bb = mid_ch_bb
char_bb.append(mid_ch_bb)
continue
elif i < mid_idx: # left-chars
i = mid_idx-1-i
elif i > mid_idx: # right-chars begin
pass
ch = word_text[i]
# draw a single character to a separate image
ch_bb = list(font.getbbox(ch))
ch_image = draw_char(font, ch, rots[i])
if i < mid_idx:
x = last_bb[0] - ch_bb[2]
elif i >= mid_idx:
x = last_bb[0] + last_bb[2]
y = int(last_bb[1] + 2 + curve[i])
image.paste(ch_image, (x, y), mask=ch_image)
ch_bb[0] = x
ch_bb[1] = y
last_bb = (x, y, ch_image.size[0], ch_image.size[1])
char_bb.append(last_bb)
crop_box_x = min([box[0] for box in char_bb])
crop_box_y = min([box[1] for box in char_bb])
crop_box_w = max([box[0]+box[2] for box in char_bb])
crop_box_h = max([box[1]+box[3] for box in char_bb])
crop_box = (crop_box_x, crop_box_y, crop_box_w, crop_box_h)
# debug = image.convert('RGB')
# draw = ImageDraw.Draw(debug)
# for (x, y, w, h) in char_bb:
# draw.rectangle((x, y, x+w, y+h), outline=(255, 0, 0))
# draw.rectangle(crop_box, outline=(0, 255, 0))
# debug.show()
# exit(0)
word_image = np.array(image.crop(crop_box))
char_bb = np.array(char_bb)
# update box coordinates after cropping
char_bb[:, 0] = char_bb[:, 0] - crop_box_x
char_bb[:, 1] = char_bb[:, 1] - crop_box_y
# plt.imshow(word_image)
# plt.show()
# exit()
return word_image, word_text, char_bb
def get_nline_nchar(self, mask_size, font_height, font_width):
"""
Returns the maximum number of lines and characters which can fit
in the MASK_SIZED image.
"""
H, W = mask_size
nline = int(np.ceil(H/(2*font_height)))
nchar = int(np.floor(W/font_width))
return nline, nchar
def place_text(self, text_arrs: List[np.ndarray], back_arr, bbs: List[np.ndarray]):
areas = [-np.prod(ta.shape) for ta in text_arrs]
order = np.argsort(areas)
locs = [None for i in range(len(text_arrs))]
out_arr = np.zeros_like(back_arr)
for i in order:
            ba = np.clip(back_arr.copy().astype(float), 0, 255)
            ta = np.clip(text_arrs[i].copy().astype(float), 0, 255)
ba[ba > 127] = 1e8
intersect = ssig.fftconvolve(ba, ta[:: -1, :: -1], mode='valid')
safemask = intersect < 1e8
if not np.any(safemask): # no collision-free position:
print("COLLISION!!!")
# warn("COLLISION!!!")
return back_arr, locs[: i], bbs[: i], order[: i]
minloc = np.transpose(np.nonzero(safemask))
loc = minloc[np.random.choice(minloc.shape[0]), :]
locs[i] = loc
# update the bounding-boxes:
bbs[i] = move_bb(bbs[i], loc[:: -1])
# blit the text onto the canvas
w, h = text_arrs[i].shape
out_arr[loc[0]: loc[0]+w, loc[1]: loc[1]+h] += text_arrs[i]
return out_arr, locs, bbs, order
def robust_HW(self, mask):
m = mask.copy()
m = (~mask).astype('float')/255
rH = np.median(np.sum(m, axis=0))
rW = np.median(np.sum(m, axis=1))
return rH, rW
def sample_font_height_px(self, h_min, h_max):
if np.random.rand() < self.p_flat:
rnd = np.random.rand()
else:
rnd = np.random.beta(2.0, 2.0)
h_range = h_max - h_min
f_h = np.floor(h_min + h_range*rnd)
return f_h
def bb_xywh2coords(self, bbs):
"""
Takes an nx4 bounding-box matrix specified in x,y,w,h
format and outputs a 2x4xn bb-matrix, (4 vertices per bb).
"""
n, _ = bbs.shape
coords = np.zeros((2, 4, n))
for i in range(n):
coords[:, :, i] = bbs[i, : 2][:, None]
coords[0, 1, i] += bbs[i, 2]
coords[:, 2, i] += bbs[i, 2: 4]
coords[1, 3, i] += bbs[i, 3]
return coords
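    # Hedged worked example (illustrative values): a single box [x=2, y=3, w=10, h=5]
    # becomes the 2x4x1 corner array
    #   [[ 2, 12, 12,  2],
    #    [ 3,  3,  8,  8]]
    # i.e. top-left, top-right, bottom-right, bottom-left in (x, y) order.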
def render_sample(self, font_name, font, mask):
"""
Places text in the "collision-free" region as indicated
in the mask -- 255 for unsafe, 0 for safe.
The text is rendered using FONT, the text content is TEXT.
"""
# H,W = mask.shape
H, W = self.robust_HW(mask)
# find the maximum height in pixels:
max_font_h = min(0.9*H, W/(self.min_nchar+1))
max_font_h = min(max_font_h, self.max_font_h)
if max_font_h < self.min_font_h: # not possible to place any text here
return # None
# let's just place one text-instance for now
# TODO : change this to allow multiple text instances?
i = 0
while i < self.max_shrink_trials and max_font_h > self.min_font_h:
# if i > 0:
# print colorize(Color.BLUE, "shrinkage trial : %d"%i, True)
# sample a random font-height:
f_h_px = self.sample_font_height_px(self.min_font_h, max_font_h)
# print "font-height : %.2f (min: %.2f, max: %.2f)"%(f_h_px, self.min_font_h,max_font_h)
# convert from pixel-height to font-point-size:
f_h = self.font_state.get_font_size(font_name, f_h_px)
# update for the loop
max_font_h = f_h_px
i += 1
# font.size = f_h # set the font-size
# compute the max-number of lines/chars-per-line:
nline, nchar = self.get_nline_nchar(mask.shape[: 2], f_h, f_h)
# print (' > nline = {}, nchar = {}'.format(nline, nchar))
if nchar < self.min_nchar:
return None
assert nline >= 1 and nchar >= self.min_nchar, f'nline={nline}, nchar={nchar}, min_nchar={self.min_nchar}'
# sample text:
text_type = sample_weighted(self.p_text)
text = self.text_source.sample(nline, nchar, text_type)
if len(text) == 0 or np.any([len(line) == 0 for line in text]):
continue
# print colorize(Color.GREEN, text)
# render the text:
txt_arr, txt, bb = self.render_curved(font, text)
bb = self.bb_xywh2coords(bb)
# debug = Image.fromarray(txt_arr).convert('RGB')
# draw = ImageDraw.Draw(debug)
# debug_boxes = bb.transpose()
# for box in debug_boxes:
# draw.polygon(box.flatten().tolist(), outline=(255,0,0))
# # for (x,y,w,h) in bb:
# # draw.rectangle([(x, y), (x+w, y+h)], outline=(255, 0, 0))
# debug.show()
# exit(0)
# make sure that the text-array is not bigger than mask array:
if np.any(np.r_[txt_arr.shape[:2]] > np.r_[mask.shape[:2]]):
# warn("text-array is bigger than mask")
continue
# position the text within the mask:
text_mask, loc, bb, _ = self.place_text([txt_arr], mask, [bb])
if len(loc) > 0: # successful in placing the text collision-free:
return text_mask, loc[0], bb[0], text
return # None
def visualize_bb(self, text_arr, bbs):
ta = text_arr.copy()
for r in bbs:
cv2.rectangle(ta, (r[0], r[1]), (r[0]+r[2],
r[1]+r[3]), color=128, thickness=1)
plt.imshow(ta, cmap='gray')
plt.show()
class FontState(object):
"""
Defines the random state of the font rendering
"""
# size = [50, 10]
size = [30, 70]  # uniform range [low, high) drawn with np.random.randint in sample()
underline = 0.05
strong = 0.5
oblique = 0.2
wide = 0.5
strength = [0.05, 0.1] # uniform dist in this interval
underline_adjustment = [1.0, 2.0] # normal dist mean, std
# beta distribution alpha, beta, offset, range (mean is a/(a+b))
kerning = [2, 5, 0, 20]
border = 0.25
random_caps = -1 # don't recapitalize : retain the capitalization of the lexicon
# lower case, upper case, proper noun
capsmode = [str.lower, str.upper, str.capitalize]
curved = 0.2
random_kerning = 0.2
random_kerning_amount = 0.1
def __init__(self, font_dir: Path, font_model_path: Path, char_freq_path: Optional[Path] = None, create_model=False):
# get character-frequencies in the English language:
# with open(char_freq_path,'rb') as f:
# self.char_freq = cp.load(f)
# u = pickle._Unpickler(f)
# u.encoding = 'latin1'
# p = u.load()
# self.char_freq = p
# get the model to convert from pixel to font pt size:
with open(font_model_path, 'rb') as f:
self.font_model = pickle.load(f)
# get the names of fonts to use:
self.fonts = sorted(font_dir.glob('**/*.ttf'))
print(self.fonts)
print(f'Total: {len(self.fonts)} font(s)')
def get_aspect_ratio(self, font, size=None):
"""
Returns the median aspect ratio of each character of the font.
"""
if size is None:
size = 12  # doesn't matter as we take the RATIO
# the character-frequency data is disabled (see the commented-out code below),
# so fall back to a unit aspect ratio instead of the weighted estimate.
return 1.0
# chars = ''
# chars = ''.join(self.char_freq.keys())
# w = np.array(self.char_freq.values())
# get the [height,width] of each character:
try:
sizes = font.get_metrics(chars, size)
good_idx = [i for i in range(len(sizes)) if sizes[i] is not None]
sizes, w = [sizes[i] for i in good_idx], w[good_idx]
sizes = np.array(sizes).astype('float')[:, [3, 4]]
r = np.abs(sizes[:, 1]/sizes[:, 0]) # width/height
good = np.isfinite(r)
r = r[good]
w = w[good]
w /= np.sum(w)
r_avg = np.sum(w*r)
return r_avg
except:
return 1.0
def get_font_size(self, font_name, font_size_px):
"""
Returns the font-size which corresponds to FONT_SIZE_PX pixels font height.
"""
m = self.font_model[font_name]
return m[0]*font_size_px + m[1] # linear model
def sample(self):
"""
Samples from the font state distribution
"""
font = self.fonts[int(np.random.randint(0, len(self.fonts)))]
font_name = font.stem
return {
'font': font,
'name': font_name,
'size': np.random.randint(self.size[0], self.size[1]),
'underline': np.random.rand() < self.underline,
# clamp to [-2.0, 2.0]; the original max(2.0, min(-2.0, ...)) always evaluated to 2.0
'underline_adjustment': max(-2.0, min(2.0, self.underline_adjustment[1]*np.random.randn() + self.underline_adjustment[0])),
'strong': np.random.rand() < self.strong,
'oblique': np.random.rand() < self.oblique,
'strength': (self.strength[1] - self.strength[0])*np.random.rand() + self.strength[0],
'char_spacing': int(self.kerning[3]*(np.random.beta(self.kerning[0], self.kerning[1])) + self.kerning[2]),
'border': np.random.rand() < self.border,
'random_caps': np.random.rand() < self.random_caps,
'capsmode': random.choice(self.capsmode),
'curved': np.random.rand() < self.curved,
'random_kerning': np.random.rand() < self.random_kerning,
'random_kerning_amount': self.random_kerning_amount,
}
class TextSource(object):
"""
Provides text for words, paragraphs, sentences.
"""
def __init__(self, min_nchar, fn):
"""
TXT_FN : path to file containing text data.
"""
self.min_nchar = min_nchar
self.fdict = {'WORD': self.sample_word,
'LINE': self.sample_line,
'PARA': self.sample_para}
with open(fn, 'r') as f:
self.txt = [l.strip() for l in f.readlines()]
# print(self.txt)
# distribution over line/words for LINE/PARA:
self.p_line_nline = np.array([0.85, 0.10, 0.05])
self.p_line_nword = [4, 3, 12]  # beta: (a, b), max_nword (see sample_line)
self.p_para_nline = [1.0, 1.0] # [1.7,3.0] # beta: (a, b), max_nline
self.p_para_nword = [1.7, 3.0, 10] # beta: (a,b), max_nword
# probability to center-align a paragraph:
self.center_para = 0.5
def check_symb_frac(self, txt, f=0.35):
"""
T/F return : True iff the fraction of symbol/special characters in
txt is less than or equal to f (default=0.35).
"""
return np.sum([not ch.isalnum() for ch in txt])/(len(txt)+0.0) <= f
def is_good(self, txt, f=0.35):
"""
T/F return : True iff the lines in txt (a list of text lines)
are "valid".
A given line l is valid iff:
1. It is not empty.
2. Its symbol fraction is <= f (see check_symb_frac).
3. It has more than self.min_nchar characters.
4. Not all of its characters are in {i, I, o, O, 0, -}.
"""
def is_txt(l):
char_ex = ['i', 'I', 'o', 'O', '0', '-']
chs = [ch in char_ex for ch in l]
return not np.all(chs)
return [(len(l) > self.min_nchar
and self.check_symb_frac(l, f)
and is_txt(l)) for l in txt]
def center_align(self, lines):
"""
PADS lines with space to center align them
lines : list of text-lines.
"""
ls = [len(l) for l in lines]
max_l = max(ls)
for i in range(len(lines)):
l = lines[i].strip()
dl = max_l-ls[i]
lspace = dl//2
rspace = dl-lspace
lines[i] = ' '*lspace+l+' '*rspace
return lines
def get_lines(self, nline, nword, nchar_max, f=0.35, niter=100):
def h_lines(niter=100):
lines = ['']
iter = 0
while not np.all(self.is_good(lines, f)) and iter < niter:
iter += 1
line_start = np.random.choice(len(self.txt)-nline)
lines = [self.txt[line_start+i] for i in range(nline)]
return lines
lines = ['']
iter = 0
while not np.all(self.is_good(lines, f)) and iter < niter:
iter += 1
lines = h_lines(niter=100)
# get words per line:
nline = len(lines)
for i in range(nline):
words = lines[i].split()
dw = len(words)-nword[i]
if dw > 0:
first_word_index = random.choice(range(dw+1))
lines[i] = ' '.join(
words[first_word_index:first_word_index+nword[i]])
# chop-off characters from end:
while len(lines[i]) > nchar_max:
if not np.any([ch.isspace() for ch in lines[i]]):
lines[i] = ''
else:
lines[i] = lines[i][:len(
lines[i])-lines[i][::-1].find(' ')].strip()
if not np.all(self.is_good(lines, f)):
return # None
else:
return lines
def sample(self, nline_max, nchar_max, kind='WORD'):
return self.fdict[kind](nline_max, nchar_max)
def sample_word(self, nline_max, nchar_max, niter=100):
rand_line = self.txt[np.random.choice(len(self.txt))]
words = rand_line.split()
if len(words) == 0:
return []
rand_word = random.choice(words)
iter = 0
while iter < niter and (not self.is_good([rand_word])[0] or len(rand_word) > nchar_max):
rand_line = self.txt[np.random.choice(len(self.txt))]
words = rand_line.split()
if len(words) == 0:
continue
rand_word = random.choice(words)
iter += 1
if not self.is_good([rand_word])[0] or len(rand_word) > nchar_max:
return []
else:
return rand_word
def sample_line(self, nline_max, nchar_max):
nline = nline_max+1
while nline > nline_max:
nline = np.random.choice([1, 2, 3], p=self.p_line_nline)
# get number of words:
nword = [self.p_line_nword[2]*sstat.beta.rvs(a=self.p_line_nword[0], b=self.p_line_nword[1])
for _ in range(nline)]
nword = [max(1, int(np.ceil(n))) for n in nword]
lines = self.get_lines(nline, nword, nchar_max, f=0.35)
if lines is not None:
return '\n'.join(lines)
else:
return []
def sample_para(self, nline_max, nchar_max):
# get number of lines in the paragraph:
nline = nline_max * \
sstat.beta.rvs(a=self.p_para_nline[0], b=self.p_para_nline[1])
nline = max(1, int(np.ceil(nline)))
# get number of words:
nword = [self.p_para_nword[2]*sstat.beta.rvs(a=self.p_para_nword[0], b=self.p_para_nword[1])
for _ in range(nline)]
nword = [max(1, int(np.ceil(n))) for n in nword]
lines = self.get_lines(nline, nword, nchar_max, f=0.35)
if lines is not None:
# center align the paragraph-text:
if np.random.rand() < self.center_para:
lines = self.center_align(lines)
return '\n'.join(lines)
else:
return []
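# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained demo of the collision check used in place_text()
# above: unsafe background pixels are pushed to a huge value (1e8), the text
# mask is correlated with the background via FFT convolution, and any output
# position still below 1e8 cannot overlap an unsafe pixel. It assumes numpy
# and scipy.signal are the module-level imports used above (np / ssig); the
# array sizes below are made up.
if __name__ == '__main__':
    _back = np.zeros((60, 80))
    _back[20:40, 30:50] = 255          # an "unsafe" blob in the middle
    _text = np.full((10, 15), 255.0)   # a text patch to place
    _ba = _back.copy().astype(float)
    _ba[_ba > 127] = 1e8
    _scores = ssig.fftconvolve(_ba, _text[::-1, ::-1], mode='valid')
    _safe = np.transpose(np.nonzero(_scores < 1e8))
    print('number of collision-free top-left positions:', len(_safe))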
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing QChem input files.
"""
import logging
import sys
from typing import Union, Dict, List, Optional, Tuple
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core import Molecule
from .utils import lower_and_check_unique, read_pattern, read_table_pattern
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
__author__ = "Brandon Wood, Samuel Blau, Shyam Dwaraknath, Julian Self, Evan Spotte-Smith"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__email__ = "b.wood@berkeley.edu"
__credits__ = "Xiaohui Qu"
logger = logging.getLogger(__name__)
class QCInput(MSONable):
"""
An object representing a QChem input file. QCInput attributes represent different sections of a QChem input file.
To add a new section one needs to modify __init__, __str__, from_string and add staticmethods
to read and write the new section, i.e. section_template and read_section. By design, there is very little (or no)
checking that input parameters conform to the appropriate QChem format; this responsibility falls on the user or on
separate error-handling software.
"""
def __init__(
self,
molecule: Union[Molecule, Literal["read"]],
rem: Dict,
opt: Optional[Dict[str, List]] = None,
pcm: Optional[Dict] = None,
solvent: Optional[Dict] = None,
smx: Optional[Dict] = None,
scan: Optional[Dict[str, List]] = None,
van_der_waals: Optional[Dict[str, float]] = None,
vdw_mode: str = "atomic",
plots: Optional[Dict] = None,
):
"""
Args:
molecule (pymatgen Molecule object or "read"):
Input molecule. molecule can be set as either a pymatgen Molecule object or as the str "read".
"read" can be used in multi_job QChem input files where the molecule is read in from the
previous calculation.
rem (dict):
A dictionary of all the input parameters for the rem section of QChem input file.
Ex. rem = {'method': 'rimp2', 'basis': '6-31*G++' ... }
opt (dict of lists):
A dictionary of opt sections, where each opt section is a key and the corresponding
values are a list of strings. Strings must be formatted as instructed by the QChem manual.
The different opt sections are: CONSTRAINT, FIXED, DUMMY, and CONNECT
Ex. opt = {"CONSTRAINT": ["tors 2 3 4 5 25.0", "tors 2 5 7 9 80.0"], "FIXED": ["2 XY"]}
pcm (dict):
A dictionary of the PCM section, defining behavior for use of the polarizable continuum model.
Ex: pcm = {"theory": "cpcm", "hpoints": 194}
solvent (dict):
A dictionary defining the solvent parameters used with PCM.
Ex: solvent = {"dielectric": 78.39, "temperature": 298.15}
smx (dict):
A dictionary defining solvent parameters used with the SMD method, a solvent method that adds
short-range terms to PCM.
Ex: smx = {"solvent": "water"}
scan (dict of lists):
A dictionary of scan variables. Because two constraints of the same type are allowed (for instance, two
torsions or two bond stretches), each TYPE of variable (stre, bend, tors) should be its own key in the
dict, rather than each variable. Note that the total number of variables (the sum of the
lengths of all lists) cannot be more than two.
Ex. scan = {"stre": ["3 6 1.5 1.9 0.1"], "tors": ["1 2 3 4 -180 180 15"]}
van_der_waals (dict):
A dictionary of custom van der Waals radii to be used when constructing cavities for the PCM
model or when computing, e.g., Mulliken charges. The keys are strings whose meaning depends on
the value of vdw_mode, and the values are the custom radii in angstroms.
vdw_mode (str): Method of specifying custom van der Waals radii - 'atomic' or 'sequential'.
In 'atomic' mode (default), dict keys represent the atomic number associated with each
radius (e.g., 6 = carbon). In 'sequential' mode, dict keys represent the sequential
position of a single specific atom in the input structure.
plots (dict):
A dictionary of all the input parameters for the plots section of the QChem input file.
"""
self.molecule = molecule
self.rem = lower_and_check_unique(rem)
self.opt = opt
self.pcm = lower_and_check_unique(pcm)
self.solvent = lower_and_check_unique(solvent)
self.smx = lower_and_check_unique(smx)
self.scan = lower_and_check_unique(scan)
self.van_der_waals = lower_and_check_unique(van_der_waals)
self.vdw_mode = vdw_mode
self.plots = lower_and_check_unique(plots)
# Make sure rem is valid:
# - Has a basis
# - Has a method or DFT exchange functional
# - Has a valid job_type or jobtype
valid_job_types = [
"opt",
"optimization",
"sp",
"freq",
"frequency",
"force",
"nmr",
"ts",
"pes_scan",
]
if "basis" not in self.rem:
raise ValueError("The rem dictionary must contain a 'basis' entry")
if "method" not in self.rem:
if "exchange" not in self.rem:
raise ValueError("The rem dictionary must contain either a 'method' entry or an 'exchange' entry")
if "job_type" not in self.rem:
raise ValueError("The rem dictionary must contain a 'job_type' entry")
if self.rem.get("job_type").lower() not in valid_job_types:
raise ValueError("The rem dictionary must contain a valid 'job_type' entry")
# Still to do:
# - Check that the method or functional is valid
# - Check that basis is valid
# - Check that basis is defined for all species in the molecule
# - Validity checks specific to job type?
# - Check OPT and PCM sections?
def __str__(self):
combined_list = []
# molecule section
combined_list.append(self.molecule_template(self.molecule))
combined_list.append("")
# rem section
combined_list.append(self.rem_template(self.rem))
combined_list.append("")
# opt section
if self.opt:
combined_list.append(self.opt_template(self.opt))
combined_list.append("")
# pcm section
if self.pcm:
combined_list.append(self.pcm_template(self.pcm))
combined_list.append("")
# solvent section
if self.solvent:
combined_list.append(self.solvent_template(self.solvent))
combined_list.append("")
if self.smx:
combined_list.append(self.smx_template(self.smx))
combined_list.append("")
# section for pes_scan
if self.scan:
combined_list.append(self.scan_template(self.scan))
combined_list.append("")
# section for van_der_waals radii
if self.van_der_waals:
combined_list.append(self.van_der_waals_template(self.van_der_waals, self.vdw_mode))
combined_list.append("")
# plots section
if self.plots:
combined_list.append(self.plots_template(self.plots))
combined_list.append("")
return "\n".join(combined_list)
@staticmethod
def multi_job_string(job_list: List["QCInput"]) -> str:
"""
Args:
job_list (): List of jobs
Returns:
(str) String representation of multi job input file.
"""
multi_job_string = str()
for i, job_i in enumerate(job_list):
if i < len(job_list) - 1:
multi_job_string += job_i.__str__() + "\n@@@\n\n"
else:
multi_job_string += job_i.__str__()
return multi_job_string
@classmethod
def from_string(cls, string: str) -> "QCInput":
"""
Read QcInput from string.
Args:
string (str): String input.
Returns:
QcInput
"""
sections = cls.find_sections(string)
molecule = cls.read_molecule(string)
rem = cls.read_rem(string)
# only molecule and rem are necessary everything else is checked
opt = None
pcm = None
solvent = None
smx = None
scan = None
plots = None
vdw = None
vdw_mode = "atomic"
if "opt" in sections:
opt = cls.read_opt(string)
if "pcm" in sections:
pcm = cls.read_pcm(string)
if "solvent" in sections:
solvent = cls.read_solvent(string)
if "smx" in sections:
smx = cls.read_smx(string)
if "scan" in sections:
scan = cls.read_scan(string)
if "plots" in sections:
plots = cls.read_plots(string)
if "van_der_waals" in sections:
vdw_mode, vdw = cls.read_vdw(string)
return cls(
molecule,
rem,
opt=opt,
pcm=pcm,
solvent=solvent,
smx=smx,
scan=scan,
plots=plots,
van_der_waals=vdw,
vdw_mode=vdw_mode,
)
def write_file(self, filename: str):
"""
Write QcInput to file.
Args:
filename (str): Filename
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def write_multi_job_file(job_list: List["QCInput"], filename: str):
"""
Write a multijob file.
Args:
job_list (): List of jobs.
filename (): Filename
"""
with zopen(filename, "wt") as f:
f.write(QCInput.multi_job_string(job_list))
@staticmethod
def from_file(filename: str) -> "QCInput":
"""
Create QcInput from file.
Args:
filename (str): Filename
Returns:
QcInput
"""
with zopen(filename, "rt") as f:
return QCInput.from_string(f.read())
@classmethod
def from_multi_jobs_file(cls, filename: str) -> List["QCInput"]:
"""
Create list of QcInput from a file.
Args:
filename (str): Filename
Returns:
List of QCInput objects
"""
with zopen(filename, "rt") as f:
# the delimiter between QChem jobs is @@@
multi_job_strings = f.read().split("@@@")
# list of individual QChem jobs
input_list = [cls.from_string(i) for i in multi_job_strings]
return input_list
@staticmethod
def molecule_template(molecule: Union[Molecule, Literal["read"]]) -> str:
"""
Args:
molecule (Molecule): molecule
Returns:
(str) Molecule template.
"""
# todo: add ghost atoms
mol_list = []
mol_list.append("$molecule")
if isinstance(molecule, str):
if molecule == "read":
mol_list.append(" read")
else:
raise ValueError('The only acceptable text value for molecule is "read"')
else:
mol_list.append(
" {charge} {spin_mult}".format(charge=int(molecule.charge), spin_mult=molecule.spin_multiplicity)
)
for site in molecule.sites:
mol_list.append(
" {atom} {x: .10f} {y: .10f} {z: .10f}".format(
atom=site.species_string, x=site.x, y=site.y, z=site.z
)
)
mol_list.append("$end")
return "\n".join(mol_list)
@staticmethod
def rem_template(rem: Dict) -> str:
"""
Args:
rem ():
Returns:
(str)
"""
rem_list = []
rem_list.append("$rem")
for key, value in rem.items():
rem_list.append(" {key} = {value}".format(key=key, value=value))
rem_list.append("$end")
return "\n".join(rem_list)
@staticmethod
def opt_template(opt: Dict[str, List]) -> str:
"""
Optimization template.
Args:
opt ():
Returns:
(str)
"""
opt_list = []
opt_list.append("$opt")
# loops over all opt sections
for key, value in opt.items():
opt_list.append("{section}".format(section=key))
# loops over all values within the section
for i in value:
opt_list.append(" {val}".format(val=i))
opt_list.append("END{section}".format(section=key))
opt_list.append("")
# this deletes the empty space after the last section
del opt_list[-1]
opt_list.append("$end")
return "\n".join(opt_list)
@staticmethod
def pcm_template(pcm: Dict) -> str:
"""
Pcm run template.
Args:
pcm ():
Returns:
(str)
"""
pcm_list = []
pcm_list.append("$pcm")
for key, value in pcm.items():
pcm_list.append(" {key} {value}".format(key=key, value=value))
pcm_list.append("$end")
return "\n".join(pcm_list)
@staticmethod
def solvent_template(solvent: Dict) -> str:
"""
Solvent template.
Args:
solvent ():
Returns:
(str)
"""
solvent_list = []
solvent_list.append("$solvent")
for key, value in solvent.items():
solvent_list.append(" {key} {value}".format(key=key, value=value))
solvent_list.append("$end")
return "\n".join(solvent_list)
@staticmethod
def smx_template(smx: Dict) -> str:
"""
Args:
smx ():
Returns:
(str)
"""
smx_list = []
smx_list.append("$smx")
for key, value in smx.items():
if value == "tetrahydrofuran":
smx_list.append(" {key} {value}".format(key=key, value="thf"))
else:
smx_list.append(" {key} {value}".format(key=key, value=value))
smx_list.append("$end")
return "\n".join(smx_list)
@staticmethod
def scan_template(scan: Dict[str, List]) -> str:
"""
Args:
scan (dict): Dictionary with scan section information.
Ex: {"stre": ["3 6 1.5 1.9 0.1"], "tors": ["1 2 3 4 -180 180 15"]}
Returns:
String representing Q-Chem input format for scan section
"""
scan_list = list()
scan_list.append("$scan")
total_vars = sum([len(v) for v in scan.values()])
if total_vars > 2:
raise ValueError("Q-Chem only supports PES_SCAN with two or less " "variables.")
for var_type, variables in scan.items():
if variables not in [None, list()]:
for var in variables:
scan_list.append(" {var_type} {var}".format(var_type=var_type, var=var))
scan_list.append("$end")
return "\n".join(scan_list)
@staticmethod
def van_der_waals_template(radii: Dict[str, float], mode: str = "atomic") -> str:
"""
Args:
radii (dict): Dictionary with custom van der Waals radii, in
Angstroms, keyed by either atomic number or sequential
atom number (see 'mode' kwarg).
Ex: {1: 1.20, 12: 1.70}
mode: 'atomic' or 'sequential'. In 'atomic' mode (default), dict keys
represent the atomic number associated with each radius (e.g., '6' = carbon).
In 'sequential' mode, dict keys represent the sequential position of
a single specific atom in the input structure.
**NOTE: keys must be given as strings even though they are numbers!**
Returns:
String representing Q-Chem input format for van_der_waals section
"""
vdw_list = list()
vdw_list.append("$van_der_waals")
if mode == "atomic":
vdw_list.append("1")
elif mode == "sequential":
vdw_list.append("2")
else:
raise ValueError(f"Invalid value {mode} given for 'mode' kwarg.")
for num, radius in radii.items():
vdw_list.append(f" {num} {radius}")
vdw_list.append("$end")
return "\n".join(vdw_list)
@staticmethod
def plots_template(plots: Dict) -> str:
"""
Args:
plots ():
Returns:
(str)
"""
plots_list = []
plots_list.append("$plots")
for key, value in plots.items():
plots_list.append(" {key} {value}".format(key=key, value=value))
plots_list.append("$end")
return "\n".join(plots_list)
@staticmethod
def find_sections(string: str) -> List:
"""
Find sections in the string.
Args:
string (str): String
Returns:
List of sections.
"""
patterns = {"sections": r"^\s*?\$([a-z_]+)", "multiple_jobs": r"(@@@)"}
matches = read_pattern(string, patterns)
# list of the sections present
sections = [val[0] for val in matches["sections"]]
# remove end from sections
sections = [sec for sec in sections if sec != "end"]
# this error should be replaced by a multi job read function when it is added
if "multiple_jobs" in matches.keys():
raise ValueError("Output file contains multiple qchem jobs please parse separately")
if "molecule" not in sections:
raise ValueError("Output file does not contain a molecule section")
if "rem" not in sections:
raise ValueError("Output file does not contain a rem section")
print(sections)
return sections
@staticmethod
def read_molecule(string: str) -> Union[Molecule, Literal["read"]]:
"""
Read molecule from string.
Args:
string (str): String
Returns:
Molecule
"""
charge = None
spin_mult = None
patterns = {
"read": r"^\s*\$molecule\n\s*(read)",
"charge": r"^\s*\$molecule\n\s*((?:\-)*\d+)\s+\d",
"spin_mult": r"^\s*\$molecule\n\s(?:\-)*\d+\s*(\d)",
}
matches = read_pattern(string, patterns)
if "read" in matches.keys():
return "read"
if "charge" in matches.keys():
charge = float(matches["charge"][0][0])
if "spin_mult" in matches.keys():
spin_mult = int(matches["spin_mult"][0][0])
header = r"^\s*\$molecule\n\s*(?:\-)*\d+\s*\d"
row = r"\s*((?i)[a-z]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer = r"^\$end"
mol_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
species = [val[0] for val in mol_table[0]]
coords = [[float(val[1]), float(val[2]), float(val[3])] for val in mol_table[0]]
if charge is None:
mol = Molecule(species=species, coords=coords)
else:
mol = Molecule(species=species, coords=coords, charge=charge, spin_multiplicity=spin_mult)
return mol
@staticmethod
def read_rem(string: str) -> Dict:
"""
Parse rem from string.
Args:
string (str): String
Returns:
(dict) rem
"""
header = r"^\s*\$rem"
row = r"\s*([a-zA-Z\_]+)\s*=?\s*(\S+)"
footer = r"^\s*\$end"
rem_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
return dict(rem_table[0])
@staticmethod
def read_opt(string: str) -> Dict[str, List]:
"""
Read opt section from string.
Args:
string (str): String
Returns:
(dict) Opt section
"""
patterns = {
"CONSTRAINT": r"^\s*CONSTRAINT",
"FIXED": r"^\s*FIXED",
"DUMMY": r"^\s*DUMMY",
"CONNECT": r"^\s*CONNECT",
}
opt_matches = read_pattern(string, patterns)
opt_sections = list(opt_matches.keys())
opt = {}
if "CONSTRAINT" in opt_sections:
c_header = r"^\s*CONSTRAINT\n"
c_row = r"(\w.*)\n"
c_footer = r"^\s*ENDCONSTRAINT\n"
c_table = read_table_pattern(string, header_pattern=c_header, row_pattern=c_row, footer_pattern=c_footer)
opt["CONSTRAINT"] = [val[0] for val in c_table[0]]
if "FIXED" in opt_sections:
f_header = r"^\s*FIXED\n"
f_row = r"(\w.*)\n"
f_footer = r"^\s*ENDFIXED\n"
f_table = read_table_pattern(
string,
header_pattern=f_header,
row_pattern=f_row,
footer_pattern=f_footer,
)
opt["FIXED"] = [val[0] for val in f_table[0]]
if "DUMMY" in opt_sections:
d_header = r"^\s*DUMMY\n"
d_row = r"(\w.*)\n"
d_footer = r"^\s*ENDDUMMY\n"
d_table = read_table_pattern(
string,
header_pattern=d_header,
row_pattern=d_row,
footer_pattern=d_footer,
)
opt["DUMMY"] = [val[0] for val in d_table[0]]
if "CONNECT" in opt_sections:
cc_header = r"^\s*CONNECT\n"
cc_row = r"(\w.*)\n"
cc_footer = r"^\s*ENDCONNECT\n"
cc_table = read_table_pattern(
string,
header_pattern=cc_header,
row_pattern=cc_row,
footer_pattern=cc_footer,
)
opt["CONNECT"] = [val[0] for val in cc_table[0]]
return opt
@staticmethod
def read_pcm(string: str) -> Dict:
"""
Read pcm parameters from string.
Args:
string (str): String
Returns:
(dict) PCM parameters
"""
header = r"^\s*\$pcm"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
pcm_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not pcm_table:
print("No valid PCM inputs found. Note that there should be no '=' chracters in PCM input lines.")
return {}
return dict(pcm_table[0])
@staticmethod
def read_vdw(string: str) -> Tuple[str, Dict]:
"""
Read van der Waals parameters from string.
Args:
string (str): String
Returns:
(str, dict) vdW mode ('atomic' or 'sequential') and dict of van der Waals radii.
"""
header = r"^\s*\$van_der_waals"
row = r"[^\d]*(\d+).?(\d+.\d+)?.*"
footer = r"^\s*\$end"
vdw_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not vdw_table:
print("No valid vdW inputs found. Note that there should be no '=' chracters in vdW input lines.")
return "", {}
if vdw_table[0][0][0] == "2":  # read_table_pattern returns string groups, so compare against "2"
mode = "sequential"
else:
mode = "atomic"
return mode, dict(vdw_table[0][1:])
@staticmethod
def read_solvent(string: str) -> Dict:
"""
Read solvent parameters from string.
Args:
string (str): String
Returns:
(dict) Solvent parameters
"""
header = r"^\s*\$solvent"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
solvent_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not solvent_table:
print("No valid solvent inputs found. Note that there should be no '=' chracters in solvent input lines.")
return {}
return dict(solvent_table[0])
@staticmethod
def read_smx(string: str) -> Dict:
"""
Read smx parameters from string.
Args:
string (str): String
Returns:
(dict) SMX parameters.
"""
header = r"^\s*\$smx"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
smx_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not smx_table:
print("No valid smx inputs found. Note that there should be no '=' chracters in smx input lines.")
return {}
smx = {}
for key, val in smx_table[0]:
smx[key] = val
if smx["solvent"] == "tetrahydrofuran":
smx["solvent"] = "thf"
return smx
@staticmethod
def read_scan(string: str) -> Dict[str, List]:
"""
Read scan section from a string.
Args:
string: String to be parsed
Returns:
Dict representing Q-Chem scan section
"""
header = r"^\s*\$scan"
row = r"\s*(stre|bend|tors|STRE|BEND|TORS)\s+((?:[\-\.0-9]+\s*)+)"
footer = r"^\s*\$end"
scan_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if scan_table == list():
print("No valid scan inputs found. Note that there should be no '=' chracters in scan input lines.")
return dict()
stre = list()
bend = list()
tors = list()
for row in scan_table[0]:
if row[0].lower() == "stre":
stre.append(row[1].replace("\n", "").rstrip())
elif row[0].lower() == "bend":
bend.append(row[1].replace("\n", "").rstrip())
elif row[0].lower() == "tors":
tors.append(row[1].replace("\n", "").rstrip())
if len(stre) + len(bend) + len(tors) > 2:
raise ValueError("No more than two variables are allows in the scan section!")
return {"stre": stre, "bend": bend, "tors": tors}
@staticmethod
def read_plots(string: str) -> Dict:
"""
Read plots parameters from string.
Args:
string (str): String
Returns:
(dict) plots parameters.
"""
header = r"^\s*\$plots"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
plots_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if plots_table == []:
print("No valid plots inputs found. Note that there should be no '=' chracters in plots input lines.")
return {}
plots = {}
for key, val in plots_table[0]:
plots[key] = val
return plots
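# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of building and serializing a QCInput; the geometry, rem
# values, and file name are made up for demonstration. The Molecule constructor
# call mirrors the one used in read_molecule() above.
if __name__ == "__main__":
    water = Molecule(
        species=["O", "H", "H"],
        coords=[[0.0, 0.0, 0.0], [0.0, 0.757, 0.587], [0.0, -0.757, 0.587]],
    )
    qcinp = QCInput(
        molecule=water,
        rem={"job_type": "sp", "method": "b3lyp", "basis": "6-31g*"},
    )
    print(qcinp)                       # rendered $molecule / $rem sections
    qcinp.write_file("water_sp.qin")
    # round-trip: parse the file back and inspect the rem section
    print(QCInput.from_file("water_sp.qin").rem)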
|
'''
Atakan Ayaşlı 170401066
'''
import socket
import time
import os
import sys
def Dosya_listele():
msg = "LİSTE Komutu doğru"
msgEn = msg.encode('utf-8')
s.sendto(msgEn, clientAddr)
F = os.listdir(
path="C:\Users\Atakan\Desktop\170401066\server")
""" lütfen server.py dosyasının olduğu path'i yazınız eğer unicode hatası alıyorsanız pathi yazarken başlangıca r koyunuz örnek= r"C:/Users/...."
"""
Lists = []
for file in F:
Lists.append(file)
ListsStr = str(Lists)
ListsEn = ListsStr.encode('utf-8')
s.sendto(ListsEn, clientAddr)
def PUT():
msg = "Kabul edilen PUT komutu"
msgEn = msg.encode('utf-8')
s.sendto(msgEn, clientAddr)
if t2[0] == "PUT":
BigSAgain = open(t2[1], "wb")
d = 0
print("Transfer başlıyor")
try:
Count, countaddress = s.recvfrom(4096)
except ConnectionResetError:
print(
"Port numaraları uyuşmuyor.")
sys.exit()
except:
print("Bilinmeyen Hata")
sys.exit()
tillI = Count.decode('utf8')
tillI = int(tillI)
while tillI != 0:
ServerData, serverAddr = s.recvfrom(4096)
dataS = BigSAgain.write(ServerData)
d += 1
tillI = tillI - 1
print("Alınan Paket numarası:" + str(d))
BigSAgain.close()
print("Son")
def Exit():
print("Çıkış")
s.close()
sys.exit()
def ipkontrol():
if len(sys.argv) != 2:
print(
"lütfen ip giriniz(server.py 127.0.0.1)!/\n Eğer server.py dosyasınının 16. satırınının dosya pathini manuel ayarlamadıysanız hata verebilir path'i server.py'ın bulunduğu klasörün pathini manuel yazınız ")
sys.exit()
def GET(g):
msg = "Kabul edilen komut "
msgEn = msg.encode('utf-8')
s.sendto(msgEn, clientAddr)
if os.path.isfile(g):
msg = "Dosya bulundu. "
msgEn = msg.encode('utf-8')
s.sendto(msgEn, clientAddr)
c = 0
sizeS = os.stat(g)
sizeSS = sizeS.st_size
print("File size in bytes:" + str(sizeSS))
NumS = int(sizeSS / 4096)
NumS = NumS + 1
tillSS = str(NumS)
tillSSS = tillSS.encode('utf8')
s.sendto(tillSSS, clientAddr)
check = int(NumS)
GetRunS = open(g, "rb")
while check != 0:
RunS = GetRunS.read(4096)
s.sendto(RunS, clientAddr)
c += 1
check -= 1
print("Paket numarası:" + str(c))
GetRunS.close()
print("Sn")
else:
msg = "Hata:dosya bulunamadı"
msgEn = msg.encode('utf-8')
s.sendto(msgEn, clientAddr)
def Else():
msg = "istediğniz " + \
t2[0] + " server tarafından anlaşılmadı"
msgEn = msg.encode('utf-8')
s.sendto(msgEn, clientAddr)
host = ""
ipkontrol()
port=42
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))
print("Başarılı birleşme. Şimdi client bekleniyor.")
except socket.error:
print("Hata")
sys.exit()
while True:
try:
data, clientAddr = s.recvfrom(4096)
except ConnectionResetError:
print(
"Port numaraları uyuşmuyor")
sys.exit()
text = data.decode('utf8')
t2 = text.split()
if t2[0] == "GET":
GET(t2[1])
elif t2[0] == "PUT":
PUT()
elif t2[0] == "LİSTE":
Dosya_listele()
elif t2[0] == "exit":
Exit()
else:
Else()
print("Program sonlandırılıyor ")
quit()
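# --- Illustrative client sketch (not part of the original server) ---
# This would live in a separate client script; it mirrors the protocol
# implemented above: commands are whitespace-separated ("GET <file>",
# "PUT <file>", "LİSTE", "exit"), the server listens on UDP port 42, and a
# successful GET replies with two status messages, then the chunk count, then
# 4096-byte chunks. The server address is an assumption for local testing, and
# the sketch assumes the requested file exists on the server side.
def _demo_get(filename, server=("127.0.0.1", 42)):
    import socket
    c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    c.sendto(("GET " + filename).encode("utf-8"), server)
    print(c.recvfrom(4096)[0].decode("utf-8"))      # command acknowledged
    print(c.recvfrom(4096)[0].decode("utf-8"))      # file found / not found
    remaining = int(c.recvfrom(4096)[0].decode("utf-8"))
    with open("downloaded_" + filename, "wb") as out:
        while remaining > 0:
            out.write(c.recvfrom(4096)[0])
            remaining -= 1
    c.close()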
|
import torch
import numpy as np
import streamlit as st
from encode import encode_long_text, decode_long_text,colorize
import difflib
@st.cache(ignore_hash=True)
def load_models():
# note: despite the variable names, these are the WMT19 English<->German models
en2fr = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
fr2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.de-en.single_model', tokenizer='moses', bpe='fastbpe')
en2fr.cuda()
fr2en.cuda()
return (en2fr,fr2en)
def token2color(token, added=True):
# render a difflib.Differ token: '+ ' = added word, '- ' = removed word, '? ' = hint line
if token[0] == '+':
return '```{}```'.format(token[2:]) if added else ''
elif token[0] == '-':
return '' if added else '```{}```'.format(token[2:])
elif token[0] == '?':
return ''
else:
return token[2:]
models = load_models()
T = st.sidebar.slider("Temperature", min_value=0.7, max_value=1.5, value=1.05, step=None, format=None)
topk = st.sidebar.slider("topk", min_value=5, max_value=200, value=25, step=5, format=None)
decode = st.sidebar.checkbox('Decode?')
if not decode:
st.markdown('# Encode')
else:
st.markdown('# Decode')
phrase="Japanese researchers began studying transistors three months after they were invented at America’s Bell Labs in 1947. Japanese companies then used transistors and other electronic parts and components to produce radios, television sets, Sony Walkmans, video cassette recorders, and computers. As the yen appreciated by 60% following the 1985 Plaza Accord, Japanese companies lost competitiveness in final electronics goods and moved upstream in electronics value chains. They focused on exporting electronic parts and components and capital goods to producers of final electronics goods abroad. "
cover_text = st.text_area("Cover Text", phrase)
if not decode:
secret_message = st.text_area("Secret Message","secret code here: blah blah")
encoded_text, bits_encoded, otherlang_text = encode_long_text(cover_text,secret_message,models,temperature=T,sampling_topk=topk)
diff = st.sidebar.checkbox('Diff?',False)
if diff:
diff = list(difflib.Differ().compare(cover_text.split(' '),encoded_text.split(' ')))
highlighted_diff_p = ' '.join([token2color(tok,True) for tok in diff])
highlighted_diff_m = ' '.join([token2color(tok,False) for tok in diff]).replace('```','~~')
st.markdown('## Cover:')
st.markdown(highlighted_diff_m)
st.markdown('## Cover + Payload:')
st.markdown(highlighted_diff_p)
else:
st.markdown('## Cover + Payload:')
st.write(encoded_text)
st.write(f'`{bits_encoded}` payload bits delivered at a bitrate of `{100*bits_encoded/(8*len(encoded_text)):.2f}`%')
if st.sidebar.checkbox('Show German?'):
st.markdown('## German Intermediary')
st.write(otherlang_text)
if decode:
example_encoded = "Three months after Japanese researchers invented transistors at American Bell Labs in 1947, they began research into transistors. Japanese companies used transistors and other electronic components in manufacturing radios, TVs, Sony Walkmans, video cassette recorders and computers. When Japanese companies regained 60% of the value value of Japanese goods under the 1985 Plaza agreement, they lost their competitive position and advanced into electronic value chains, focusing on exports of electronic components and capital goods to end-product manufacturers overseas."
encoded_text = st.text_area("Cover + Payload", example_encoded)
decoded_text = decode_long_text(cover_text,encoded_text,models,temperature=T,sampling_topk=topk)
st.markdown('## Decoded Text')
st.write(str(decoded_text)[1:]+'\n')
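# --- Illustrative note (not part of the original script) ---
# Assuming this file is saved as app.py, the UI above is launched with:
#   streamlit run app.py
# A CUDA-capable GPU is required, since the translation models are moved to
# the GPU with .cuda() in load_models().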
|
from biicode.common.edition.hive_manager import HiveManager
from biicode.client.exception import ConnectionErrorException, ClientException, NotInAHiveException
from biicode.client.checkout.snapshotbuilder import compute_files, compute_deps_files
from biicode.common.exception import BiiException
from biicode.client.command.printers.command_printer import Printer
from biicode.common.utils.bii_logging import logger
from biicode.client.hooks import handle_hooks
import traceback
import shutil
import os
from biicode.common.migrations.biiconfig_migration import delete_migration_files
from biicode.client.workspace.bii_paths import SRC_DIR, DEP_DIR, BII_DIR, BII_HIVE_DB
from biicode.common.utils.file_utils import save
from biicode.common.model.brl.complex_name import ComplexName
def init_hive(bii, project_name=None, layout=None):
""" Initializes an empty project
"""
user_cache = bii.user_cache
out = bii.user_io.out
bii_paths = bii.bii_paths
if bii_paths.current_dir.startswith(bii_paths.user_bii_home):
raise BiiException('Cannot create a project inside the user .biicode folder')
try:
bii_paths.project_root
raise ClientException('Cannot create project inside other project')
except NotInAHiveException:
pass
if project_name:
name = ComplexName(project_name)
current_dir = os.path.join(bii_paths.current_dir, name)
bii_paths.current_dir = current_dir
else:
current_dir = bii_paths.current_dir
ComplexName(os.path.basename(current_dir))
for root, _, _ in os.walk(current_dir):
if os.path.exists(os.path.join(root, BII_DIR, BII_HIVE_DB)):
if root == current_dir:
project_name = os.path.basename(current_dir)
raise ClientException('Project "%s" already exists' % project_name)
raise ClientException('Cannot create project with other project inside:\n%s' % root)
hive_disk_image = bii.hive_disk_image
hive_disk_image.initialize()
try:
hive_disk_image.hivedb.read_edition_contents()
out.success('Successfully initialized biicode project %s' % (project_name or ""))
# If an exception is launched, the hive folder is deleted
except BaseException as e:
out.error('An error occurred while creating the project %s' % str(e))
logger.error(traceback.format_exc())
if project_name and os.path.exists(current_dir):
hive_disk_image.hivedb.disconnect()
shutil.rmtree(current_dir)
else:
layout_content = user_cache.layout(layout)
if layout_content:
save(os.path.join(hive_disk_image.paths.bii, "layout.bii"), layout_content)
class ClientHiveManager(HiveManager):
""" The main entry point for business logic in client
"""
def __init__(self, bii):
self.bii = bii
self.user_io = self.bii.user_io
self.hive_disk_image = self.bii.hive_disk_image
super(ClientHiveManager, self).__init__(self.hive_disk_image.hivedb, bii.biiapi,
bii.user_io.out)
@property
def paths(self):
return self.hive_disk_image.paths
def _process(self):
""" always the first step in every command
"""
files = self.hive_disk_image.get_src_files()
settings = self.hive_disk_image.settings
self.user_io.out.info('Processing changes...')
deleted_migration = self.process(settings, files)
delete_migration_files(deleted_migration, self.hive_disk_image.paths.blocks)
self._checkout()
self._checkout_deps()
def work(self):
self._process()
self._handle_hooks('post_proc')
def _handle_hooks(self, stage):
""" will manage user defined hooks. It has a problem, it works only if
project is processed. So for clean, it has to detect if there are hooks,
then execute a work first
"""
handle_hooks(stage, self.hive_holder, self.closure, self.bii)
def _checkout(self, allow_delete_block=None):
'''
Checks-out HiveDB into disk
Params:
delete: BlockName or None
if BlockName it will delete that block_name
'''
if allow_delete_block:
self.hive_disk_image.delete_removed(SRC_DIR, self.hive_holder.resources,
block_filter=allow_delete_block)
settings = self.hive_disk_image.settings
update_files = compute_files(self.hive_holder, self.user_io.out, settings)
self.hive_disk_image.save(SRC_DIR, update_files)
def _checkout_deps(self):
if self.closure is not None:
files = compute_deps_files(self.closure)
if files:
self.hive_disk_image.save(DEP_DIR, files)
self.hive_disk_image.delete_removed(DEP_DIR, files)
def new(self, block_name=None, hello_lang=None):
root_block = self.hive_disk_image.paths.root_block
auto_root_path = self.hive_disk_image.paths.auto_root_block
if block_name and not block_name == root_block:
new_block_path = self.hive_disk_image.create_new_block(block_name)
elif auto_root_path or root_block:
new_block_path = self.hive_disk_image.paths.project_root
else:
raise ClientException("Too few arguments, specify a block name "
"or add in your lauout.bii auto-root-path: True "
"or a root-block: my_user/my_block")
# If user has entered -- hello cpp, we create a main file with hello world cpp template
if hello_lang:
from biicode.client.dev.wizards import get_main_file_template
hello_lang = ''.join(hello_lang)
file_name, content = get_main_file_template(hello_lang)
self.hive_disk_image.create_new_file(new_block_path, file_name, content)
def clean(self):
# TODO: Check that there are no changes in deps
if self.hive_disk_image.clean_hooks():
self._process()
self._handle_hooks('clean')
self.hive_disk_image.clean()
def publish(self, block_name, tag, msg, versiontag, publish_all, origin):
self._process()
parents = [b.parent for b in self.hive_holder.block_holders if b.parent.time != -1]
self.bii.biiapi.check_valid(parents)
HiveManager.publish(self, block_name, tag, msg, versiontag,
publish_all=publish_all, origin=origin)
self._checkout()
# Check again, in case some parent outdated DEV => STABLE
parents = [b.parent for b in self.hive_holder.block_holders if b.parent.time != -1]
self.bii.biiapi.check_valid(parents, publish=False)
def find(self, **find_args):
self._process()
try:
policies = self.hive_disk_image.policies
find_result = super(ClientHiveManager, self).find(policies, **find_args)
Printer(self.user_io.out).print_find_result(find_result)
self.apply_find_result(find_result)
self._checkout()
self._checkout_deps()
except ConnectionErrorException:
self.user_io.out.error('Unable to connect to server to find deps')
def update(self, block, time):
self._process()
parents = [b.parent for b in self.hive_holder.block_holders if b.parent.time != -1]
self.bii.biiapi.check_valid(parents, publish=False)
block = super(ClientHiveManager, self).update(block, time)
self._checkout(allow_delete_block=block)
self._checkout_deps()
def open(self, block_name, track, time, version_tag):
'''
Params:
block_version. It time is None last version will be retrieved
'''
self._process()
opened_version = HiveManager.open(self, block_name, track, time, version_tag)
self._checkout()
self._checkout_deps()
if os.path.exists(os.path.join(self.hive_disk_image.paths.deps, block_name)):
raise BiiException("Unable to remove %s from 'deps' folder. Maybe there exist "
"temporary files, or some file is locked by other "
"application. Check it and delete manually 'deps/%s' folder."
% (block_name, block_name))
self.bii.user_io.out.write('Opened %s\n' % str(opened_version))
def close(self, block_name, force):
self._process()
HiveManager.close(self, block_name, self.hive_disk_image.settings, force)
self._checkout(allow_delete_block=block_name)
self._checkout_deps() # When closing a block we might have less dependencies
if os.path.exists(os.path.join(self.hive_disk_image.paths.blocks, block_name)):
raise BiiException("Unable to remove %s from '%s' folder. Maybe there exist "
"temporary or ignored files, or some file is locked by an open "
"application. Check it and delete manually 'blocks/%s' folder."
% (block_name, SRC_DIR, block_name))
self.bii.user_io.out.write('%s closed\n' % block_name)
def diff(self, block_name, version_child, version_parent, short):
from biicode.client.command.printers.diff_printer import print_diff
self._process()
print_diff(self.bii.user_io.out,
self.hive_holder,
self._biiapi,
block_name,
version_child,
version_parent,
short)
def deps(self, block_name=None, details=False, files=False):
''' Command to show all the dependencies in a project '''
from biicode.client.command.printers.deps_printer import print_deps
self._process()
print_deps(self.bii.user_io.out, self.hive_holder, block_name, details, files)
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url('^$', views.index, name='home'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^singleimage/(\d+)', views.single_photo, name='singleImage'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
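# --- Illustrative note (not part of the original module) ---
# With the names above, views and templates can build URLs via reverse()
# (assuming this urlconf is included at the project root), e.g.:
#   reverse('home')                   -> '/'
#   reverse('singleImage', args=[7])  -> '/singleimage/7'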
|
"""
import csv
import os
class GenericCar:
def __init__(self, car_type, brand, photo_file_name, carrying):
self.car_type = car_type
self.brand = brand
self.photo_file_name = photo_file_name
self.carrying = float(carrying)
def get_photo_file_ext(self):
_, file_extension = os.path.splitext(self.photo_file_name)
return file_extension
class Car(GenericCar):
def __init__(self, car_type, brand, photo_file_name, carrying, passenger_seats_count):
super().__init__(car_type, brand, photo_file_name, carrying)
self.passenger_seats_count = int(passenger_seats_count)
class Truck(GenericCar):
def __init__(self, car_type, brand, photo_file_name, carrying, body_whl):
super().__init__(car_type, brand, photo_file_name, carrying)
self.body_length, self.body_width, self.body_height = 0.0, 0.0, 0.0
if body_whl != '':
dimensions = body_whl.split('x')
self.body_length, self.body_width, self.body_height = [float(dim) for dim in dimensions]
def get_body_volume(self):
return self.body_length * self.body_width * self.body_height
class SpecialCar(GenericCar):
def __init__(self, car_type, brand, photo_file_name, carrying, extra):
super().__init__(car_type, brand, photo_file_name, carrying)
self.extra = extra
def get_car_list(csv_file_name):
car_list = []
with open(csv_file_name) as f:
reader = csv.reader(f, delimiter=';')
next(reader) # skip header
for row in reader:
try:
car_type = row[0]
if car_type == 'car':
car_list.append(Car(car_type, brand=row[1], photo_file_name=row[3], carrying=row[5], passenger_seats_count=row[2]))
elif car_type == 'truck':
car_list.append(Truck(car_type, brand=row[1], photo_file_name=row[3], carrying=row[5], body_whl=row[4]))
elif car_type == 'spec_machine':
car_list.append(SpecialCar(car_type, brand=row[1], photo_file_name=row[3], carrying=row[5], extra=row[6]))
else:
pass
except Exception:
pass
return car_list
if __name__ == '__main__':
print(get_car_list('coursera_week3_cars.csv'))
"""
# Teacher's way
import csv
import sys
import os.path
class CarBase:
"""Базовый класс с общими методами и атрибутами"""
# индексы полей, которые соответствуют колонкам в исходном csv-файле
ix_car_type = 0
ix_brand = 1
ix_passenger_seats_count = 2
ix_photo_file_name = 3
ix_body_whl = 4
ix_carrying = 5
ix_extra = 6
def __init__(self, brand, photo_file_name, carrying):
self.brand = brand
self.photo_file_name = photo_file_name
self.carrying = float(carrying)
def get_photo_file_ext(self):
_, ext = os.path.splitext(self.photo_file_name)
return ext
class Car(CarBase):
"""Класс легковой автомобиль"""
car_type = "car"
def __init__(self, brand, photo_file_name, carrying, passenger_seats_count):
super().__init__(brand, photo_file_name, carrying)
self.passenger_seats_count = int(passenger_seats_count)
@classmethod
def from_tuple(cls, row):
""" Метод для создания экземпляра легкового автомобиля
из строки csv-файла"""
return cls(
row[cls.ix_brand],
row[cls.ix_photo_file_name],
row[cls.ix_carrying],
row[cls.ix_passenger_seats_count],
)
class Truck(CarBase):
"""Класс грузовой автомобиль"""
car_type = "truck"
def __init__(self, brand, photo_file_name, carrying, body_whl):
super().__init__(brand, photo_file_name, carrying)
# parse the body_whl field
try:
length, width, height = (float(c) for c in body_whl.split("x", 2))
except ValueError:
length, width, height = .0, .0, .0
self.body_length = length
self.body_width = width
self.body_height = height
def get_body_volume(self):
return self.body_width * self.body_height * self.body_length
@classmethod
def from_tuple(cls, row):
return cls(
row[cls.ix_brand],
row[cls.ix_photo_file_name],
row[cls.ix_carrying],
row[cls.ix_body_whl],
)
class SpecMachine(CarBase):
"""Класс спецтехника"""
car_type = "spec_machine"
def __init__(self, brand, photo_file_name, carrying, extra):
super().__init__(brand, photo_file_name, carrying)
self.extra = extra
@classmethod
def from_tuple(cls, row):
return cls(
row[cls.ix_brand],
row[cls.ix_photo_file_name],
row[cls.ix_carrying],
row[cls.ix_extra],
)
def get_car_list(csv_filename):
with open(csv_filename) as csv_fd:
# create a csv.reader object for reading the csv file
reader = csv.reader(csv_fd, delimiter=';')
# skip the csv header
next(reader)
# the list we will return
car_list = []
# build a dictionary mapping car type (car_type)
# to the class whose instances we will create
create_strategy = {car_class.car_type: car_class
for car_class in (Car, Truck, SpecMachine)}
# process the csv file line by line
for row in reader:
try:
# determine the car type
car_type = row[CarBase.ix_car_type]
except IndexError:
# if the csv row has too few columns, skip it
continue
try:
# look up the class whose instance should be created
# and appended to the resulting car_list
car_class = create_strategy[car_type]
except KeyError:
# if car_type is unknown, just skip the csv row
continue
try:
# create the object and add it to car_list
car_list.append(car_class.from_tuple(row))
except (ValueError, IndexError):
# if the data is invalid, ignore it
pass
return car_list
if __name__ == "__main__":
print(get_car_list(sys.argv[1]))
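# --- Illustrative note (not part of the original module) ---
# The ix_* constants above imply this semicolon-separated column layout
# (the sample values below are made up):
#
#   car_type;brand;passenger_seats_count;photo_file_name;body_whl;carrying;extra
#   car;Nissan Xterra;4;f1.jpeg;;1.5;
#   truck;Man;;f2.png;8x3x2.5;20;
#   spec_machine;Hitachi;;f3.jpg;;1.2;pipelayer
#
# Row 1 -> Car (uses passenger_seats_count), row 2 -> Truck (parses body_whl
# as length x width x height), row 3 -> SpecMachine (keeps the extra field).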
|
import os
from builtins import str
from builtins import object
from builtins import range
from retriever.lib.models import Engine
from retriever import DATA_DIR, open_fr, open_fw
from retriever.lib.tools import xml2csv, sort_csv
class DummyConnection(object):
def cursor(self):
pass
def commit(self):
pass
def rollback(self):
pass
def close(self):
pass
class DummyCursor(DummyConnection):
pass
class engine(Engine):
"""Engine instance for writing data to a XML file."""
name = "XML"
abbreviation = "xml"
datatypes = {
"auto": "INTEGER",
"int": "INTEGER",
"bigint": "INTEGER",
"double": "REAL",
"decimal": "REAL",
"char": "TEXT",
"bool": "INTEGER",
}
required_opts = [
("table_name",
"Format of table name",
os.path.join(DATA_DIR, "{db}_{table}.xml")),
]
table_names = []
def create_db(self):
"""Override create_db since there is no database just an XML file"""
return None
def create_table(self):
"""Create the table by creating an empty XML file"""
self.output_file = open_fw(self.table_name())
self.output_file.write(u'<?xml version="1.0" encoding="UTF-8"?>')
self.output_file.write(u'\n<root>')
self.table_names.append((self.output_file, self.table_name()))
self.auto_column_number = 1
def disconnect(self):
"""Close out the xml files
Close all the file objects that have been created
Re-write the files stripping off the last comma and then close with a closing tag)
"""
if self.table_names:
for output_file_i, file_name in self.table_names:
output_file_i.close()
current_input_file = open_fr(file_name)
file_contents = current_input_file.readlines()
current_input_file.close()
file_contents[-1] = file_contents[-1].strip(',')
current_output_file = open_fw(file_name)
current_output_file.writelines(file_contents)
current_output_file.write(u'\n</root>')
current_output_file.close()
self.table_names = []
def execute(self, statement, commit=True):
"""Write a line to the output file"""
self.output_file.writelines(statement)
def format_insert_value(self, value, datatype):
"""Formats a value for an insert statement"""
v = Engine.format_insert_value(self, value, datatype, escape=False, processed=True)
if v == 'null':
return ""
try:
if len(v) > 1 and v[0] == v[-1] == "'":
v = '"%s"' % v[1:-1]
except:
pass
return v
def insert_statement(self, values):
if not hasattr(self, 'auto_column_number'):
self.auto_column_number = 1
keys = self.table.get_insert_columns(join=False, create=True)
if self.table.columns[0][1][0][3:] == 'auto':
newrows = []
for rows in values:
insert_stmt = [self.auto_column_number] + rows
newrows.append(insert_stmt)
self.auto_column_number += 1
else:
newrows = values
xml_lines = ['\n<row>\n{}</row>'.format(self._format_single_row(keys, line_data)) for line_data in newrows]
return xml_lines
def _format_single_row(self, keys, line_data):
return ''.join(' <{key}>{value}</{key}>\n'.format(key=key, value=value) for key, value in zip(keys, line_data))
def table_exists(self, dbname, tablename):
"""Check to see if the data file currently exists"""
tablename = self.table_name(name=tablename, dbname=dbname)
return os.path.exists(tablename)
def to_csv(self):
"""Export table from xml engine to CSV file"""
for keys in list(self.script.tables):
table_name = self.opts['table_name'].format(db=self.db_name, table=keys)
header = self.script.tables[keys].get_insert_columns(join=False, create=True)
csv_outfile = xml2csv(table_name, header_values=header)
sort_csv(csv_outfile)
def get_connection(self):
"""Gets the db connection."""
self.get_input()
return DummyConnection()
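# --- Illustrative sketch (not part of the original module) ---
# insert_statement()/_format_single_row() above emit one <row> element per
# record, with one child tag per column. The snippet below reproduces that
# formatting standalone (column names and values are made up); it assumes the
# module-level imports above resolve when run directly.
if __name__ == "__main__":
    keys = ["genus", "species", "count"]
    line_data = ["Peromyscus", "maniculatus", 12]
    row_xml = '\n<row>\n{}</row>'.format(
        ''.join(' <{key}>{value}</{key}>\n'.format(key=k, value=v)
                for k, v in zip(keys, line_data)))
    print(row_xml)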
|
"""A data structure for a set of coronal hole tracking (CHT) algorithm.
Module purposes: (1) used as a holder for a window of frames and (2) matches coronal holes between frames.
Last Modified: July 21st, 2021 (Opal).
"""
import json
import numpy as np
import datetime as dt
import pickle
from chmap.coronal_holes.tracking.src.frame import Frame
from chmap.coronal_holes.tracking.src.knn import KNN
from chmap.coronal_holes.tracking.src.time_interval import *
from chmap.coronal_holes.tracking.src.areaoverlap import area_overlap, max_area_overlap
from chmap.coronal_holes.tracking.src.graph import CoronalHoleGraph
class CoronalHoleDB:
""" Coronal Hole Object Data Structure."""
# contour binary threshold.
BinaryThreshold = 0.7
# coronal hole area threshold.
AreaThreshold = 5E-3
# window to match coronal holes.
window = 1000
# window time interval (time-delta)
window_time_interval = dt.timedelta(days=7)
# parameter for longitude dilation (this should be changed for larger image dimensions).
gamma = 20
# parameter for latitude dilation (this should be changed for larger image dimensions).
beta = 10
# connectivity threshold.
ConnectivityThresh = 0.1
# area-overlap match threshold.
AreaMatchThresh = 5E-3
# knn k hyper parameter
kHyper = 10
# knn thresh
kNNThresh = 1E-3
# MeshMap with information about the input image mesh grid and pixel area.
Mesh = None
def __init__(self):
# connectivity graph.
self.Graph = CoronalHoleGraph()
# data holder for previous *window* frames.
self.window_holder = [None] * self.window
def __str__(self):
return json.dumps(
self.json_dict(), indent=4, default=lambda o: o.json_dict())
def json_dict(self):
return {
'num_frames': self.Graph.frame_num,
'num_coronal_holes': self.Graph.total_num_of_coronal_holes,
'num_of_nodes': self.Graph.G.number_of_nodes(),
'num_of_edges': self.Graph.G.number_of_edges(),
}
def _assign_id_coronal_hole(self, ch):
"""Assign a *unique* ID number to a coronal hole based on its class association."
Parameters
----------
ch: CoronalHole() object
Returns
-------
ch: with assigned id.
"""
# set the index id.
ch.id = self.Graph.total_num_of_coronal_holes + 1
# update coronal hole holder.
self.Graph.total_num_of_coronal_holes += 1
return ch
def _assign_color_coronal_hole(self, ch):
"""Assign a unique color (RBG) to coronal hole"
Parameters
----------
ch: CoronalHole() object
Returns
-------
ch: with assigned color.
"""
ch.color = self.generate_ch_color()
return ch
@staticmethod
def _assign_count_coronal_hole(ch, contour_list):
"""Assign a count to coronal hole, the number "
Parameters
----------
ch: CoronalHole() object
contour_list: list of contours found in previous frame.
Returns
-------
ch: with assigned count.
"""
count = 0
for contour in contour_list:
if contour.id == ch.id and contour != ch:
count += 1
ch.count = count
return ch
def update_previous_frames(self, frame):
"""Update *window* previous frame holders.
Parameters
----------
frame: new frame object Frame() see frame.py
Returns
-------
None
"""
# append the new frame to the end of the list.
self.window_holder.append(frame)
def adjust_window_size(self, mean_timestamp, list_of_timestamps):
"""Update the window holder as we add a new frame info.
:param mean_timestamp: the current timestamp.
:param list_of_timestamps: list of all the timestamps in the database.
:return: N/A
"""
# get window of frames that are within the time interval
new_window_size = get_number_of_frames_in_interval(curr_time=mean_timestamp,
time_window=self.window_time_interval,
list_of_timestamps=list_of_timestamps)
# if the window holder is now smaller than before.
if new_window_size < self.window:
self.window_holder = self.window_holder[-new_window_size:]
# this is not really possible - there is an error in the database.
elif new_window_size > self.window + 1:
raise ArithmeticError('The window size is invalid. ')
# update window to be the new window size.
self.window = new_window_size
def initialize_window_holder(self, db_session, query_start, query_end, map_vars, map_methods, prev_run_path):
"""Initialize the previous run history.
:param db_session: database session.
:param query_start: start-time timestamp.
:param query_end: end-time timestamp.
:param map_vars: map variables.
:param map_methods: define map type and grid to query.
:param prev_run_path: path to previous run pickle files.
:return: N/A
"""
# get list of timestamps in the window interval.
list_of_timestamps = get_time_interval_list(db_session, query_start,
query_end - dt.timedelta(seconds=1), map_vars, map_methods)
# update the window holder.
self.window_holder = read_prev_run_pkl_results(ordered_time_stamps=list_of_timestamps,
prev_run_path=prev_run_path)
@staticmethod
def generate_ch_color():
"""Generate a random color.
:return: list of 3 integers between 0 and 255.
"""
return np.random.randint(low=0, high=255, size=(3,)).tolist()
def assign_new_coronal_holes(self, contour_list, timestamp=None):
"""Match coronal holes to previous *window* of frames.
Parameters
----------
contour_list:
(list) current frame Contour() list.
timestamp:
(str) frame timestamp
Returns
-------
N/A
"""
# if this is the first frame in the video sequence then just save coronal holes.
if self.Graph.frame_num == 1:
for ii in range(len(contour_list)):
# assign a unique class ID to the contour object.
contour_list[ii] = self._assign_id_coronal_hole(ch=contour_list[ii])
                # assign a unique color (RGB) to the contour object.
contour_list[ii] = self._assign_color_coronal_hole(ch=contour_list[ii])
# update the color dictionary.
self.Graph.color_dict[contour_list[ii].id] = contour_list[ii].color
# add coronal hole as a node to graph.
self.Graph.insert_node(node=contour_list[ii])
        # this is *NOT* the first frame, so we need to match the new contours.
else:
# match coronal holes to previous *window* frames.
match_list, contour_list, area_overlap_results, area_check_list = \
self.global_matching_algorithm(contour_list=contour_list)
for ii in range(len(contour_list)):
# new coronal hole
if match_list[ii] == 0:
# assign a unique class ID number to the contour.
contour_list[ii] = self._assign_id_coronal_hole(ch=contour_list[ii])
                    # assign a unique color (RGB) to the contour.
contour_list[ii] = self._assign_color_coronal_hole(ch=contour_list[ii])
# update the color dictionary.
self.Graph.color_dict[contour_list[ii].id] = contour_list[ii].color
# existing coronal hole
else:
                    # assign the existing class ID that resulted in the best match
                    # (highest area overlap ratio).
contour_list[ii].id = match_list[ii]
                    # assign the corresponding color that all contours of this class have.
contour_list[ii].color = self.Graph.color_dict[contour_list[ii].id]
# assign count to contour.
contour_list[ii] = self._assign_count_coronal_hole(ch=contour_list[ii], contour_list=contour_list)
# add coronal hole as a node to graph.
self.Graph.insert_node(node=contour_list[ii])
# update graph edges -- connectivity.
self.update_connectivity_prev_frame(contour_list=contour_list)
# update the latest frame index number in graph.
self.Graph.max_frame_num = self.Graph.frame_num
# update window holder.
self.update_previous_frames(frame=Frame(contour_list=contour_list, identity=self.Graph.frame_num,
timestamp=timestamp, map_mesh=self.Mesh))
def global_matching_algorithm(self, contour_list):
"""Match coronal holes between sequential frames using KNN and area overlap probability.
Parameters
----------
contour_list: list of new coronal holes (identified in the latest frame, yet to be classified).
Returns
-------
1 List of all corresponding ID to each coronal hole in contour_list.
Note the corresponding ID is in order of coronal holes in contour_list.
"0" means "new class"
2 contour list
3 area overlap results
4 area check list
"""
# ==============================================================================================================
# KNN - K nearest neighbors for contour centroid location.
# ==============================================================================================================
# prepare dataset for K nearest neighbor algorithm.
X_train, Y_train, X_test = self.prepare_knn_data(contour_list=contour_list)
# fit the training data and classify.
classifier = KNN(X_train=X_train, X_test=X_test, Y_train=Y_train, K=self.kHyper, thresh=self.kNNThresh)
# ==============================================================================================================
# Area Overlap - Pixel overlap (connectivity and ID matching).
# ==============================================================================================================
        # if probability > KNN threshold then check its pixel area overlap (functions are in areaoverlap.py).
area_check_list = classifier.check_list
# compute the average area overlap ratio, this will be used for matching coronal holes and connectivity edges.
area_overlap_results = self.get_area_overlap_ratio_list(area_check_list=area_check_list,
contour_list=contour_list)
# return list of coronal holes corresponding unique ID.
match_list = max_area_overlap(area_check_list=area_check_list, area_overlap_results=area_overlap_results,
threshold=self.AreaMatchThresh)
        # return the match results along with the contour list and area overlap data.
return match_list, contour_list, area_overlap_results, area_check_list
def prepare_knn_data(self, contour_list):
""" Prepare X_train, Y_train, X_test for KNN algorithm.
Parameters
----------
contour_list: list of the contours identified in the latest frame.
Returns
-------
X_train, Y_train, X_test
"""
# prepare knn test dataset containing all the new coronal hole centroids in spherical coordinates. [theta, phi]
# initialize X_test
X_test = [ch.phys_centroid for ch in contour_list]
# prepare X_train and Y_train saved in self.window_holder.
X_train = []
Y_train = []
for frame in self.window_holder:
if frame is not None:
X_train.extend(frame.centroid_list)
Y_train.extend(frame.label_list)
return X_train, Y_train, X_test
def get_all_instances_of_class(self, class_id):
"""A list of all instances of contours in the last *window* of frames that have the specific class_id.
Parameters
----------
class_id: (int) class identification number.
Returns
-------
list of Contour() with class_id in the *window* of frames.
"""
res = []
for frame in self.window_holder:
if frame is not None:
for ch in frame.contour_list:
if ch.id == class_id:
res.append(ch)
return res
def get_area_overlap_ratio_list(self, area_check_list, contour_list):
"""Results of area overlap between the new coronal holes found in the latest frame and the coronal holes
saved in window_holder.
Parameters
----------
contour_list: list of new coronal holes (identified in the latest frame, yet to be classified).
area_check_list: list of coronal holes that need to be checked.
Returns
-------
A list of area overlap probability corresponding to area_check_list
"""
# initialize the returned list containing the average area overlap.
area_overlap_ratio_list = []
# iterator
ii = 0
# loop over suggested matches from KNN.
for ch_list in area_check_list:
# corresponding average ratio.
holder = []
# loop over every "suggested match" based on KNN.
# weighted mean.
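            # (each candidate score below is a weighted mean: sum_k w_k * (p1_k + p2_k) / 2
            #  divided by sum_k w_k, where the weight w_k reflects frame-timestamp proximity)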
for identity in ch_list:
# find all contours with "identity" labelling in the previous *window* of frames.
coronal_hole_list = self.get_all_instances_of_class(class_id=identity)
# save all ratios in this list and then average the elements.
p = []
# keep track of weight sum
weight_sum = 0
for ch in coronal_hole_list:
p1, p2 = area_overlap(ch1=ch, ch2=contour_list[ii], da=self.Mesh.da)
# weight is based on frame timestamp proximity measured in units of hours.
weight = time_distance(time_1=contour_list[ii].frame_timestamp, time_2=ch.frame_timestamp)
# weighted average.
p.append(weight * (p1 + p2) / 2)
# keep track of the sum of weights.
weight_sum += weight
# save the weighted average -> later used to dictate the ch id number.
holder.append(sum(p) / weight_sum)
area_overlap_ratio_list.append(holder)
ii += 1
return area_overlap_ratio_list
def update_connectivity_prev_frame(self, contour_list):
"""Update connectivity graph by checking area overlap with the previous frame contours.
Parameters
----------
contour_list: list of new coronal holes (identified in the latest frame, yet to be classified).
Returns
-------
N/A
"""
for curr_contour in contour_list:
for prev_contour in self.window_holder[-1].contour_list:
self.add_weighted_edge(contour1=prev_contour, contour2=curr_contour)
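            # if this contour belongs to an already existing class, also connect it to the most
            # recent appearance of that class anywhere in the *window* of previous frames.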
if curr_contour.id < self.Graph.total_num_of_coronal_holes:
prev_list = self.find_latest_contour_in_window(identity=curr_contour.id)
for prev_contour in prev_list:
self.add_weighted_edge(contour1=prev_contour, contour2=curr_contour)
def add_weighted_edge(self, contour1, contour2):
"""Add a weighted edge between two contours based on their area overlap.
Parameters
----------
contour1: Contour()
contour2: Contour()
Returns
-------
N/A
"""
p1, p2 = area_overlap(ch1=contour1, ch2=contour2, da=self.Mesh.da)
if (p1 + p2) / 2 > self.ConnectivityThresh:
self.Graph.insert_edge(node_1=contour1, node_2=contour2, weight=round((p1 + p2) / 2, 3))
def find_latest_contour_in_window(self, identity):
"""Find the latest contour of a specific id, in the window frame holder.
Parameters
----------
identity: (int)
Contour() ID number.
Returns
-------
Contour() object.
"""
ii = self.window - 1
while ii >= 0:
frame = self.window_holder[ii]
# initialize contour list.
node_list = []
if frame is None:
                # exit - the frame is fairly new and there is no point in continuing the iteration.
return []
else:
for contour in frame.contour_list:
if contour.id == identity:
node_list.append(contour)
# check if contour list is empty.
if len(node_list) > 0:
return node_list
            ii -= 1
return []
|
# Copyright (c) 2021 Sebastian Pipping <sebastian@pipping.org>
# Licensed under the MIT license
import base64
import hashlib
import os
import tempfile
from argparse import ArgumentParser
import requests
def download_url_to_file(url):
response = requests.get(url, allow_redirects=True)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(response.content)
return f.name
def obtain_hash_for_local_file(filename: str) -> str:
h = hashlib.sha256()
    with open(filename, 'rb') as f:
h.update(f.read())
bytes_digest = b'sha256-' + base64.b64encode(h.digest())
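    # The result is an SRI-style digest, e.g. "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=" for an empty file.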
return bytes_digest.decode('ascii')
def run(config):
delete_file = False
if config.filename_or_url.startswith('https://'):
filename = download_url_to_file(config.filename_or_url)
delete_file = True
else:
filename = config.filename_or_url
try:
print(obtain_hash_for_local_file(filename))
finally:
if delete_file:
os.remove(filename)
def main():
parser = ArgumentParser()
parser.add_argument('filename_or_url',
metavar='FILE|URL',
help='File or https:// URL to compute checksum for')
config = parser.parse_args()
run(config)
if __name__ == '__main__':
main()
|
from django import forms
from django.forms.fields import DateField
from .models import Exam, Question, AnsweredQuestion, Recommendation
BIRTH_YEAR_CHOICES = range(1940, 2019)
class TakeExamForm(forms.ModelForm):
birthdate = forms.DateField(
widget=forms.SelectDateWidget(years=BIRTH_YEAR_CHOICES, empty_label=("Escoja un año", "Escoja un mes", "Escoja un día"),), label="Fecha de nacimiento")
class Meta:
model = Exam
exclude = ['answered_questions', 'user', 'active', 'recommendation']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for question in Question.objects.filter(active=True):
self.fields[question.slug] = forms.ModelChoiceField(
queryset=question.answers, empty_label="Seleccione una respuesta", label=question.question_text)
    def clean(self):
        # No extra validation needed beyond the default ModelForm cleaning.
        return super().clean()
def save(self, user):
score = 0
exam = self.instance
exam.user = user
exam.save()
for question in Question.objects.filter(active=True):
score += self.cleaned_data[question.slug].weight
exam.answered_questions.add(
question, through_defaults={'answer': self.cleaned_data[question.slug]})
print(score)
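        # Pick the recommendation whose [lower, upper] score range contains the total score.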
try:
exam.recommendation = Recommendation.objects.get(
upper__gte=score, lower__lte=score)
except Exception:
exam.recommendation = None
exam.save(update_fields=['recommendation', ])
return exam
|
EMOTIONS = {
0:"anger",
1:"disgust",
2:"fear",
3:"happy",
4:"sad",
5:"surprise",
6:"neutral"
}
EMOTION2INTEGER = {
"anger":0,
"disgust":1,
"fear":2,
"happy":3,
"sad":4,
"surprise":5,
"neutral":6
}
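# Minimal sanity-check sketch (not part of the original mapping definitions above):
# the two dictionaries are inverses of each other.
if __name__ == "__main__":
    assert all(EMOTION2INTEGER[EMOTIONS[i]] == i for i in EMOTIONS)
    print("EMOTIONS and EMOTION2INTEGER are consistent inverses")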
|
""" Contains the Parameter class.
Copyright (c) 2014 Kenn Takara
See LICENSE for details
"""
from sympy import Symbol
from pysolve import InvalidNameError
from pysolve.variable import Variable
class Parameter(Symbol):
""" This class contains a 'parameter'. This is an exogenous
variable. The solver is not allowed to change this value
when solving a set of equations.
Attributes:
symbol:
name:
desc:
default:
value:
"""
# pylint: disable=too-many-ancestors
def __init__(self, name, desc=None, default=None):
if name in Variable.ILLEGAL_NAMES:
raise InvalidNameError(name, 'Name already used by sympy')
super().__init__()
self.name = name
self.desc = desc
self.default = default
self.model = None
self._index = None
self._value = default
@property
def value(self):
""" Getter accessor for parameter value """
return self._value
@value.setter
def value(self, val):
""" Setter accessor for parameter value """
self._value = val
class SeriesParameter(Parameter):
""" A parameter that can access the previous solution values.
Attributes:
name:
variable:
iteration:
default:
"""
# pylint: disable=too-many-ancestors
def __init__(self, name, variable=None, iteration=None, default=None):
super(SeriesParameter, self).__init__(name, default=default)
if variable is None or iteration is None:
raise ValueError('variable and iteration cannot be none')
self.variable = variable
self.iteration = iteration
@property
def value(self):
""" Returns the value of a variable at a another iteration.
If the iteration value is out-of-range, the variable's
default value is returned.
"""
try:
return self.variable.model.get_value(
self.variable, self.iteration)
except IndexError:
return self.variable.value or self.variable.default
|
# Copyright 2016-2019 Douglas G. Moore. All rights reserved.
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
"""
All of the currently implemented time series measures are only defined on
discretely-valued time series. However, in practice continuously-valued time
series are ubiquitous. There are two approaches to accommodating continuous
values.
The simplest is to *bin* the time series, forcing the values into discrete
states. This method has its downsides, namely that the binning is often a bit
unphysical and it can introduce bias. What's more, without some kind of guiding
principle it can be difficult to decide exactly which binning approach to use.
The second approach attempts to infer continuous probability distributions from
continuous data. This is potentially more robust, but more technically
difficult. Unfortunately, PyInform does not yet have an implementation of
information measures on continuous distributions.
This module (:py:mod:`pyinform.utils.binning`) provides a basic binning facility
via the :py:func:`.bin_series` function.
"""
import numpy as np
from ctypes import byref, c_double, c_int, c_ulong, POINTER
from pyinform import _inform
from pyinform.error import ErrorCode, error_guard
def series_range(series):
"""
Compute the range of a continuously-valued time series.
Examples:
.. doctest:: utils
>>> utils.series_range([0,1,2,3,4,5])
(5.0, 0.0, 5.0)
>>> utils.series_range([-0.1, 8.5, 0.02, -6.3])
(14.8, -6.3, 8.5)
:param sequence series: the time series
:returns: the range and the minimum/maximum values
:rtype: 3-tuple (float, float, float)
:raises InformError: if an error occurs within the ``inform`` C call
"""
xs = np.ascontiguousarray(series, dtype=np.float64)
data = xs.ctypes.data_as(POINTER(c_double))
min, max = c_double(), c_double()
e = ErrorCode(0)
rng = _inform_range(data, c_ulong(xs.size),
byref(min), byref(max), byref(e))
error_guard(e)
return rng, min.value, max.value
def bin_series(series, b=None, step=None, bounds=None):
"""
    Bin a continuously-valued time series.
The binning can be performed in any one of three ways.
.. rubric:: 1. Specified Number of Bins
The first is binning the time series into *b* uniform bins (with *b* an
integer).
.. doctest:: utils
>>> import numpy as np
>>> np.random.seed(2019)
>>> xs = 10 * np.random.rand(20)
>>> xs
array([9.03482214, 3.93080507, 6.23969961, 6.37877401, 8.80499069,
2.99172019, 7.0219827 , 9.03206161, 8.81381926, 4.05749798,
4.52446621, 2.67070324, 1.6286487 , 8.89214695, 1.48476226,
9.84723485, 0.32361219, 5.15350754, 2.01129047, 8.86010874])
>>> utils.bin_series(xs, b=2)
(array([1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1],
dtype=int32), 2, 4.761811327822174)
>>> utils.bin_series(xs, b=3)
(array([2, 1, 1, 1, 2, 0, 2, 2, 2, 1, 1, 0, 0, 2, 0, 2, 0, 1, 0, 2],
dtype=int32), 3, 3.1745408852147823)
With this approach the binned sequence (as an ``numpy.ndarray``), the number
of bins, and the size of each bin are returned.
This binning method is useful if, for example, the user wants to bin several
time series to the same base.
.. rubric:: 2. Fixed Size Bins
The second type of binning produces bins of a specific size *step*.
.. doctest:: utils
>>> utils.bin_series(xs, step=4.0)
(array([2, 0, 1, 1, 2, 0, 1, 2, 2, 0, 1, 0, 0, 2, 0, 2, 0, 1, 0, 2],
dtype=int32), 3, 4.0)
>>> utils.bin_series(xs, step=2.0)
(array([4, 1, 2, 3, 4, 1, 3, 4, 4, 1, 2, 1, 0, 4, 0, 4, 0, 2, 0, 4],
dtype=int32), 5, 2.0)
As in the previous case the binned sequence, the number of bins, and the
size of each bin are returned.
This approach is appropriate when the system at hand has a particular
sensitivity or precision, e.g. if the system is sensitive down to 5.0mV
changes in potential.
.. rubric:: 3. Thresholds
    The third type of binning breaks the real number line into segments with
specified boundaries or thresholds, and the time series is binned according
to this partitioning. The bounds are expected to be provided in ascending
order.
.. doctest:: utils
>>> utils.bin_series(xs, bounds=[2.0, 7.5])
(array([2, 1, 1, 1, 2, 1, 1, 2, 2, 1, 1, 1, 0, 2, 0, 2, 0, 1, 1, 2],
dtype=int32), 3, [2.0, 7.5])
>>> utils.bin_series(xs, bounds=[2.0])
(array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1],
dtype=int32), 2, [2.0])
Unlike the previous two types of binning, this approach returns the specific
    bounds rather than the bin sizes. The other two return values, the binned
    sequence and the number of bins, are the same as before.
This approach is useful in situations where the system has natural
    thresholds, e.g. the polarized/hyperpolarized states of a neuron.
:param sequence series: the continuously-valued time series
:param int b: the desired number of uniform bins
:param float step: the desired size of each uniform bin
:param sequence bounds: the (finite) bounds of each bin
:return: the binned sequence, the number of bins and either the bin sizes or bin bounds
:rtype: either (``numpy.ndarray``, int, float) or (``numpy.ndarray``, int, sequence)
:raises ValueError: if no keyword argument is provided
:raises ValueError: if more than one keyword argument is provided
:raises InformError: if an error occurs in the ``inform`` C call
"""
if b is None and step is None and bounds is None:
raise ValueError(
"must provide either number of bins, step size, or bin boundaries")
elif b is not None and step is not None:
raise ValueError("cannot provide both number of bins and step size")
elif b is not None and bounds is not None:
raise ValueError(
"cannot provide both number of bins and bin boundaries")
elif step is not None and bounds is not None:
raise ValueError("cannot provide both step size and bin boundaries")
xs = np.ascontiguousarray(series, dtype=np.float64)
data = xs.ctypes.data_as(POINTER(c_double))
binned = np.empty(xs.shape, dtype=np.int32)
out = binned.ctypes.data_as(POINTER(c_int))
e = ErrorCode(0)
if b is not None:
spec = _inform_bin(data, c_ulong(xs.size), c_int(b), out, byref(e))
elif step is not None:
spec = step
b = _inform_bin_step(data, c_ulong(xs.size),
c_double(step), out, byref(e))
elif bounds is not None:
boundaries = np.ascontiguousarray(bounds, dtype=np.float64)
bnds = boundaries.ctypes.data_as(POINTER(c_double))
spec = bounds
b = _inform_bin_bounds(data, c_ulong(
xs.size), bnds, c_ulong(boundaries.size), out, byref(e))
error_guard(e)
return binned, b, spec
_inform_range = _inform.inform_range
_inform_range.argtypes = [POINTER(c_double), c_ulong, POINTER(
c_double), POINTER(c_double), POINTER(c_int)]
_inform_range.restype = c_double
_inform_bin = _inform.inform_bin
_inform_bin.argtypes = [
POINTER(c_double), c_ulong, c_int, POINTER(c_int), POINTER(c_int)]
_inform_bin.restype = c_double
_inform_bin_step = _inform.inform_bin_step
_inform_bin_step.argtypes = [
POINTER(c_double), c_ulong, c_double, POINTER(c_int), POINTER(c_int)]
_inform_bin_step.restype = c_int
_inform_bin_bounds = _inform.inform_bin_bounds
_inform_bin_bounds.argtypes = [POINTER(c_double), c_ulong, POINTER(
c_double), c_ulong, POINTER(c_int), POINTER(c_int)]
_inform_bin_bounds.restype = c_int
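if __name__ == '__main__':
    # Minimal usage sketch (not part of the library API defined above); it runs only when
    # this module is executed directly and the underlying `inform` C library is available.
    # The input values are illustrative only.
    binned, b, spec = bin_series([0.0, 0.5, 1.0, 0.25, 0.75], b=2)
    print(binned, b, spec)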
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('skaba', '0005_event_date'),
]
operations = [
migrations.RemoveField(
model_name='attendance',
name='modified',
),
migrations.AddField(
model_name='attendance',
name='verified',
field=models.BooleanField(default=False),
),
]
|
import hashlib
import json
import time
from tools import logger as log
from tools import auction_house_data_logger as ah_log
import strategies
from strategies import support_functions
def auctionh_get_prices(**kwargs):
"""
A strategy to get prices from the auction house.
:param kwargs: strategy, listener, and orders_queue
:return: the input strategy with a report
"""
strategy = kwargs['strategy']
listener = kwargs['listener']
orders_queue = kwargs['orders_queue']
assets = kwargs['assets']
logger = log.get_logger(__name__, strategy['bot'])
ah_logger = ah_log.get_logger(__name__, strategy['bot'] + '_data')
global_start, start = time.time(), time.time()
n_new_entries = 0
sample_timestamp = int(time.time())
if 'sample_timestamp' in strategy['parameters']:
sample_timestamp = strategy['parameters']['sample_timestamp']
# Check that the auction house is open. If it is, close it and open it again.
# This is to prevent some edge case of item or type selection. See todos, lower in this script.
if not listener.game_state['auction_house_info']:
sub_strategy = strategies.auctionh_open.auctionh_open(
listener=listener,
orders_queue=orders_queue,
assets=assets,
strategy={
"bot": strategy['bot'],
"parameters": {
"mode": "buy"
}
}
)
if not sub_strategy['report']['success']:
strategy['report'] = {
'success': False,
'details': {'Execution time': time.time() - start, 'Reason': sub_strategy['report']}
}
log.close_logger(logger)
ah_log.close_logger(ah_logger)
return strategy
else:
sub_strategy = strategies.auctionh_close.auctionh_close(
listener=listener,
orders_queue=orders_queue,
assets=assets,
strategy={
"bot": strategy['bot'],
}
)
if not sub_strategy['report']['success']:
strategy['report'] = {
'success': False,
'details': {'Execution time': time.time() - start, 'Reason': sub_strategy['report']}
}
log.close_logger(logger)
ah_log.close_logger(ah_logger)
return strategy
sub_strategy = strategies.auctionh_open.auctionh_open(
listener=listener,
orders_queue=orders_queue,
assets=assets,
strategy={
"bot": strategy['bot'],
"parameters": {
"mode": "buy"
}
}
)
if not sub_strategy['report']['success']:
strategy['report'] = {
'success': False,
'details': {'Execution time': time.time() - start, 'Reason': sub_strategy['report']}
}
log.close_logger(logger)
ah_log.close_logger(ah_logger)
return strategy
if 'parameters' in strategy.keys() and 'general_ids_list' in strategy['parameters']:
if strategy['parameters']['general_ids_list'] not in [None, 'all']:
ids = strategy['parameters']['general_ids_list']
else:
ids = 'all'
else:
ids = 'all'
all = False
if ids == 'all':
all = True
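        # build the full list of item ids: keep only items whose category is sold in this
        # auction house and whose level is accessible (<= 60, or `sub_end` is set for subscribers).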
actual_ids = []
for item_id, type_id in assets['id_2_type'].items():
if type_id in listener.game_state['auction_house_info']['buyerDescriptor']['types']:
item_level = assets['id_2_level'][str(item_id)]
if item_level <= 60 or listener.game_state['sub_end']:
actual_ids.append(int(item_id))
ids = actual_ids
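    # group the requested item ids by their auction house category (type id) so that each
    # category only has to be selected once.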
id_with_types = {}
for item_id in ids:
type_id = assets['id_2_type'][str(item_id)]
if type_id in id_with_types.keys():
id_with_types[type_id].append(item_id)
else:
id_with_types[type_id] = [item_id]
results = {}
for type_id, item_ids in id_with_types.items():
previous_available_ids = listener.game_state['auction_house_info']['items_available'] if 'items_available' in listener.game_state['auction_house_info'].keys() else []
order = {
"command": "auctionh_select_category",
"parameters": {
"category_id": type_id
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 10 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if 'auction_house_info' in listener.game_state.keys() and 'items_available' in listener.game_state['auction_house_info']:
# FIXME: This test is going to wrongly fail if asked to switch from a category to the same one
if listener.game_state['auction_house_info']['items_available'] != previous_available_ids:
waiting = False
time.sleep(0.05)
execution_time = time.time() - start
if waiting:
logger.warn('Failed to change categories in {}s'.format(execution_time))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Failed to change categories'}
}
log.close_logger(logger)
ah_log.close_logger(ah_logger)
return strategy
for item_id in item_ids:
if item_id in listener.game_state['auction_house_info']['items_available']:
previous_available_ids = str(listener.game_state['auction_house_info']['item_selected'][-1]) if 'item_selected' in listener.game_state['auction_house_info'].keys() else []
order = {
"command": "auctionh_select_item",
"parameters": {
"general_id": item_id
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 10 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if 'auction_house_info' in listener.game_state.keys() and 'item_selected' in listener.game_state['auction_house_info']:
# FIXME: This test is going to wrongly fail if asked to switch from an item to the same one
if str(listener.game_state['auction_house_info']['item_selected'][-1]) != str(previous_available_ids):
waiting = False
time.sleep(0.05)
execution_time = time.time() - start
if waiting:
logger.warn('Failed to select item')
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Failed to select item {}/{}'.format(item_id, assets['id_2_names'][str(item_id)])}
}
log.close_logger(logger)
ah_log.close_logger(ah_logger)
return strategy
item_name = assets['id_2_names'][str(item_id)]
object_type = 'item' if int(item_id) in assets['hdv_2_id']['Equipements'] else 'resource'
objects = listener.game_state['auction_house_info']['actual_item_selected']
for object in objects:
if object_type == 'item':
formatted_object = {
'item_id': item_id,
'item_name': item_name,
'item_type': object_type,
'server': listener.game_state['server'],
'price_1': object['prices'][0],
'price_10': object['prices'][1],
'price_100': object['prices'][2],
'stats': object['effects'],
'hash': hashlib.sha256((item_name + str(object['effects']) + str(object['prices'][0]) + str(object['prices'][1]) + str(object['prices'][2])).encode('utf8')).hexdigest(),
'sample_id': int(sample_timestamp)
}
if object_type == 'resource':
formatted_object = {
'item_id': item_id,
'item_name': item_name,
'item_type': object_type,
'server': listener.game_state['server'],
'price_1': object['prices'][0],
'price_10': object['prices'][1],
'price_100': object['prices'][2],
'sample_id': int(sample_timestamp)
}
ah_logger.info(json.dumps(formatted_object, ensure_ascii=False))
n_new_entries += 1
results[item_id] = {
'item_name': assets['id_2_names'][str(item_id)],
'items_stats': listener.game_state['auction_house_info']['actual_item_selected']
}
if all:
strategy['report'] = {
'success': True,
'details': {'Execution time': time.time() - global_start, 'Number of new entries': n_new_entries}
}
else:
strategy['report'] = {
'success': True,
'details': {'Execution time': time.time() - global_start, 'Results': results}
}
log.close_logger(logger)
ah_log.close_logger(ah_logger)
return strategy
|
# -*- coding: utf-8 -*-
"""
Allow us to use lib as a package
"""
|
# this script needs either the ViennaRNA python bindings or the RNAfold binary to be
# available to predict RNA secondary structures
def measure_rbp(entry):
import os
from time import time
from pysster import utils
output_folder = entry[4] + "_pysster/"
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
start = time()
# predict secondary structures
utils.predict_structures(entry[0], entry[0]+".struct", annotate=True)
utils.predict_structures(entry[1], entry[1]+".struct", annotate=True)
utils.predict_structures(entry[2], entry[2]+".struct", annotate=True)
utils.predict_structures(entry[3], entry[3]+".struct", annotate=True)
from pysster.Data import Data
from pysster.Model import Model
# load data
data = Data([entry[0]+".struct", entry[1]+".struct"], ("ACGU", "HIMS"))
data.train_val_test_split(0.8, 0.1999) # we need to have at least one test sequence, even though we have a separate test object
# training
params = {"kernel_len": 8}
model = Model(params, data)
model.train(data)
# load and predict test data
data_test = Data([entry[2]+".struct", entry[3]+".struct"], ("ACGU", "HIMS"))
predictions = model.predict(data_test, "all")
stop = time()
print("{}, time in seconds: {}".format(entry[4], stop-start))
# performance evaluation
labels = data_test.get_labels("all")
utils.plot_roc(labels, predictions, output_folder+"roc.pdf")
utils.plot_prec_recall(labels, predictions, output_folder+"prec.pdf")
# get motifs
activations = model.get_max_activations(data_test, "all")
_ = model.visualize_all_kernels(activations, data_test, output_folder)
# save model to drive
utils.save_model(model, "{}model.pkl".format(output_folder))
if __name__ == "__main__":
from multiprocessing import Process
rbps = [("data/pum2.train.positive.fasta", "data/pum2.train.negative.fasta",
"data/pum2.test.positive.fasta", "data/pum2.test.negative.fasta", "PUM2"),
("data/qki.train.positive.fasta", "data/qki.train.negative.fasta",
"data/qki.test.positive.fasta", "data/qki.test.negative.fasta", "QKI"),
("data/igf2bp123.train.positive.fasta", "data/igf2bp123.train.negative.fasta",
"data/igf2bp123.test.positive.fasta", "data/igf2bp123.test.negative.fasta", "IGF2BP123"),
("data/srsf1.train.positive.fasta", "data/srsf1.train.negative.fasta",
"data/srsf1.test.positive.fasta", "data/srsf1.test.negative.fasta", "SRSF1"),
("data/taf2n.train.positive.fasta", "data/taf2n.train.negative.fasta",
"data/taf2n.test.positive.fasta", "data/taf2n.test.negative.fasta", "TAF2N"),
("data/nova.train.positive.fasta", "data/nova.train.negative.fasta",
"data/nova.test.positive.fasta", "data/nova.test.negative.fasta", "NOVA")]
for entry in rbps:
p = Process(target=measure_rbp, args=(entry,))
p.start()
p.join()
|
from PIL import Image
def resize(file_path="./.tmp/bg.jpg", size=(128, 128)):
try:
outfile = "./.tmp/bg_{}x{}.jpg".format(size[0], size[1])
im = Image.open(file_path)
im.thumbnail(size, Image.ANTIALIAS)
im.save(outfile, "JPEG")
return outfile
except Exception:
return None
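if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): relies on the default path "./.tmp/bg.jpg"
    # from the signature above; resize() returns None if the file cannot be opened.
    print(resize(size=(64, 64)))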
|
'''
Authors: Alex Wong <alexw@cs.ucla.edu>, Safa Cicek <safacicek@ucla.edu>
If this code is useful to you, please cite the following paper:
A. Wong, S. Cicek, and S. Soatto. Learning topology from synthetic data for unsupervised depth completion.
In the Robotics and Automation Letters (RA-L) 2021 and Proceedings of International Conference on Robotics and Automation (ICRA) 2021
@article{wong2021learning,
title={Learning topology from synthetic data for unsupervised depth completion},
author={Wong, Alex and Cicek, Safa and Soatto, Stefano},
journal={IEEE Robotics and Automation Letters},
volume={6},
number={2},
pages={1495--1502},
year={2021},
publisher={IEEE}
}
'''
import tensorflow as tf
def remove_outliers(sparse_depth, threshold=1.5, kernel_size=7):
'''
Outlier removal by filtering those points with large distance discrepancy
Args:
sparse_depth : tensor
N x H x W x 1 sparse depth map
threshold : float
threshold to consider a point an outlier
kernel_size : int
kernel size to use for filtering outliers
Returns:
        tensor : N x H x W x 1 sparse depth map with outlier points removed (set to zero)
'''
max_val = tf.reduce_max(sparse_depth) + 100.0
# We only care about min, so we remove all zeros by setting to max
sparse_depth_mod = tf.where(
sparse_depth <= 0.0,
max_val * tf.ones_like(sparse_depth),
sparse_depth)
# Find the neighborhood minimum
n_pad = int(kernel_size / 2)
sparse_depth_mod = tf.pad(
sparse_depth_mod,
paddings=[[0, 0], [n_pad, n_pad], [n_pad, n_pad], [0, 0]],
mode='CONSTANT',
constant_values=max_val)
patches = tf.extract_image_patches(
sparse_depth_mod,
ksizes=[1, kernel_size, kernel_size, 1],
strides=[1, 1, 1, 1],
rates=[1, 1, 1, 1],
padding='VALID')
sparse_depth_min = tf.reduce_min(patches, axis=-1, keepdims=True)
    # Mark all possible occlusions (outliers) as zeros
return tf.where(
sparse_depth_min < sparse_depth - threshold,
tf.zeros_like(sparse_depth),
sparse_depth)
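if __name__ == '__main__':
    # Minimal usage sketch (not from the original project): assumes TensorFlow 1.x graph
    # mode, consistent with the tf.extract_image_patches call above; the random depth map
    # is illustrative only.
    import numpy as np
    sparse_depth = tf.constant(np.random.rand(1, 64, 64, 1), dtype=tf.float32)
    filtered = remove_outliers(sparse_depth, threshold=1.5, kernel_size=7)
    with tf.Session() as sess:
        print(sess.run(filtered).shape)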
|
# coding: utf-8
import numpy as np
import pandas as pd
##################################################
# Main
##################################################
if __name__ == '__main__':
df = pd.DataFrame( {
"id": range(1,7),
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e'],
"categorical_grade": pd.Categorical(['a', 'b', 'b', 'a', 'a', 'e'])
} )
print("------------------------------------------------------------------")
print("df:")
print(df)
print("------------------------------------------------------------------")
print("Convert the raw grades to a categorical data type.")
print("----------------------------------------")
df['grade'] = df['raw_grade'].astype('category')
print("df['raw_grade'].astype('category')")
print(df['grade'])
print("------------------------------------------------------------------")
print("Rename the categories to more meaningful names (assigning to Series.cat.categories is inplace!).")
print("----------------------------------------")
df['grade'].cat.categories = ['very good', 'good', 'very bad']
print("[df['grade'].cat.categories = ['very good', 'good', 'very bad']]")
print(df['grade'])
print("------------------------------------------------------------------")
print("Reorder the categories and simultaneously add the missing categories")
print("(methods under Series .cat return a new Series by default).")
print("----------------------------------------")
df['grade'] = df['grade'].cat.set_categories(
['very bad', 'bad', 'medium', 'good', 'very good'])
print(df['grade'])
print("------------------------------------------------------------------")
print("Sorting is per order in the categories, not lexical order.")
print("----------------------------------------")
df_sorted = df.sort_values(by='grade')
print("[df.sort_values(by='grade')]")
print(df_sorted)
print("------------------------------------------------------------------")
print("Grouping by a categorical column also shows empty categories.")
print("----------------------------------------")
df_grouped = df.groupby('grade').size()
print("[df.groupby('grade').size()]")
print(df_grouped)
|
#!/usr/bin/env python
"""Splunk App for Pagerduty."""
__author__ = 'Greg Albrecht <gba@onbeep.com>'
__copyright__ = 'Copyright 2014 OnBeep, Inc.'
__license__ = 'Apache License, Version 2.0'
from .pagerduty import (PagerDutyException, PagerDuty, extract_events, # NOQA
trigger_pagerduty, get_pagerduty_api_key)
|
from . import data_access_layer as DAL
import web
from web import Appication,NestableBlueprint,jsonify
# from flask.views import MethodView,View
import json
from web.restful import Resource
class UserAPI(Resource):
Model=DAL.User
def create_app():
app=Appication(__name__)
app=UserAPI.register_api(app,endpoint='user_api',url='/users/',pk='resource_id',pk_type='string')
return app
|
# -*- coding: utf-8 -*-
#
# GIS Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3gis.py
import unittest
import datetime
from gluon import *
from gluon.storage import Storage
from s3 import *
from unit_tests import run_suite
# =============================================================================
class S3LocationTreeTests(unittest.TestCase):
""" Location Tree update tests """
# -------------------------------------------------------------------------
@classmethod
def setUpClass(cls):
current.auth.override = True
spatialdb = current.deployment_settings.get_gis_spatialdb()
table = current.s3db.gis_location
fields = [table.inherited,
table.lat,
table.lon,
table.lat_min,
table.lat_max,
table.lon_min,
table.lon_max,
table.parent,
table.path,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
]
if spatialdb:
fields.append(table.the_geom)
cls.spatialdb = spatialdb
cls.table = table
cls.fields = fields
cls.L0 = "New Country"
cls.L1 = "Normal L1"
cls.L2 = "Normal L2"
cls.L3 = "Normal L3"
cls.L4 = "Normal L4"
cls.L5 = "Normal L5"
cls.ids = {}
# -------------------------------------------------------------------------
def testL0_with_level(self):
""" Test updating a Country with Polygon - including level optimisation """
self._testL0(with_level=True)
# -------------------------------------------------------------------------
def testL0_without_level(self):
""" Test updating a Country with Polygon - without level optimisation """
self._testL0(with_level=False)
# -------------------------------------------------------------------------
def testL1_with_level(self):
""" Test updating a normal L1, with L0 Parent - including level optimisation """
self._testL1(with_level=True)
# -------------------------------------------------------------------------
def testL1_without_level(self):
""" Test updating a normal L1, with L0 Parent - without level optimisation """
self._testL1(with_level=False)
# -------------------------------------------------------------------------
def testL2_with_level(self):
""" Test updating a normal L2, with normal L1 Parent - including level optimisation """
self._testL2(with_level=True)
# -------------------------------------------------------------------------
def testL2_without_level(self):
""" Test updating a normal L2, with normal L1 Parent - without level optimisation """
self._testL2(with_level=False)
# -------------------------------------------------------------------------
def testL3_with_level(self):
""" Test updating a normal L3, with normal L2 Parent - including level optimisation """
self._testL3(with_level=True)
# -------------------------------------------------------------------------
def testL3_without_level(self):
""" Test updating a normal L3, with normal L2 Parent - without level optimisation """
self._testL3(with_level=False)
# -------------------------------------------------------------------------
def testL4_with_level(self):
""" Test updating a normal L4, with normal L3 Parent - including level optimisation """
self._testL4(with_level=True)
# -------------------------------------------------------------------------
def testL4_without_level(self):
""" Test updating a normal L4, with normal L3 Parent - without level optimisation """
self._testL4(with_level=False)
# -------------------------------------------------------------------------
def testL5_with_level(self):
""" Test updating a normal L5, with normal L4 Parent - including level optimisation """
self._testL5(with_level=True)
# -------------------------------------------------------------------------
def testL5_without_level(self):
""" Test updating a normal L5, with normal L4 Parent - without level optimisation """
self._testL5(with_level=False)
# -------------------------------------------------------------------------
def testL3_L1_parent_with_level(self):
""" Test updating an L3, without an L2 Parent, going straight to L1 - including level optimisation """
self._testL3_L1_parent(with_level=True)
# -------------------------------------------------------------------------
def testL3_L1_parent_without_level(self):
""" Test updating an L3, without an L2 Parent, going straight to L1 - without level optimisation """
self._testL3_L1_parent(with_level=False)
# -------------------------------------------------------------------------
def testULT1_update_location_tree_disabled(self):
""" Test inserting a location during prepop with location tree updates disabled """
from s3.s3gis import GIS
table = self.table
db = current.db
gis = current.gis
#GIS = s3base.GIS
# Insert a country
L0_lat = 10.0
L0_lon = -10.0
L0_id = table.insert(level = "L0",
name = "s3gis.testULT1.L0",
lat = L0_lat,
lon = L0_lon,
)
# Insert a child location
L1_id = table.insert(level = "L1",
name = "s3gis.testULT1.L1",
parent = L0_id,
)
# When disable_update_location_tree is set to True, update_location_tree
# should just return without making changes.
GIS.disable_update_location_tree = True
L1_feature = dict(id = L1_id)
gis.update_location_tree(L1_feature)
# Verify that the path, lat, lon are unset and inherited is False.
L1_record = db(table.id == L1_id).select(*self.fields,
limitby=(0, 1)
).first()
self.assertEqual(L1_record.inherited, False)
self.assertEqual(L1_record.path, None)
self.assertEqual(L1_record.lat, None)
self.assertEqual(L1_record.lon, None)
# Update again, this time in the normal case with location tree
# updates active.
GIS.disable_update_location_tree = False
gis.update_location_tree(L1_feature)
# Verify that the path, lat, lon, inherited are properly set.
L1_record = db(table.id == L1_id).select(*self.fields,
limitby=(0, 1)
).first()
self.assertEqual(L1_record.inherited, True)
self.assertEqual(L1_record.path, "%s/%s" %(L0_id, L1_id))
self.assertEqual(L1_record.lat, L0_lat)
self.assertEqual(L1_record.lon, L0_lon)
# -------------------------------------------------------------------------
def testULT2_update_location_tree_all_locations(self):
""" Test that the all locations update updates locations """
from s3.s3gis import GIS
table = self.table
db = current.db
gis = current.gis
# Mimic doing prepopulate by turning off location tree updates.
# Has no effect here as we're not doing validation, but leave this in
# in case someone modifies this test so it does call validation.
GIS.disable_update_location_tree = True
# Insert a country
L0_lat = 10.0
L0_lon = -10.0
L0_id = table.insert(level = "L0",
name = "s3gis.testULT2.L0",
lat = L0_lat,
lon = L0_lon,
)
# Insert a child location
L1_id = table.insert(level = "L1",
name = "s3gis.testULT2.L1",
parent = L0_id,
)
# And a child of that child
L2_id = table.insert(level = "L2",
name = "s3gis.testULT2.L2",
parent = L1_id,
)
# And a specific location at the end
specific_id = table.insert(
name = "s3gis.testULT2.specific",
parent = L2_id,
)
# After prepop data is loaded, an update of all locations is run.
GIS.disable_update_location_tree = False
gis.update_location_tree()
# Verify that the path, lat, lon, and inherited are set for the
# descendent locations. (Note we are verifying *that* update was run
# on the descendents, rather than checking in detail what the update
# did to each field.)
L1_record = db(table.id == L1_id).select(*self.fields,
limitby=(0, 1)
).first()
self.assertEqual(L1_record.inherited, True)
self.assertEqual(L1_record.path, "%s/%s" % (L0_id, L1_id))
self.assertEqual(L1_record.lat, L0_lat)
self.assertEqual(L1_record.lon, L0_lon)
L2_record = db(table.id == L2_id).select(*self.fields,
limitby=(0, 1)
).first()
self.assertEqual(L2_record.inherited, True)
self.assertEqual(L2_record.path, "%s/%s/%s" % (L0_id, L1_id, L2_id))
self.assertEqual(L2_record.lat, L0_lat)
self.assertEqual(L2_record.lon, L0_lon)
specific_record = db(table.id == specific_id).select(*self.fields,
limitby=(0, 1)
).first()
self.assertEqual(specific_record.inherited, True)
self.assertEqual(specific_record.path, "%s/%s/%s/%s" % (L0_id, L1_id, L2_id, specific_id))
self.assertEqual(specific_record.lat, L0_lat)
self.assertEqual(specific_record.lon, L0_lon)
# -------------------------------------------------------------------------
def testULT3_update_location_tree_all_locations_no_infinite_recursion(self):
""" Test that the all locations update does not get a "too much recursion" error. """
# NB: This was found to happen if there was a hierarchy location that
# pointed to a parent that is not the immediate next level.
table = self.table
# Set up a pattern of locations known to have provoked this error.
# Insert a country
L0_id = table.insert(level = "L0",
name = "s3gis.testULT3.L0",
lat = 10.0,
lon = -10.0,
)
# Insert an L1 child location
L1_id = table.insert(level = "L1",
name = "s3gis.testULT3.L1",
parent = L0_id,
)
# And a child of that child, but skipping over L2 to L3
L3_id = table.insert(level = "L3",
name = "s3gis.testULT3.L3",
parent = L1_id,
)
# And a specific location at the end
specific_id = table.insert(
name = "s3gis.testULT3.specific",
parent = L3_id,
)
# Capture log messages.
log_recorder = current.log.recorder()
# Run an update.
current.gis.update_location_tree()
# Retrieve the log messages.
log_messages = log_recorder.stop()
# Did we get the recursion error?
self.assertNotIn("too much recursion", log_messages)
# -------------------------------------------------------------------------
def testULT4_get_parents(self):
""" Test get_parents in a case that causes it to call update_location_tree. """
table = self.table
gis = current.gis
# Add locations with parents, but don't include a path. Skip one level.
        # (This is a test of get_parents itself, as update_location_tree was not
# known to cause a problem when run on one location.)
# Insert a country
L0_id = table.insert(level = "L0",
name = "s3gis.testULT4.L0",
lat = 10.0,
lon = -10.0,
)
# Insert an L1 child location
L1_id = table.insert(level = "L1",
name = "s3gis.testULT4.L1",
parent = L0_id,
)
# And a child of that child, but skipping over L2 to L3
L3_id = table.insert(level = "L3",
name = "s3gis.testULT4.L3",
parent = L1_id,
)
# And a specific location at the end
specific_id = table.insert(
name = "s3gis.testULT4.specific",
parent = L3_id,
)
# Ask for the parents of the specific location -- this has the side
# effect of calling update_location_tree and filling in the paths.
parents = gis.get_parents(specific_id)
# Expected parents.
expected_parents = [L0_id, L1_id, L3_id]
for parent in parents:
parent_id = parent.id
self.assertIn(parent_id, expected_parents)
expected_parents.remove(parent_id)
# We should have seen all the expected parents.
self.assertEqual(len(expected_parents), 0)
# -------------------------------------------------------------------------
def _testL0(self, with_level):
""" Test updating a Country with Polygon """
LEVEL = "L0"
POLYGON = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"
L0 = "%s_%s" % (self.L0, with_level)
table = self.table
# Insert a new Country
form = Storage(vars=Storage(level=LEVEL,
name=L0,
wkt=POLYGON,
),
errors=None,
)
current.gis.wkt_centroid(form)
L0_id = table.insert(**form.vars)
# Store for future tests
S3LocationTreeTests.ids[L0] = L0_id
# Update the Location Tree for the L0
feature = dict(id=L0_id)
if with_level:
feature["level"] = LEVEL
current.gis.update_location_tree(feature)
# Read the results
record = current.db(table.id == L0_id).select(*self.fields,
limitby=(0, 1)
).first()
# Compare to what we expect
self.assertEqual(record.inherited, False)
self.assertAlmostEqual(record.lat, 26.969696969696972, 13)
self.assertAlmostEqual(record.lon, 25.454545454545453, 13)
self.assertEqual(record.lat_min, 10)
self.assertEqual(record.lat_max, 40)
self.assertEqual(record.lon_min, 10)
self.assertEqual(record.lon_max, 40)
self.assertEqual(record.parent, None)
self.assertEqual(record.path, "%s" % L0_id)
self.assertEqual(record.wkt, POLYGON)
self.assertEqual(record.L0, L0)
self.assertEqual(record.L1, None)
self.assertEqual(record.L2, None)
self.assertEqual(record.L3, None)
self.assertEqual(record.L4, None)
self.assertEqual(record.L5, None)
if self.spatialdb:
self.assertTrue(record.the_geom is not None)
# -------------------------------------------------------------------------
def _testL1(self, with_level):
""" Test updating a normal L1, with L0 Parent """
LEVEL = "L1"
POLYGON = "POLYGON ((30 11, 39 39, 21 39, 11 20, 30 11))"
L0 = "%s_%s" % (self.L0, with_level)
L1 = "%s_%s" % (self.L1, with_level)
L0_id = self.ids[L0]
table = self.table
# Insert a new L1
form = Storage(vars=Storage(level=LEVEL,
name=L1,
parent=L0_id,
wkt=POLYGON,
),
errors=None,
)
current.gis.wkt_centroid(form)
L1_id = table.insert(**form.vars)
# Store for future tests
S3LocationTreeTests.ids[L1] = L1_id
# Update the Location Tree for the L1
feature = dict(id=L1_id)
if with_level:
feature["level"] = LEVEL
current.gis.update_location_tree(feature)
# Read the results
record = current.db(table.id == L1_id).select(*self.fields,
limitby=(0, 1)
).first()
# Compare to what we expect
self.assertEqual(record.inherited, False)
self.assertAlmostEqual(record.lat, 26.675741710296684, 13)
self.assertAlmostEqual(record.lon, 25.59232111692845, 13)
self.assertEqual(record.lat_min, 11)
self.assertEqual(record.lat_max, 39)
self.assertEqual(record.lon_min, 11)
self.assertEqual(record.lon_max, 39)
self.assertEqual(record.parent, L0_id)
self.assertEqual(record.path, "%s/%s" % (L0_id, L1_id))
self.assertEqual(record.wkt, POLYGON)
self.assertEqual(record.L0, L0)
self.assertEqual(record.L1, L1)
self.assertEqual(record.L2, None)
self.assertEqual(record.L3, None)
self.assertEqual(record.L4, None)
self.assertEqual(record.L5, None)
if self.spatialdb:
self.assertTrue(record.the_geom is not None)
# -------------------------------------------------------------------------
def _testL2(self, with_level):
""" Test updating a normal L2, with normal L1 Parent """
LEVEL = "L2"
POLYGON = "POLYGON ((30 12, 38 38, 22 38, 12 20, 30 12))"
L0 = "%s_%s" % (self.L0, with_level)
L1 = "%s_%s" % (self.L1, with_level)
L2 = "%s_%s" % (self.L2, with_level)
L0_id = self.ids[L0]
L1_id = self.ids[L1]
table = self.table
# Insert a new L2
form = Storage(vars=Storage(level=LEVEL,
name=L2,
parent=L1_id,
wkt=POLYGON,
),
errors=None,
)
current.gis.wkt_centroid(form)
L2_id = table.insert(**form.vars)
# Store for future tests
S3LocationTreeTests.ids[L2] = L2_id
# Update the Location Tree for the L2
feature = dict(id=L2_id)
if with_level:
feature["level"] = LEVEL
current.gis.update_location_tree(feature)
# Read the results
record = current.db(table.id == L2_id).select(*self.fields,
limitby=(0, 1)
).first()
# Compare to what we expect
self.assertEqual(record.inherited, False)
self.assertAlmostEqual(record.lat, 26.37723577235772, 13)
self.assertAlmostEqual(record.lon, 25.73008130081301, 13)
self.assertEqual(record.lat_min, 12)
self.assertEqual(record.lat_max, 38)
self.assertEqual(record.lon_min, 12)
self.assertEqual(record.lon_max, 38)
self.assertEqual(record.parent, L1_id)
self.assertEqual(record.path, "%s/%s/%s" % (L0_id, L1_id, L2_id))
self.assertEqual(record.wkt, POLYGON)
self.assertEqual(record.L0, L0)
self.assertEqual(record.L1, L1)
self.assertEqual(record.L2, L2)
self.assertEqual(record.L3, None)
self.assertEqual(record.L4, None)
self.assertEqual(record.L5, None)
if self.spatialdb:
self.assertTrue(record.the_geom is not None)
# -------------------------------------------------------------------------
def _testL3(self, with_level):
""" Test updating a normal L3, with normal L2 Parent """
LEVEL = "L3"
POLYGON = "POLYGON ((30 13, 37 37, 23 37, 13 20, 30 13))"
L0 = "%s_%s" % (self.L0, with_level)
L1 = "%s_%s" % (self.L1, with_level)
L2 = "%s_%s" % (self.L2, with_level)
L3 = "%s_%s" % (self.L3, with_level)
L0_id = self.ids[L0]
L1_id = self.ids[L1]
L2_id = self.ids[L2]
table = self.table
# Insert a new L3
form = Storage(vars=Storage(level=LEVEL,
name=L3,
parent=L2_id,
wkt=POLYGON,
),
errors=None,
)
current.gis.wkt_centroid(form)
L3_id = table.insert(**form.vars)
# Store for future tests
S3LocationTreeTests.ids[L3] = L3_id
# Update the Location Tree for the L3
feature = dict(id=L3_id)
if with_level:
feature["level"] = LEVEL
current.gis.update_location_tree(feature)
# Read the results
record = current.db(table.id == L3_id).select(*self.fields,
limitby=(0, 1)
).first()
# Compare to what we expect
self.assertEqual(record.inherited, False)
self.assertAlmostEqual(record.lat, 26.072901678657075, 13)
self.assertAlmostEqual(record.lon, 25.867625899280576, 13)
self.assertEqual(record.lat_min, 13)
self.assertEqual(record.lat_max, 37)
self.assertEqual(record.lon_min, 13)
self.assertEqual(record.lon_max, 37)
self.assertEqual(record.parent, L2_id)
self.assertEqual(record.path, "%s/%s/%s/%s" % (L0_id, L1_id, L2_id, L3_id))
self.assertEqual(record.wkt, POLYGON)
self.assertEqual(record.L0, L0)
self.assertEqual(record.L1, L1)
self.assertEqual(record.L2, L2)
self.assertEqual(record.L3, L3)
self.assertEqual(record.L4, None)
self.assertEqual(record.L5, None)
if self.spatialdb:
self.assertTrue(record.the_geom is not None)
# -------------------------------------------------------------------------
def _testL4(self, with_level):
""" Test updating a normal L4, with normal L3 Parent """
LEVEL = "L4"
POLYGON = "POLYGON ((30 14, 36 36, 24 36, 14 20, 30 14))"
L0 = "%s_%s" % (self.L0, with_level)
L1 = "%s_%s" % (self.L1, with_level)
L2 = "%s_%s" % (self.L2, with_level)
L3 = "%s_%s" % (self.L3, with_level)
L4 = "%s_%s" % (self.L4, with_level)
L0_id = self.ids[L0]
L1_id = self.ids[L1]
L2_id = self.ids[L2]
L3_id = self.ids[L3]
table = self.table
# Insert a new L4
form = Storage(vars=Storage(level=LEVEL,
name=L4,
parent=L3_id,
wkt=POLYGON,
),
errors=None,
)
current.gis.wkt_centroid(form)
L4_id = table.insert(**form.vars)
# Store for future tests
S3LocationTreeTests.ids[L4] = L4_id
# Update the Location Tree for the L4
feature = dict(id=L4_id)
if with_level:
feature["level"] = LEVEL
current.gis.update_location_tree(feature)
# Read the results
record = current.db(table.id == L4_id).select(*self.fields,
limitby=(0, 1)
).first()
# Compare to what we expect
self.assertEqual(record.inherited, False)
self.assertAlmostEqual(record.lat, 25.760919540229885, 13)
self.assertAlmostEqual(record.lon, 26.004597701149425, 13)
self.assertEqual(record.lat_min, 14)
self.assertEqual(record.lat_max, 36)
self.assertEqual(record.lon_min, 14)
self.assertEqual(record.lon_max, 36)
self.assertEqual(record.parent, L3_id)
self.assertEqual(record.path, "%s/%s/%s/%s/%s" % (L0_id, L1_id, L2_id, L3_id, L4_id))
self.assertEqual(record.wkt, POLYGON)
self.assertEqual(record.L0, L0)
self.assertEqual(record.L1, L1)
self.assertEqual(record.L2, L2)
self.assertEqual(record.L3, L3)
self.assertEqual(record.L4, L4)
self.assertEqual(record.L5, None)
if self.spatialdb:
self.assertTrue(record.the_geom is not None)
# -------------------------------------------------------------------------
def _testL5(self, with_level):
""" Test updating a normal L5, with normal L4 Parent """
LEVEL = "L5"
POLYGON = "POLYGON ((30 15, 35 35, 23 35, 15 20, 30 15))"
L0 = "%s_%s" % (self.L0, with_level)
L1 = "%s_%s" % (self.L1, with_level)
L2 = "%s_%s" % (self.L2, with_level)
L3 = "%s_%s" % (self.L3, with_level)
L4 = "%s_%s" % (self.L4, with_level)
L5 = "%s_%s" % (self.L5, with_level)
L0_id = self.ids[L0]
L1_id = self.ids[L1]
L2_id = self.ids[L2]
L3_id = self.ids[L3]
L4_id = self.ids[L4]
table = self.table
# Insert a new L5
form = Storage(vars=Storage(level=LEVEL,
name=L5,
parent=L4_id,
wkt=POLYGON,
),
errors=None,
)
current.gis.wkt_centroid(form)
L5_id = table.insert(**form.vars)
# Update the Location Tree for the L5
feature = dict(id=L5_id)
if with_level:
feature["level"] = LEVEL
current.gis.update_location_tree(feature)
# Read the results
record = current.db(table.id == L5_id).select(*self.fields,
limitby=(0, 1)
).first()
# Compare to what we expect
self.assertEqual(record.inherited, False)
self.assertAlmostEqual(record.lat, 25.70957095709571, 13)
self.assertAlmostEqual(record.lon, 25.834983498349835, 13)
self.assertEqual(record.lat_min, 15)
self.assertEqual(record.lat_max, 35)
self.assertEqual(record.lon_min, 15)
self.assertEqual(record.lon_max, 35)
self.assertEqual(record.parent, L4_id)
self.assertEqual(record.path, "%s/%s/%s/%s/%s/%s" % (L0_id, L1_id, L2_id, L3_id, L4_id, L5_id))
self.assertEqual(record.wkt, POLYGON)
self.assertEqual(record.L0, L0)
self.assertEqual(record.L1, L1)
self.assertEqual(record.L2, L2)
self.assertEqual(record.L3, L3)
self.assertEqual(record.L4, L4)
self.assertEqual(record.L5, L5)
if self.spatialdb:
self.assertTrue(record.the_geom is not None)
# -------------------------------------------------------------------------
def _testL3_L1_parent(self, with_level):
""" Test updating an L3, without an L2 Parent, going straight to L1
- this is like Cotabato City & Isabela City in the Philippines
"""
LEVEL = "L3"
POLYGON = "POLYGON ((30 13, 37 37, 23 37, 13 20, 30 13))"
L0 = "%s_%s" % (self.L0, with_level)
L1 = "%s_%s" % (self.L1, with_level)
L3 = "Test of Cotabato City"
L0_id = self.ids[L0]
L1_id = self.ids[L1]
table = self.table
# Insert a new L3
form = Storage(vars=Storage(level=LEVEL,
name=L3,
parent=L1_id,
wkt=POLYGON,
),
errors=None,
)
current.gis.wkt_centroid(form)
L3_id = table.insert(**form.vars)
# Update the Location Tree for the L3
feature = dict(id=L3_id)
if with_level:
feature["level"] = LEVEL
current.gis.update_location_tree(feature)
# Read the results
record = current.db(table.id == L3_id).select(*self.fields,
limitby=(0, 1)
).first()
# Compare to what we expect
self.assertEqual(record.inherited, False)
self.assertAlmostEqual(record.lat, 26.072901678657075, 13)
self.assertAlmostEqual(record.lon, 25.867625899280576, 13)
self.assertEqual(record.lat_min, 13)
self.assertEqual(record.lat_max, 37)
self.assertEqual(record.lon_min, 13)
self.assertEqual(record.lon_max, 37)
self.assertEqual(record.parent, L1_id)
self.assertEqual(record.path, "%s/%s/%s" % (L0_id, L1_id, L3_id))
self.assertEqual(record.wkt, POLYGON)
self.assertEqual(record.L0, L0)
self.assertEqual(record.L1, L1)
self.assertEqual(record.L2, None)
self.assertEqual(record.L3, L3)
self.assertEqual(record.L4, None)
self.assertEqual(record.L5, None)
if self.spatialdb:
self.assertTrue(record.the_geom is not None)
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
current.auth.override = False
current.db.rollback()
# =============================================================================
class S3NoGisConfigTests(unittest.TestCase):
"""
Tests for handling of missing GIS config
"""
# -------------------------------------------------------------------------
@classmethod
def setUpClass(cls):
# Replace get_config with dummy that always returns None
cls.original_get_config = staticmethod(GIS.get_config)
GIS.get_config = staticmethod(lambda: None)
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
# Restore original get_config method
GIS.get_config = staticmethod(cls.original_get_config)
# -------------------------------------------------------------------------
def testMapSetup(self):
""" Verify that MAP setup without config produces an error message """
map = MAP()
setup_result = map._setup()
self.assertIsNone(setup_result)
self.assertIsNotNone(map.error_message)
def testMap2Xml(self):
""" Verify that MAP2 rendering without config produces an error message """
map = MAP2()
xml = map.xml()
self.assertTrue(b"Map cannot display without GIS config!" in xml)
# =============================================================================
if __name__ == "__main__":
run_suite(
S3LocationTreeTests,
S3NoGisConfigTests,
)
# END ========================================================================
|
class LibtoolPackage (GnuPackage):
def __init__(self):
GnuPackage.__init__(self, 'libtool', '2.4.2', override_properties={
'build_dependency': False})
def install(self):
Package.install(self)
self.sh('rm -f "%{staged_prefix}/bin/glibtool"')
self.sh('ln -s libtool "%{staged_prefix}/bin/glibtool"')
self.sh('rm -f "%{staged_prefix}/bin/glibtoolize"')
self.sh('ln -s libtoolize "%{staged_prefix}/bin/glibtoolize"')
LibtoolPackage()
|
#!/usr/bin/env python
'''
TACO: Multi-sample transcriptome assembly from RNA-Seq
Utility script that profiles the splice junctions in a BED file
Requirements: pysam library
'''
import os
import sys
import argparse
import logging
import operator
from collections import Counter
from taco.lib.base import Strand
from taco.lib.transfrag import Transfrag
from taco.lib.pysam.cfaidx import FastaFile
__author__ = "Matthew Iyer, Yashar Niknafs, and Balaji Pandian"
__copyright__ = "Copyright 2012-2018"
__credits__ = ["Matthew Iyer", "Yashar Niknafs", "Balaji Pandian"]
__license__ = "MIT"
__version__ = "0.7.3"
__maintainer__ = "Yashar Niknafs"
__email__ = "yniknafs@umich.edu"
__status__ = "Development"
rev_comp_dict = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N': 'N'}
def dna_reverse_complement(seq):
return ''.join(rev_comp_dict[x] for x in reversed(seq))
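# A small sanity check (illustrative, not part of the original script): a minus-
# strand junction whose genomic dinucleotides read "CT..AC" folds back to the
# canonical "GTAG" donor/acceptor motif after reverse complementing, which is
# exactly how negative-strand motifs are normalized in main() below.
assert dna_reverse_complement('CTAC') == 'GTAG'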
def main():
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('genome_fasta_file')
parser.add_argument('bed_file')
args = parser.parse_args()
# check args
if not os.path.exists(args.genome_fasta_file):
parser.error('genome fasta file %s not found' % args.genome_fasta_file)
if not os.path.exists(args.bed_file):
parser.error('bed file %s not found' % args.bed_file)
logging.info('genome fasta file: %s' % args.genome_fasta_file)
logging.info('bed file: %s' % args.bed_file)
# process bed file to get junctions
logging.info('Reading Junctions')
splice_juncs = set()
fasta_fh = FastaFile(args.genome_fasta_file)
with open(args.bed_file) as bed_fh:
for line in bed_fh:
t = Transfrag.from_bed(line)
if t.chrom not in fasta_fh:
continue
for start, end in t.iterintrons():
splice_juncs.add((t.chrom, start, end, t.strand))
logging.info('Read %d Junctions' % (len(splice_juncs)))
logging.info('Profiling Splice Motifs')
motif_counter = Counter()
for chrom, start, end, strand in splice_juncs:
s = fasta_fh.fetch(chrom, start, start + 2)
s += fasta_fh.fetch(chrom, end - 2, end)
if strand == Strand.NEG:
s = dna_reverse_complement(s)
motif_counter[s] += 1
fasta_fh.close()
# report statistics
total = sum(motif_counter.values())
    print('\t'.join(['motif', 'count', 'frac']))
    for motif, count in motif_counter.most_common():
        print('\t'.join([motif, str(count), str(float(count) / total)]))
logging.info('Done')
if __name__ == '__main__':
sys.exit(main())
|
import os
from pathlib import Path
from h2o import h2o
from ..models.AIModel import AIModel
class RequestModel:
x_names = []
x_values = []
def __init__(self, x_values, x_names):
self.x_values = x_values
self.x_names = x_names
def get_frame(self):
return h2o.H2OFrame(dict(zip(self.x_names, self.x_values)))
class H2oManager:
def predict(self, request: RequestModel, modelAI: AIModel):
parent = os.path.join(Path(__file__).parents[2], "models")
path = os.path.join(parent, modelAI.uuid)
        # Write the serialized model to a temporary file, load it into h2o,
        # then always clean the file up, even if loading fails.
        try:
            with open(path, "w+b") as f:
                f.write(modelAI.file)
            h2oModel = h2o.load_model(path)
        finally:
            if os.path.exists(path):
                os.remove(path)
        return h2oModel.predict(request.get_frame())
def check_model(self, model, uuid):
parent = os.path.join(Path(__file__).parents[2], "models")
path = os.path.join(parent, uuid)
        # Same pattern as predict(): stage the model bytes on disk, try to load
        # them with h2o, and always remove the temporary file afterwards.
        try:
            with open(path, "w+b") as f:
                f.write(model)
            h2o.load_model(path)
            return True
        except Exception:
            return False
        finally:
            if os.path.exists(path):
                os.remove(path)
h2oManager = H2oManager()
|
from django.conf.urls import url, include
from rest_framework_nested import routers
from auv.urls import router
from .views import TripViewSet, WayPointViewSet
auv_router = routers.NestedSimpleRouter(router, r'auvs', lookup='auv')
# api/auvs/{auv_pk}/trips
auv_router.register(r'trips', TripViewSet, base_name='trips')
trip_router = routers.NestedSimpleRouter(auv_router, r'trips', lookup='trip')
# api/auvs/{auv_pk}/trips/{trip_pk}/waypoints
trip_router.register(r'waypoints', WayPointViewSet, base_name='waypoints')
urlpatterns = (
url(r'^', include(auv_router.urls)),
url(r'^', include(trip_router.urls)),
)
|
from unittest import TestCase
from scipy.io import wavfile
import torch
import torch.autograd
from torch.nn import ConstantPad2d
import torch.nn.functional as F
from torch.autograd import Variable, gradcheck
from wavenet_training import DilatedQueue, custom_padding
from pathlib import Path
class Test_dilated_queue(TestCase):
def test_enqueue(self):
queue = DilatedQueue(max_length=8, num_channels=3)
e = torch.zeros((3))
for i in range(11):
e = e + 1
queue.enqueue(e)
data = queue.data[0, :]
print('data: ', data)
assert data[0] == 9
assert data[2] == 11
assert data[7] == 8
def test_dequeue(self):
queue = DilatedQueue(max_length=8, num_channels=1)
e = torch.zeros((1))
for i in range(11):
e = e + 1
queue.enqueue(e)
print('data: ', queue.data)
for i in range(9):
d = queue.dequeue(num_deq=3, dilation=2)
print(d)
assert d[0][0] == 5
assert d[0][1] == 7
assert d[0][2] == 9
def test_combined(self):
queue = DilatedQueue(max_length=12, num_channels=1)
e = torch.zeros((1))
for i in range(30):
e = e + 1
queue.enqueue(e)
d = queue.dequeue(num_deq=3, dilation=4)
assert d[0][0] == max(i - 7, 0)
class Test_wav_files(TestCase):
def test_wav_read(self):
p = Path(__file__).parents[1] / 'train_samples/violin.wav'
data = wavfile.read(str(p))[1]
print(data)
# [0.1, -0.53125...
class Test_padding(TestCase):
def test_1d(self):
x = torch.ones((2, 3, 4), requires_grad=True)
res = custom_padding(x, 5, dimension=0, pad_start=False)
assert res.size() == (5, 3, 4)
assert res[-1, 0, 0] == 0
def test_2d(self):
pad = ConstantPad2d((5, 0, 0, 0), 0)
x = Variable(torch.ones((2, 3, 4, 5)))
res = pad(x)
print(res.size())
|
import numpy
import torch
import torch.nn.functional as F
from torch_rl.algos.base import BaseAlgo
class PPOAlgo(BaseAlgo):
"""The class for the Proximal Policy Optimization algorithm
    ([Schulman et al., 2017](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, acmodel, num_frames_per_proc=None, discount=0.99, lr=7e-4, gae_lambda=0.95,
entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4,
adam_eps=1e-5, clip_eps=0.2, epochs=4, batch_size=256, preprocess_obss=None,
reshape_reward=None):
num_frames_per_proc = num_frames_per_proc or 128
super().__init__(envs, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward)
self.clip_eps = clip_eps
self.epochs = epochs
self.batch_size = batch_size
assert self.batch_size % self.recurrence == 0
self.optimizer = torch.optim.Adam(self.acmodel.parameters(), lr, eps=adam_eps)
self.batch_num = 0
def update_parameters(self):
# Collect experiences
exps, logs = self.collect_experiences()
for _ in range(self.epochs):
# Initialize log values
log_entropies = []
log_values = []
log_policy_losses = []
log_value_losses = []
log_grad_norms = []
for inds in self._get_batches_starting_indexes():
# Initialize batch values
batch_entropy = 0
batch_value = 0
batch_policy_loss = 0
batch_value_loss = 0
batch_loss = 0
# Initialize memory
if self.acmodel.recurrent:
memory = exps.memory[inds]
for i in range(self.recurrence):
# Create a sub-batch of experience
sb = exps[inds + i]
# Compute loss
if self.acmodel.recurrent:
dist, value, memory = self.acmodel(sb.obs, memory * sb.mask)
else:
dist, value = self.acmodel(sb.obs)
# compute mean entropy
steer_dist, acc_dist = dist
steer_entropy = steer_dist.entropy().mean()
acc_entropy = acc_dist.entropy().mean()
# compute steer policy loss
ratio = torch.exp(steer_dist.log_prob(sb.steer_action) - sb.steer_log_prob)
surr1 = ratio * sb.advantage
surr2 = torch.clamp(ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps) * sb.advantage
steer_policy_loss = -torch.min(surr1, surr2).mean()
# compute acceleration policy loss
ratio = torch.exp(acc_dist.log_prob(sb.acc_action) - sb.acc_log_prob)
surr1 = ratio * sb.advantage
surr2 = torch.clamp(ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps) * sb.advantage
acc_policy_loss = -torch.min(surr1, surr2).mean()
# compute value loss
value_clipped = sb.value + torch.clamp(value - sb.value, -self.clip_eps, self.clip_eps)
surr1 = (value - sb.returnn).pow(2)
surr2 = (value_clipped - sb.returnn).pow(2)
value_loss = torch.max(surr1, surr2).mean()
# compute loss
loss = steer_policy_loss + acc_policy_loss - self.entropy_coef * (steer_entropy + acc_entropy) + self.value_loss_coef * value_loss
# Update batch values
batch_entropy += steer_entropy.item() + acc_entropy.item()
batch_value += value.mean().item()
batch_policy_loss += steer_policy_loss.item() + acc_policy_loss.item()
batch_value_loss += value_loss.item()
batch_loss += loss
# Update memories for next epoch
if self.acmodel.recurrent and i < self.recurrence - 1:
exps.memory[inds + i + 1] = memory.detach()
# Update batch values
batch_entropy /= self.recurrence
batch_value /= self.recurrence
batch_policy_loss /= self.recurrence
batch_value_loss /= self.recurrence
batch_loss /= self.recurrence
# Update actor-critic
self.optimizer.zero_grad()
batch_loss.backward()
grad_norm = sum(p.grad.data.norm(2).item() ** 2 for p in self.acmodel.parameters()) ** 0.5
torch.nn.utils.clip_grad_norm_(self.acmodel.parameters(), self.max_grad_norm)
self.optimizer.step()
# Update log values
log_entropies.append(batch_entropy)
log_values.append(batch_value)
log_policy_losses.append(batch_policy_loss)
log_value_losses.append(batch_value_loss)
log_grad_norms.append(grad_norm)
# Log some values
logs["entropy"] = numpy.mean(log_entropies)
logs["value"] = numpy.mean(log_values)
logs["policy_loss"] = numpy.mean(log_policy_losses)
logs["value_loss"] = numpy.mean(log_value_losses)
logs["grad_norm"] = numpy.mean(log_grad_norms)
return logs
def _get_batches_starting_indexes(self):
"""Gives, for each batch, the indexes of the observations given to
the model and the experiences used to compute the loss at first.
First, the indexes are the integers from 0 to `self.num_frames` with a step of
`self.recurrence`, shifted by `self.recurrence//2` one time in two for having
more diverse batches. Then, the indexes are splited into the different batches.
Returns
-------
batches_starting_indexes : list of list of int
the indexes of the experiences to be used at first for each batch
"""
indexes = numpy.arange(0, self.num_frames, self.recurrence)
indexes = numpy.random.permutation(indexes)
# Shift starting indexes by self.recurrence//2 half the time
if self.batch_num % 2 == 1:
indexes = indexes[(indexes + self.recurrence) % self.num_frames_per_proc != 0]
indexes += self.recurrence // 2
self.batch_num += 1
num_indexes = self.batch_size // self.recurrence
batches_starting_indexes = [indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)]
return batches_starting_indexes
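# A minimal, self-contained sketch (not part of torch_rl) of the clipped surrogate
# objective used in update_parameters() above, evaluated on dummy tensors; all
# numbers are illustrative.
if __name__ == "__main__":
    clip_eps = 0.2
    new_log_prob = torch.tensor([-0.9, -1.1, -0.5])
    old_log_prob = torch.tensor([-1.0, -1.0, -1.0])
    advantage = torch.tensor([1.0, -1.0, 0.5])
    ratio = torch.exp(new_log_prob - old_log_prob)  # pi_new / pi_old
    surr1 = ratio * advantage
    surr2 = torch.clamp(ratio, 1.0 - clip_eps, 1.0 + clip_eps) * advantage
    print("toy policy loss:", -torch.min(surr1, surr2).mean().item())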
|
dist = input("Enter the distance travelled by youto and fro in kms : ")
f_avg = input("Enter the fuel average in your area [km/litre]: ")
cost_of_diesel = input("Enter the cost of diesel [int INR]: ")
f_cons = float(dist) / float(f_avg)
cost = float(f_cons) * float(cost_of_diesel)
print("The cost of travel is : ", cost)
|
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
from tests import settings
settings.INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.sites',
'paloma',
)
def run_tests(settings):
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(interactive=False)
failures = test_runner.run_tests(['paloma'])
return failures
def main():
failures = run_tests(settings)
sys.exit(failures)
if __name__ == '__main__':
main()
|
from core.terraform.resources.aws.load_balancer import LoadBalancerResource
from resources.vpc.security_group import InfraSecurityGroupResource
from core.config import Settings
class ApplicationLoadBalancer(LoadBalancerResource):
name = ""
internal = True
load_balancer_type = "application"
security_groups = [InfraSecurityGroupResource.get_output_attr('id')]
subnets = Settings.get('VPC')['SUBNETS']
OUTPUT_LIST = ['dns_name']
@classmethod
def get_http_url(cls):
return "http://%s" % cls.get_output_attr('dns_name')
@classmethod
def get_api_base_url(cls):
return "http://%s/api" % cls.get_output_attr('dns_name')
@classmethod
def get_api_version_url(cls, service):
version_url = cls.get_api_server_url(service)
return version_url if service == "auth" else version_url + "/v1"
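    # For example (service names are illustrative, not defined here):
    #   get_api_version_url("compliance") -> "http://<alb-dns-name>/api/compliance/v1"
    #   get_api_version_url("auth")       -> "http://<alb-dns-name>/api/auth"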
@classmethod
def get_api_server_url(cls, service):
return "%s/%s" % (cls.get_api_base_url(), service)
def render_output(self, outputs):
if self.resource_in_tf_output(outputs):
return {
'Pacbot Domain': outputs[self.get_resource_id()]['dns_name'],
'Admin': Settings.PACBOT_LOGIN_CREDENTIALS['Admin'],
'User': Settings.PACBOT_LOGIN_CREDENTIALS['User']
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ["db"]
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
|
# Step with a savebutton for saving a specified datacollect param
# with the perform_save() method
#
# Works only in datacollect mode. Must be late enough
# in the checklist that checklist filename has already been determined
import os
import sys
import posixpath
try:
# py2.x
from urllib import pathname2url
from urllib import url2pathname
from urllib import quote
from urllib import unquote
pass
except ImportError:
# py3.x
from urllib.request import pathname2url
from urllib.request import url2pathname
from urllib.parse import quote
from urllib.parse import unquote
pass
if "gi" in sys.modules: # gtk3
import gi
gi.require_version('Gtk','3.0')
from gi.repository import Gtk as gtk
from gi.repository import GObject as gobject
pass
else :
# gtk2
import gtk
import gobject
pass
from .. import paramdb2 as pdb
from .. import dc_value
from .. import dc2_misc
from .buttonreadoutstep import buttonreadoutstep
from .. import canonicalize_path
class simpleobj:
name=None
pass
__pychecker__="no-import no-argsused"
# gtk superclass should be first of multiple inheritances
class hrefbuttonstep(buttonreadoutstep):
__gtype_name__="hrefbuttonstep"
__gproperties__ = {
"paramname": (gobject.TYPE_STRING,
"parameter",
"datacollect parameter to show and to use for save. The named parameter should store an hrefvalue with a controller that supports the perform_save method. Only this parameter is shown in the readout (the others are invisible)",
"", # default value
gobject.PARAM_READWRITE), # flags
# NOTE: "buttonlabel" parameter handled by buttonreadoutstep superclass
"intermediate": (gobject.TYPE_BOOLEAN,
"intermediate parameter setting",
"Intermediate parameter setting: Intermediate step parameters are saved to the XML checklist file when the step is checked, and the widgets freeze when the checklist is read-only or once the checkbox ix checked",
False, # default value
gobject.PARAM_READWRITE), # flags
# also "buttonlabel" and "description" properties
# inherited from buttonreadoutstep
}
__dcvalue_xml_properties={} # dictionary by property of dc_value class to be transmitted as a serialized xmldoc
__dcvalue_href_properties=frozenset([]) # set of properties to be transmitted as an hrefvalue with the checklist context as contexthref
paramnotify=None
# self.paramdb and self.dc_gui_io defined by buttonreadoutstep, set by buttonreadoutstep's dc_gui_init()
def __init__(self,checklist,step,xmlpath):
buttonreadoutstep.__init__(self,checklist,step,xmlpath)
# paramhandler.__init__(self,super(adjustparamstep,self),self.__proplist)# .__gproperties__)
# gobject.GObject.__init__(self)
self.myprops["paramname"]=""
self.myprops["intermediate"]=False
self.set_property("readoutparam",self.myprops["paramname"])
self.set_property("buttonlabel","Save DGS Snapshot")
self.set_property("intermediate",False)
pass
def destroystep(self):
if len(self.myprops["paramname"]) > 0:
self.paramdb.remnotify(self.myprops["paramname"],self.paramnotify)
pass
self.paramnotify=None
pass
def dc_gui_init(self,guistate):
# Set up notifications...
# call superclass
buttonreadoutstep.dc_gui_init(self,guistate)
if len(self.myprops["paramname"]) > 0:
self.paramnotify=self.paramdb.addnotify(self.myprops["paramname"],self.changedcallback,pdb.param.NOTIFY_NEWVALUE)
pass
pass
def do_set_property(self,gproperty,value):
#print "set_property(%s,%s)" % (gproperty.name,str(value))
if gproperty.name=="paramname":
# print "paramname=%s" % value
self.myprops["paramname"]=value
#import pdb as pythondb
#pythondb.set_trace()
nameobj=simpleobj()
nameobj.name="readoutparam"
buttonreadoutstep.do_set_property(self,nameobj,value)
pass
elif gproperty.name=="intermediate":
# print "paramname=%s" % value
self.myprops["intermediate"]=value
pass
else :
#sys.stderr.write("calling buttonreadoutstep set_property()\n")
buttonreadoutstep.do_set_property(self,gproperty,value)
pass
pass
def do_get_property(self,property):
if property.name == "paramname":
return self.myprops["paramname"]
if property.name == "intermediate":
return self.myprops["intermediate"]
return buttonreadoutstep.do_get_property(self,property)
def do_save_param_datacollect(self,paramname):
raise ValueError("This function is never called from anywhere... not sure if it makes sense")
if paramname is None or paramname=="":
return
desthref=self.paramdb["dest"].dcvalue
# suggest a filename
chklistfilename=self.checklist.xmldoc.filehref.get_bare_unquoted_filename()
chklistbasename=posixpath.splitext(chklistfilename)[0]
filename="%s_%s.%s" % (chklistbasename,paramname,self.paramdb[paramname].save_extension)
savefilehref=dc_value.hrefvalue(quote(filename),contexthref=desthref)
#import pdb as pythondb
#pythondb.set_trace()
if (os.path.exists(savefilehref.getpath())) :
if hasattr(gtk,"MessageType") and hasattr(gtk.MessageType,"WARNING"):
# gtk3
existsdialog=gtk.MessageDialog(type=gtk.MessageType.ERROR,buttons=gtk.ButtonsType.NONE)
pass
else :
existsdialog=gtk.MessageDialog(type=gtk.MESSAGE_ERROR,buttons=gtk.BUTTONS_NONE)
pass
existsdialog.set_markup("Error: File %s exists." % (savefilehref.getpath()))
existsdialog.add_button("Overwrite",1)
existsdialog.add_button("Cancel operation",0)
existsdialogval=existsdialog.run()
existsdialog.destroy()
if existsdialogval==0:
# Cancel
return
pass
#import pdb as pythondb
#pythondb.set_trace()
# NOTE: perform_save() now takes an extra parameter: saveparamdictoverride
self.paramdb[paramname].perform_save(savefilehref)
pass
def find_file_dialog(self,paramname,desthref=None):
if desthref is not None:
reference_href=desthref
pass
else:
reference_href=self.checklist.xmldoc.getcontexthref().leafless()
pass
reference_path=reference_href.getpath()
if hasattr(gtk,"FileChooserAction") and hasattr(gtk.FileChooserAction,"OPEN"):
# gtk3
Chooser=gtk.FileChooserDialog(title="Open...",action=gtk.FileChooserAction.OPEN,buttons=(gtk.STOCK_CANCEL,gtk.ResponseType.CANCEL,gtk.STOCK_OPEN,gtk.ResponseType.OK))
ResponseOK=gtk.ResponseType.OK
pass
else:
# gtk2
Chooser=gtk.FileChooserDialog(title="Open...",action=gtk.FILE_CHOOSER_ACTION_OPEN,buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
ResponseOK=gtk.RESPONSE_OK
pass
Chooser.set_modal(True)
Chooser.set_current_folder(reference_path)
datafilter=gtk.FileFilter()
datafilter.set_name(self.paramdb[self.myprops["paramname"]].save_extension.upper() + " files")
datafilter.add_pattern("*." + self.paramdb[self.myprops["paramname"]].save_extension)
Chooser.add_filter(datafilter)
allfilter=gtk.FileFilter()
allfilter.set_name("All files")
allfilter.add_pattern("*")
Chooser.add_filter(allfilter)
response=Chooser.run()
outfilename=Chooser.get_filename()
Chooser.hide()
Chooser.destroy()
if response != ResponseOK:
return
# datafilename should be relative to desthref
relpath=canonicalize_path.relative_path_to(reference_path,outfilename)
filehref=dc_value.hrefvalue(pathname2url(relpath),contexthref=reference_href)
self.paramdb[paramname].requestval(filehref)
pass
def buttoncallback(self,*args):
if self.checklist.readonly:
return
if self.is_fixed(): # determined by buttonreadoutstep superclass
return
#if not self.checklist.checknotcurrent():
# return
# In datacollectmode the checklist autosaves once enough steps
# have been checked to determine the filename, so we have to be
# past this point
if self.checklist.datacollectmode:
if self.checklist.xmldoc.filehref is None:
raise ValueError("Save button step is too early -- checklist not yet saved to a file, so impossible to determine filename")
self.find_file_dialog(self.myprops["paramname"])
pass
else :
# not datacollect mode...
# determine filename automatically if possible, but ask user
self.checklist.xmldoc.lock_ro()
try :
destelement=self.checklist.xmldoc.xpathsingle("chx:dest")
desthref=dc_value.hrefvalue.fromxml(self.checklist.xmldoc,destelement)
finally:
self.checklist.xmldoc.unlock_ro()
pass
self.find_file_dialog(self.myprops["paramname"],desthref)
pass
#self.update_xml()
#self.set_fixed()
#self.setbuttonbgcolor("green") # indicate that we have been pushed
pass
def is_fixed(self): # note overridden by savebuttonstep
if self.paramdb is None:
return True # fixed during initialization
# param readout is NEVER fixed when parameter intermediate is False
# ... non-intermediate params are saved in the experiment log,
# not in the checklist
if not self.myprops["intermediate"]:
return False
# param readout is fixed when checklist is marked as
# readonly or when checkbox is checked.
return self.checklist.readonly or self.step.gladeobjdict["checkbutton"].get_property("active")
# override set_fixed() so underlying widget is ALWAYS fixed
def set_fixed(self):
fixed=self.is_fixed()
(value,displayfmt)=self.value_from_xml()
# sys.stderr.write("savebuttonstep: set_fixed: %s\n" % (str(value)))
self.gladeobjdict["readout"].set_fixed(fixed,value,displayfmt)
self.gladeobjdict["pushbutton"].set_sensitive(not fixed)
if not fixed:
self.update_xml()
pass
pass
def changedcallback(self,param,condition):
if not self.is_fixed():
self.setbuttonbgcolor("green") # indicate that we have been pushed
self.update_xml()
pass
pass
def value_from_xml(self):
if not "paramname" in self.myprops:
# not fully initialized
return ("",None)
# same as value_from_xml from dc_paramstep but iterates over paramnames...
        # Since the text box is read-only and this is just used for the readout display
retval=dc_value.stringvalue("")
retfmt=None
(gotvalue,gotdisplayfmt)=(dc_value.stringvalue(""),None)
if self.myprops["paramname"] is not None:
(gotvalue,gotdisplayfmt)=dc2_misc.stepwidget_value_from_xml(self,self.myprops["paramname"])
pass
retval=gotvalue
retfmt=gotdisplayfmt
# We only show the first param
return (retval,retfmt)
    def update_xml(self): # ... same as update_xml from dc_paramstep but iterates over params
if self.is_fixed():
return
# only intermediate params are saved to the checklist XML
if not self.myprops["intermediate"]:
return
if self.guistate is None or self.paramdb is None:
return
if self.myprops["paramname"] != None and len(self.myprops["paramname"]) > 0:
newvalue=self.paramdb[self.myprops["paramname"]].dcvalue
dc2_misc.stepwidget_update_xml(self,self.myprops["paramname"],newvalue)
pass
return
def resetchecklist(self):
buttonreadoutstep.resetchecklist(self)
self.setbuttonbgcolor("gray") # indicate that we have not been pushed
# reset parameter values
#if self.paramdb is not None:
# if self.myprops["paramname"] is not None and len(self.myprops["paramname"]) > 0:
# self.paramdb[self.myprops["paramname"]].requestvalstr_sync("")
# pass
# if self.myprops["paramname2"] is not None and len(self.myprops["paramname2"]) > 0:
# self.paramdb[self.myprops["paramname2"]].requestvalstr_sync("")
# pass
# if self.myprops["paramname3"] is not None and len(self.myprops["paramname3"]) > 0:
# self.paramdb[self.myprops["paramname3"]].requestvalstr_sync("")
# pass
# pass
# clear
#self.update_xml()
self.set_fixed()
pass
pass
gobject.type_register(hrefbuttonstep) # required since we are defining new properties/signals
|
import requests
import json
import sys
import time
from api import *
blockTime = get_block_time()
difficulty = get_difficulty()
usd_price = get_usd_price()
data = {
'blockTime': blockTime,
'difficulty': difficulty,
'priceUsd': usd_price,
'lastUpdate': time.time(),
}
with open(sys.argv[1], 'w') as f:
    f.write('ethereumStats = ' + json.dumps(data) + ';')
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, MaxPool2D
from tensorflow.keras import Model
class CNN(object):
def __init__(self, height, width, channel, num_class, leaning_rate=1e-3, ckpt_dir='./Checkpoint'):
print("\nInitializing Neural Network...")
self.height, self.width, self.channel = height, width, channel
self.num_class, self.k_size = num_class, 3
self.leaning_rate = leaning_rate
self.ckpt_dir = ckpt_dir
self.model = SKNet(num_class=self.num_class)
self.model(tf.zeros([1, self.height, self.width, self.channel]), training=False, verbose=True)
self.optimizer = tf.optimizers.Adam(self.leaning_rate)
self.summary_writer = tf.summary.create_file_writer(self.ckpt_dir)
def step(self, x, y, iteration=0, train=False):
with tf.GradientTape() as tape:
logits = self.model(x, training=train)
smce = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.math.reduce_mean(smce)
score = tf.nn.softmax(logits)
pred = tf.argmax(score, 1)
correct_pred = tf.equal(pred, tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if(train):
gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
with self.summary_writer.as_default():
tf.summary.scalar('CNN/loss', loss, step=iteration)
tf.summary.scalar('CNN/accuracy', accuracy, step=iteration)
return loss, accuracy, score
def save_params(self):
self.model.save_weights("%s/model.h5" %(self.ckpt_dir))
def load_params(self):
self.model.load_weights("%s/model.h5" %(self.ckpt_dir))
class SKNet(Model):
def __init__(self, num_class):
super(SKNet, self).__init__()
self.num_class = num_class
# Block 1
self.conv1_a = Conv2D(filters=16, kernel_size=3, strides=1, padding="SAME")
self.bn1_a = BatchNormalization()
self.conv1_b = Conv2D(filters=16, kernel_size=5, strides=1, padding="SAME")
self.bn1_b = BatchNormalization()
self.fc1 = Dense(8, activation=None)
self.bn1_fc = BatchNormalization()
self.fc1_a = Dense(16, activation=None)
# Block 2
self.conv2_a = Conv2D(filters=32, kernel_size=3, strides=1, padding="SAME")
self.bn2_a = BatchNormalization()
self.conv2_b = Conv2D(filters=32, kernel_size=5, strides=1, padding="SAME")
self.bn2_b = BatchNormalization()
self.fc2 = Dense(16, activation=None)
self.bn2_fc = BatchNormalization()
self.fc2_a = Dense(32, activation=None)
        # Block 3
self.conv3_a = Conv2D(filters=64, kernel_size=3, strides=1, padding="SAME")
self.bn3_a = BatchNormalization()
self.conv3_b = Conv2D(filters=64, kernel_size=5, strides=1, padding="SAME")
self.bn3_b = BatchNormalization()
self.fc3 = Dense(32, activation=None)
self.bn3_fc = BatchNormalization()
self.fc3_a = Dense(64, activation=None)
# FC
self.fc_out = Dense(self.num_class, activation=None)
self.maxpool = MaxPool2D(pool_size=(2, 2))
def call(self, x, training=False, verbose=False):
if(verbose): print(x.shape)
""" Conv-1 """
# Split-1
u1_a = tf.keras.activations.relu(self.bn1_a(self.conv1_a(x), training=training))
u1_b = tf.keras.activations.relu(self.bn1_b(self.conv1_b(x), training=training))
# Fuse-1
u1 = u1_a + u1_b
s1 = tf.math.reduce_sum(u1, axis=(1, 2))
z1 = tf.keras.activations.relu(self.bn1_fc(self.fc1(s1), training=training))
# Select-1
a1 = tf.keras.activations.softmax(self.fc1_a(z1))
a1 = tf.expand_dims(a1, 1)
a1 = tf.expand_dims(a1, 1)
b1 = 1 - a1
v1 = (u1_a * a1) + (u1_b * b1)
if(verbose): print(v1.shape)
p1 = self.maxpool(v1)
if(verbose): print(p1.shape)
""" Conv-2 """
# Split-2
u2_a = tf.keras.activations.relu(self.bn2_a(self.conv2_a(p1), training=training))
u2_b = tf.keras.activations.relu(self.bn2_b(self.conv2_b(p1), training=training))
# Fuse-2
u2 = u2_a + u2_b
s2 = tf.math.reduce_sum(u2, axis=(1, 2))
z2 = tf.keras.activations.relu(self.bn2_fc(self.fc2(s2), training=training))
# Select-2
a2 = tf.keras.activations.softmax(self.fc2_a(z2))
a2 = tf.expand_dims(a2, 1)
a2 = tf.expand_dims(a2, 1)
b2 = 1 - a2
v2 = (u2_a * a2) + (u2_b * b2)
if(verbose): print(v2.shape)
p2 = self.maxpool(v2)
if(verbose): print(p2.shape)
""" Conv-3 """
# Split-3
u3_a = tf.keras.activations.relu(self.bn3_a(self.conv3_a(p2), training=training))
u3_b = tf.keras.activations.relu(self.bn3_b(self.conv3_b(p2), training=training))
# Fuse-3
u3 = u3_a + u3_b
s3 = tf.math.reduce_sum(u3, axis=(1, 2))
z3 = tf.keras.activations.relu(self.bn3_fc(self.fc3(s3), training=training))
# Select-3
a3 = tf.keras.activations.softmax(self.fc3_a(z3))
a3 = tf.expand_dims(a3, 1)
a3 = tf.expand_dims(a3, 1)
b3 = 1 - a3
v3 = (u3_a * a3) + (u3_b * b3)
if(verbose): print(v3.shape)
gap = tf.math.reduce_sum(v3, axis=(1, 2))
if(verbose): print(gap.shape)
out = self.fc_out(gap)
if(verbose): print(out.shape)
return out
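if __name__ == "__main__":
    # A minimal smoke test (an illustrative sketch, not part of the original
    # module): one training step of the selective-kernel CNN on random 32x32
    # RGB inputs with 10 classes; checkpoints/summaries go to ./Checkpoint.
    import numpy as np
    neuralnet = CNN(height=32, width=32, channel=3, num_class=10)
    x = tf.random.uniform([4, 32, 32, 3])
    y = tf.one_hot(np.random.randint(0, 10, size=4), depth=10)
    loss, accuracy, score = neuralnet.step(x, y, iteration=0, train=True)
    print("loss: %.4f, accuracy: %.4f" % (loss.numpy(), accuracy.numpy()))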
|
import cv2
import numpy as np
import glob
import os
import argparse
parser = argparse.ArgumentParser(description='prepare captures for annotation')
parser.add_argument('--input_folder', help='the input folder', required=True)
parser.add_argument('--video_name', help='the top view video name ... e.g. view2-color.mp4', default="view2-color.mp4")
parser.add_argument('--ouput_folder_name', help='annotation data folder name. default: annotation_data', default="annotation_data")
args = parser.parse_args()
print(args.input_folder)
video_id = args.input_folder.split("/")
if len(video_id) > 1:
video_id = video_id[-1]
else:
video_id = video_id[0]
print("video id:", video_id)
print("load video:", f"{args.input_folder}/{args.video_name}")
cap = cv2.VideoCapture(f'{args.input_folder}/{args.video_name}')
if args.ouput_folder_name not in os.listdir(f'{args.input_folder}'):
os.mkdir(f'{args.input_folder}/{args.ouput_folder_name}')
if "video" not in os.listdir(f'{args.input_folder}/{args.ouput_folder_name}'):
os.mkdir(f'{args.input_folder}/{args.ouput_folder_name}/video')
os.mkdir(f'{args.input_folder}/{args.ouput_folder_name}/video/{video_id}')
with open(f'{args.input_folder}/{args.ouput_folder_name}/video_{video_id}.txt', "w") as f:
f.write("URLID,URL\n")
f.write(f"{video_id},{video_id}\n")
with open(f'{args.input_folder}/{args.ouput_folder_name}/frames_{video_id}.txt', "w") as f:
f.write("URLID,Frame,Time\n")
frame_number = 0
while cap.isOpened():
print("export frame:", frame_number)
ret, frame = cap.read()
if not ret:
break
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# cv2.imshow('frame', frame)
frame_name = str(frame_number).zfill(12)
frame_name = f"{frame_name}.jpg"
frame = frame[:, 255:-305] # crop to ROI
# frame = cv2.resize(frame, (480, 480)) # resize img
frame = cv2.resize(frame, (224, 224)) # resize img
cv2.imwrite(f'{args.input_folder}/{args.ouput_folder_name}/video/{video_id}/{frame_name}', frame)
f.write(f"{video_id},{frame_name},{frame_number}\n")
frame_number += 1
cap.release()
cv2.destroyAllWindows()
|
import fxpt_experiments as fe
num_procs = 10
test_data_ids = [
"big1024_base",
"big512_base",
"big256_base",
"full_base",
]
for test_data_id in test_data_ids:
_ = fe.run_TvB_stability_experiments(test_data_id,num_procs)
|
"""
Load the Kepler light curve from FITS files. Also do a bit of preprocessing.
"""
import inputs as inp
def loadlc(files, usepdc=False, **kwargs):
"""
Load Kepler light curves.
Parameters
----------
files : list of strings
The locations of the light curves to load together.
usepdc : bool, optional
Set to True to load the PDC light curves instead of the
default SAP.
Returns
-------
time : ndarray
Kepler times of center of exposure
flux : ndarray
Kepler normalized fluxes
fluxerr : ndarray
Kepler flux errors
cadence : ndarray
Kepler cadence number
quarter : ndarray
Kepler quarter
quality : ndarray
Kepler quality flag
"""
import astropy.io.fits as pyfits
import numpy as np
# load the first file
ifile = files[0]
data = pyfits.getdata(ifile)
# get the times and fluxes
time = data['time']+54833e0
flux = data['sap_flux']
fluxerr = data['sap_flux_err']
if usepdc:
flux = data['pdcsap_flux']
fluxerr = data['pdcsap_flux_err']
# where the times and fluxes are finite
good = (np.isfinite(time) & np.isfinite(flux))
# get the good values of everything
time = time[good]
flux = flux[good]
fluxerr = fluxerr[good]
quality = data['sap_quality'][good]
cadence = data['cadenceno'][good]
# pull the quarter from the header and set it up as an array
quart = pyfits.getval(ifile, 'quarter', 0)
quarter = np.zeros(len(time)) + quart
# normalize the fluxes
fluxerr /= np.median(flux)
flux /= np.median(flux)
# add in subsequent files
for i in np.arange(len(files)-1)+1:
ifile = files[i]
data = pyfits.getdata(ifile)
# get the times and fluxes
itime = data['time']+54833e0
iflux = data['sap_flux']
ifluxerr = data['sap_flux_err']
if usepdc:
iflux = data['pdcsap_flux']
ifluxerr = data['pdcsap_flux_err']
# where the times and fluxes are finite
good = (np.isfinite(itime) & np.isfinite(iflux))
# get the good values of everything
itime = itime[good]
iflux = iflux[good]
ifluxerr = ifluxerr[good]
iquality = data['sap_quality'][good]
icadence = data['cadenceno'][good]
# pull the quarter from the header and set it up as an array
quart = pyfits.getval(ifile, 'quarter', 0)
iquarter = np.zeros(len(itime)) + quart
# normalize the fluxes
ifluxerr /= np.median(iflux)
iflux /= np.median(iflux)
time = np.concatenate((time, itime))
flux = np.concatenate((flux, iflux))
fluxerr = np.concatenate((fluxerr, ifluxerr))
quality = np.concatenate((quality, iquality))
cadence = np.concatenate((cadence, icadence))
quarter = np.concatenate((quarter, iquarter))
# guarantee the light curve in sequential order
order = np.argsort(time)
time = time[order]
flux = flux[order]
fluxerr = fluxerr[order]
quality = quality[order]
cadence = cadence[order]
quarter = quarter[order]
return time, flux, fluxerr, cadence, quarter, quality
def preparelc(KIC, dataloc=inp.keplerdata, fill=True,
badflags=(128, 2048), ignorelist=inp.baddata,
**kwargs):
"""
Load Kepler light curves, then process them for analysis.
Parameters
----------
KIC : int
The Kepler ID for the system to look at
dataloc : string, optional
Directory point to the location of the Kepler light curves.
Default can be changed in the module initialization.
fill : boolean, optional
Should we fill in all missing cadences? If true, will
interpolate times to all missing cadences and assign them
flux with np.inf errors. Necessary for QATS requiring
continuous data. Default True.
badflags : tuple, optional
Flags that can be set by Kepler that we should take seriously
and ignore.
Set all cadences with these flags to have infinite errors.
Default 128 and 2048.
ignorelist : string, optional
File containing regions of time to ignore. File contents should
be 2 columns, with start and end times (in times already
adjusted by inp.timeoffset). Defaults to the file listed in the
module.
Returns
-------
time : ndarray
Kepler times of center of exposure
flux : ndarray
Kepler normalized fluxes
fluxerr : ndarray
Kepler flux errors
cadence : ndarray
Cadence number, starting at 0
quarter : ndarray
Kepler quarter
quality : ndarray
Kepler quality flag
"""
from glob import glob
import numpy as np
from scipy import interpolate
# load the lightcurve from FITS files
KICstr = str(int(KIC))
files = glob(dataloc + 'kplr*' + KICstr + '*llc.fits')
time, flux, fluxerr, cad, quart, qual = loadlc(files, **kwargs)
time -= inp.timeoffset
# make sure cadences start at 0
cad -= cad[0]
if fill:
# fill in the missing cadences and interpolate their times and
# fluxes (though the flux errors will be infinite)
newcad = np.arange(cad[-1]+1)
time = np.interp(newcad, cad, time)
newfluxerr = newcad * 0. + np.inf
newfluxerr[cad] = fluxerr
# fill in the old fluxes, etc to the new grid
newflux = newcad * 0. + 1.
newflux[cad] = flux
newqual = newcad * 0
newqual[cad] = qual
# default to quarter -1 for filled in gaps
newquart = newcad * 0. - 1.
newquart[cad] = quart
cad = newcad
flux = newflux
fluxerr = newfluxerr
qual = newqual
quart = newquart
# fill in the infinite flux errors with interpolated values
# to make plotting look better
func = interpolate.interp1d(time[np.isfinite(fluxerr)],
flux[np.isfinite(fluxerr)],
bounds_error=False, fill_value=1.)
flux[~np.isfinite(fluxerr)] = func(time[~np.isfinite(fluxerr)])
# ignore the places with these bad flags
for ii in badflags:
bad = np.where(qual & ii)[0]
fluxerr[bad] = np.inf
# ignore these regions for whatever reason
if ignorelist is not None:
tstart, tend = np.loadtxt(ignorelist, unpack=True, ndmin=2)
for ii in np.arange(len(tstart)):
igsrch = np.where((time >= tstart[ii]) & (time <= tend[ii]))[0]
fluxerr[igsrch] = np.inf
return time, flux, fluxerr, cad, quart, qual
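if __name__ == '__main__':
    # A minimal usage sketch, not part of the original module: the KIC number is
    # purely illustrative, and the call assumes the matching kplr*llc.fits files
    # are present under inp.keplerdata (see preparelc above).
    import numpy as np
    time, flux, fluxerr, cad, quart, qual = preparelc(8462852)
    usable = np.isfinite(fluxerr)
    print('Loaded %d cadences, %d with finite errors' % (len(time), usable.sum()))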
|
def test_movie_goofs(ia):
movie = ia.get_movie('0133093', info=['goofs'])
goofs = movie.get('goofs', [])
assert len(goofs) > 120
|
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
face = misc.face(gray=True)
plt.imshow(face, cmap=plt.cm.gray)
plt.savefig('face0.png')
plt.clf()
bwimage = np.zeros_like(face)
bwimage[face > 128] = 255
plt.imshow(bwimage, cmap=plt.cm.gray)
plt.savefig('face1.png')
plt.clf()
framedface = np.zeros_like(face)
framedface[31:-30, 31:-30] = face[31:-30, 31:-30]
plt.imshow(framedface, cmap=plt.cm.gray)
plt.savefig('face2.png')
plt.clf()
darkface = 255*(face/255)**1.5
plt.imshow(darkface, cmap=plt.cm.gray)
plt.savefig('face3.png')
plt.clf()
sy, sx = face.shape
y, x = np.ogrid[0:sy, 0:sx]
centerx, centery = (660, 300)
mask = ((y - centery)**2 + (x - centerx)**2) > 230**2
face[mask] = 0
plt.imshow(face, cmap=plt.cm.gray)
plt.savefig('face4.png')
|
"""Added start, enddate to digitize group
Revision ID: 4f17336641b9
Revises: 4237cb2ca161
Create Date: 2016-10-18 16:16:56.159752
"""
# revision identifiers, used by Alembic.
revision = '4f17336641b9'
down_revision = '4237cb2ca161'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.add_column('digitize_feature_groups', sa.Column('_active', sa.Boolean(), nullable=True))
op.add_column('digitize_feature_groups', sa.Column('end_date', sa.DateTime(), nullable=True))
op.add_column('digitize_feature_groups', sa.Column('start_date', sa.DateTime(), nullable=True))
op.drop_column('digitize_feature_groups', 'active')
def downgrade():
op.add_column('digitize_feature_groups', sa.Column('active', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_column('digitize_feature_groups', 'start_date')
op.drop_column('digitize_feature_groups', 'end_date')
op.drop_column('digitize_feature_groups', '_active')
|
import teams as t
import org as o
import employee as emp
from empstore import employees
from empstore import teams
from empstore import org
while True:
print("Press 1 for add employee")
print("Press 2 for delete employee ")
print("Press 3 for search employee")
print("Press 4 for display employee")
print("Press 5 for change employee details")
print("Press 6 manage all teams")
print("Press 7 for Quit")
ch = int(input("Enter choice:"))
if ch == 1:
#adding employee
emp.add_employee()
elif ch == 2:
#delete
emp.delete_employee()
elif ch == 3:
#search
emp.search_employee()
elif ch == 4:
#display employee
emp.display_employee()
elif ch== 5:
#change employee details
emp.change_employee()
elif ch == 6:
t.manage_all_teams()
elif ch == 7:
break
else:
print("Invalid Choice")
|
#Felipe Lima
#Language: Python
#Exercise 06 from: https://wiki.python.org.br/EstruturaSequencial
#Import library
import math
#Read the radius
raio = float(input("What is the radius of the circle: "))
#Compute the area (pi * r^2)
area = math.pi*raio**2
#Print the result
print("The area of the circle, for the given radius, is: {:.2f}.".format(area))
|
#--------------------------------------------------------------------------------------------
# Simplistic implementation of the Sinkhorn divergences, with a vanilla PyTorch backend
#--------------------------------------------------------------------------------------------
import numpy as np
import torch
#######################################################################################################################
# Elementary operations .....................................................................
#######################################################################################################################
def scal( α, f ) :
return torch.dot( α.view(-1), f.view(-1) )
def lse( v_ij ):
"""[lse(v_ij)]_i = log sum_j exp(v_ij), with numerical accuracy."""
V_i = torch.max(v_ij, 1)[0].view(-1,1)
return V_i + (v_ij - V_i).exp().sum(1).log().view(-1,1)
def dist_matrix(x_i, y_j, p, ε) :
x_y = x_i.unsqueeze(1) - y_j.unsqueeze(0)
if p == 1 : return x_y.norm(dim=2) / ε
elif p == 2 : return ( x_y ** 2).sum(2) / ε
else : return x_y.norm(dim=2)**(p/2) / ε
#######################################################################################################################
# Sinkhorn iterations .....................................................................
#######################################################################################################################
def sink(α_i, x_i, β_j, y_j, p=1, eps=.1, nits=100, **kwargs):
ε = eps # Python supports Unicode. So fancy!
# Sinkhorn loop with A = a/eps , B = b/eps ....................................................
α_i_log, β_j_log = α_i.log(), β_j.log() # Precompute the logs of the measures' weights
B_i, A_j = torch.zeros_like(α_i), torch.zeros_like(β_j) # Sampled influence fields
    Cxy_e = dist_matrix(x_i, y_j, p, ε)
    for i in range(nits):
        A_j = -lse( (B_i + α_i_log).view(1,-1) - Cxy_e.t() ).view(-1) # a(y)/ε = Smin_ε,x~α [ C(x,y) - b(x) ] / ε
        B_i = -lse( (A_j + β_j_log).view(1,-1) - Cxy_e ).view(-1) # b(x)/ε = Smin_ε,y~β [ C(x,y) - a(y) ] / ε
return ε*A_j.view(-1), ε*B_i.view(-1)
def sym_sink(α_i, x_i, y_j=None, p=1, eps=.1, nits=100, **kwargs):
ε = eps # Python supports Unicode. So fancy!
# Sinkhorn loop ......................................................................
α_i_log = α_i.log()
A_i = torch.zeros_like(α_i)
    Cxx_e = dist_matrix(x_i, x_i, p, ε)
    for i in range(nits-1):
        A_i = 0.5 * (A_i - lse( (A_i + α_i_log).view(1,-1) - Cxx_e ).view(-1)) # a(x)/ε = .5*(a(x)/ε + Smin_ε,y~α [ C(x,y) - a(y) ] / ε)
a_x = -ε*lse( (A_i + α_i_log).view(1,-1) - Cxx_e ).view(-1) # a(x) = Smin_e,z~α [ C(x,z) - a(z) ]
if y_j is None :
return None, a_x
else : # extrapolate "a" to the point cloud "y_j"
        Cyx_e = dist_matrix(y_j, x_i, p, ε)
a_y = - ε * lse( (A_i + α_i_log).view(1,-1) - Cyx_e ).view(-1) # a(z) = Smin_e,z~α [ C(y,z) - a(z) ]
return a_y, a_x
#######################################################################################################################
# Derived Functionals .....................................................................
#######################################################################################################################
def regularized_ot( α, x, β, y, **params): # OT_ε
a_y, b_x = sink( α, x, β, y, **params)
return scal(α, b_x) + scal(β, a_y)
def hausdorff_divergence(α, x, β, y, **params): # H_ε
a_y, a_x = sym_sink( α, x, y, **params)
b_x, b_y = sym_sink( β, y, x, **params)
return .5 * ( scal( α, b_x - a_x ) + scal( β, a_y - b_y ) )
def sinkhorn_divergence(α, x, β, y, **params): # S_ε
a_y, b_x = sink( α, x, β, y, **params)
_, a_x = sym_sink( α, x, **params )
_, b_y = sym_sink( β, y, **params )
return scal( α, b_x - a_x ) + scal( β, a_y - b_y )
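# A minimal usage sketch (illustrative, not part of the original file): compare two
# small random point clouds with uniform weights, using the functionals above.
if __name__ == "__main__":
    torch.manual_seed(0)
    N, M, D = 50, 60, 2
    x = torch.rand(N, D)
    y = torch.rand(M, D) + .25                  # shifted copy of the unit square
    α = torch.full((N,), 1. / N)                # uniform weights on x
    β = torch.full((M,), 1. / M)                # uniform weights on y
    print("OT_eps:", regularized_ot(α, x, β, y, p=2, eps=.1, nits=100).item())
    print("S_eps :", sinkhorn_divergence(α, x, β, y, p=2, eps=.1, nits=100).item())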
|