max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
app/waterQual/CQ/legacy/carts.py | fkwai/geolearn | 0 | 12768351 | import sklearn.tree
import os
import pandas as pd
import numpy as np
from hydroDL import kPath
from hydroDL.data import usgs, gageII
from hydroDL.post import axplot
import matplotlib.pyplot as plt
dirCQ = os.path.join(kPath.dirWQ, 'C-Q')
dfS = pd.read_csv(os.path.join(dirCQ, 'slope'), dtype={
'siteNo': str}).set_index('siteNo')
dfN = pd.read_csv(os.path.join(dirCQ, 'nSample'), dtype={
'siteNo': str}).set_index('siteNo')
siteNoLst = dfS.index.tolist()
codeLst = dfS.columns.tolist()
dropColLst = ['STANAME', 'WR_REPORT_REMARKS',
'ADR_CITATION', 'SCREENING_COMMENTS']
dfX = gageII.readData(siteNoLst=siteNoLst).drop(columns=dropColLst)
dfX = gageII.updateCode(dfX)
dfCrd = gageII.readData(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
code = '00955'
indValid = np.where((~np.isnan(dfS[code].values))
& (dfN[code].values > 10))[0]
dataAll = dfS[code][indValid]
vr = np.max([np.abs(np.percentile(dataAll, 1)),
np.abs(np.percentile(dataAll, 99))])
vRange = [-vr, vr]
def subTree(indInput):
x = dfX.values[indInput, :]
y = dfS[code].values[indInput]
x[np.isnan(x)] = -99
clf = sklearn.tree.DecisionTreeRegressor(max_depth=1)
clf = clf.fit(x, y)
tree = clf.tree_
feat = dfX.columns[tree.feature[0]]
th = tree.threshold[0]
indLeft = np.where(x[:, tree.feature[0]] <= tree.threshold[0])[0]
indRight = np.where(x[:, tree.feature[0]] > tree.threshold[0])[0]
indLeftG = indInput[indLeft]
indRightG = indInput[indRight]
return indLeftG, indRightG, feat, th
def plotCdf(ax, indInput, indLeft, indRight):
cLst = 'gbr'
labLst = ['parent', 'left', 'right']
y0 = dfS[code].values[indInput]
y1 = dfS[code].values[indLeft]
y2 = dfS[code].values[indRight]
dataLst = [y0, y1, y2]
for k, data in enumerate(dataLst):
xSort = np.sort(data[~np.isnan(data)])
yRank = np.arange(1, len(xSort)+1) / float(len(xSort))
ax.plot(xSort, yRank, color=cLst[k], label=labLst[k])
ax.set_xlim(vRange)
ax.legend(loc='best', frameon=False)
def plotMap(ax, indInput):
lat = dfCrd['LAT_GAGE'][indInput]
lon = dfCrd['LNG_GAGE'][indInput]
data = dfS[code][indInput]
axplot.mapPoint(ax, lat, lon, data, vRange=vRange, s=10)
indInput = indValid
indLeft, indRight, feat, th = subTree(indInput)
fig, ax = plt.subplots(1, 1)
plotCdf(ax, indInput, indLeft, indRight)
fig.show()
fig, axes = plt.subplots(2, 1)
plotMap(axes[0], indLeft)
plotMap(axes[1], indRight)
fig.show()
| 2.328125 | 2 |
examples/showcase/src/demos_panels/horizontalPanel.py | certik/pyjamas | 1 | 12768352 |
"""
The ``ui.HorizontalPanel`` class is a panel that lays out its contents from
left to right.
It is often useful to call ``setSpacing(spacing)`` to add space between each of
the panel's widgets. You can also call ``setHorizontalAlignment(alignment)``
and ``setVerticalAlignment(alignment)`` before adding widgets to control how
those widgets are aligned within the available space. Alternatively, you can
call ``setCellHorizontalAlignment(widget, alignment)`` and
``setCellVerticalAlignment(widget, alignment)`` to change the alignment of a
single widget after it has been added.
Note that if you want to have different widgets within the panel take up
different amounts of space, don't call ``widget.setWidth(width)`` or
``widget.setHeight(height)`` as these are ignored by the panel. Instead, call
``panel.setCellWidth(widget, width)`` and ``panel.setCellHeight(widget,
height)``.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.Label import Label
from pyjamas.ui import HasAlignment
class HorizontalPanelDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
panel = HorizontalPanel()
panel.setBorderWidth(1)
panel.setHorizontalAlignment(HasAlignment.ALIGN_CENTER)
panel.setVerticalAlignment(HasAlignment.ALIGN_MIDDLE)
part1 = Label("Part 1")
part2 = Label("Part 2")
part3 = Label("Part 3")
part4 = Label("Part 4")
panel.add(part1)
panel.add(part2)
panel.add(part3)
panel.add(part4)
panel.setCellWidth(part1, "10%")
panel.setCellWidth(part2, "70%")
panel.setCellWidth(part3, "10%")
panel.setCellWidth(part4, "10%")
panel.setCellVerticalAlignment(part3, HasAlignment.ALIGN_BOTTOM)
panel.setWidth("100%")
panel.setHeight("200px")
self.add(panel)
| 3.53125 | 4 |
gaze-test.py | gbazo/gazetracking | 0 | 12768353 | import cv2
from gaze_tracking import GazeTracking
from imutils.video import VideoStream
import imutils
import argparse
import time
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
args = vars(ap.parse_args())
gaze = GazeTracking()
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
vs = cv2.VideoCapture(args["video"])
# allow the camera or video file to warm up
time.sleep(2.0)
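# Hypothetical invocations (the video file name below is made up); with no
# --video flag the script reads from the default webcam, otherwise it
# processes the given file:
#
#     python gaze-test.py
#     python gaze-test.py --video session1.mp4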
count = 0
while True:
t = time.time()
frame = vs.read()
frame = frame[1] if args.get("video", False) else frame
if frame is None:
break
gaze.refresh(frame)
frame = gaze.annotated_frame()
text = ""
#if gaze.is_blinking():
# text = "Blinking"
if gaze.is_right():
text = "DIREITA"
elif gaze.is_left():
text = "ESQUERDA"
elif gaze.is_center():
text = "FRENTE"
else:
text = "DEVIOU"
cv2.putText(frame, text, (850, 60), cv2.FONT_HERSHEY_DUPLEX, 1.4, (147, 58, 31), 2)
#left_pupil = gaze.pupil_left_coords()
#right_pupil = gaze.pupil_right_coords()
#cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.5, (147, 58, 31), 1)
#cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.5, (147, 58, 31), 1)
cv2.imwrite("/home/gabriel/Documentos/blur_face/frame%d.jpg" % count, frame)
count += 1
print("Time to process the frame = {}".format(time.time() - t))
# close all windows
cv2.destroyAllWindows() | 2.8125 | 3 |
tlds/__init__.py | critical-path/py-tlds | 0 | 12768354 | """Util that retrieves and validates
a list of top-level domains from the
Internet Assigned Numbers Authority (IANA)."""
from tlds.lib import TopLevelDomainGetter
from tlds.utils import write_results
__version__ = "1.0.0"
__author__ = "critical-path"
__all__ = [
"TopLevelDomainGetter",
"write_results"
]
| 1.898438 | 2 |
paraml/localDocker.py | yadudoc/ParaML | 0 | 12768355 |
localDockerIPP = {}
| 1.328125 | 1 |
garecovery/tests/test_recovery_scan.py | LeoComandini/garecovery | 61 | 12768356 | #!/usr/bin/env python3
import decimal
import mock
import wallycore as wally
import garecovery.two_of_three
from garecovery.clargs import DEFAULT_SUBACCOUNT_SEARCH_DEPTH
from gaservices.utils import txutil
from .util import AuthServiceProxy, datafile, get_output, parse_summary, raise_IOError
garecovery.bitcoin_config.open = raise_IOError
sub_depth = DEFAULT_SUBACCOUNT_SEARCH_DEPTH
key_depth = 20
destination_address = 'mynHfTyTWyGGB76NBFbfUrTnn8YWQkTJVs'
@mock.patch('garecovery.two_of_three.bitcoincore.AuthServiceProxy')
def test_recover_2of3(mock_bitcoincore):
"""Test 2of3 happy path"""
mock_bitcoincore.return_value = AuthServiceProxy('testnet_txs')
estimate = {'blocks': 3, 'feerate': 1, }
mock_bitcoincore.return_value.estimatesmartfee.return_value = estimate
args = [
'--mnemonic-file={}'.format(datafile('mnemonic_6.txt')),
'--rpcuser=abc',
'--rpcpassword=<PASSWORD>',
'2of3',
'--network=testnet',
'--recovery-mnemonic-file={}'.format(datafile('mnemonic_7.txt')),
'--key-search-depth={}'.format(key_depth),
'--search-subaccounts={}'.format(sub_depth),
'--destination-address={}'.format(destination_address),
]
# Raw tx
output = get_output(args).strip()
assert output == open(datafile("signed_2of3_5")).read().strip()
# Check replace by fee is set
tx = txutil.from_hex(output)
assert wally.tx_get_num_inputs(tx) == 1
assert wally.tx_get_input_sequence(tx, 0) == int(32*'1', 2) - 2
# Summary
args = ['--show-summary', ] + args
output = get_output(args)
summary = parse_summary(output)
assert len(summary) == 1
assert summary[0]['destination address'] == destination_address
@mock.patch('garecovery.two_of_three.bitcoincore.AuthServiceProxy')
def test_set_nlocktime(mock_bitcoincore):
"""Test that newly created recovery transactions have nlocktime = current blockheight + 1"""
mock_bitcoincore.return_value = AuthServiceProxy('testnet_txs')
estimate = {'blocks': 3, 'feerate': 1, }
mock_bitcoincore.return_value.estimatesmartfee.return_value = estimate
current_blockheight = 123
mock_bitcoincore.return_value.getblockcount.return_value = current_blockheight
args = [
'--mnemonic-file={}'.format(datafile('mnemonic_6.txt')),
'--rpcuser=abc',
'--rpcpassword=<PASSWORD>',
'2of3',
'--network=testnet',
'--recovery-mnemonic-file={}'.format(datafile('mnemonic_7.txt')),
'--key-search-depth={}'.format(key_depth),
'--search-subaccounts={}'.format(sub_depth),
'--destination-address={}'.format(destination_address),
]
output = get_output(args).strip()
tx = txutil.from_hex(output)
assert wally.tx_get_locktime(tx) == current_blockheight
@mock.patch('garecovery.two_of_three.bitcoincore.AuthServiceProxy')
def test_recover_2of2_csv(mock_bitcoincore):
"""Test 2of2-csv happy path"""
mock_bitcoincore.return_value = AuthServiceProxy('testnet_txs')
estimate = {'blocks': 3, 'feerate': decimal.Decimal('0.00001'), }
mock_bitcoincore.return_value.estimatesmartfee.return_value = estimate
mock_bitcoincore.return_value.getnetworkinfo = mock.Mock(return_value={'version': 190100})
mock_bitcoincore.return_value.getblockcount.return_value = 144
args = [
'--mnemonic-file={}'.format(datafile('mnemonic_1.txt')),
'--rpcuser=abc',
'--rpcpassword=<PASSWORD>',
'2of2-csv',
'--network=testnet',
'--key-search-depth={}'.format(key_depth),
'--search-subaccounts={}'.format(sub_depth),
]
# Raw tx
output = get_output(args).strip()
assert output == open(datafile("signed_2of2_csv_1")).read().strip()
tx = txutil.from_hex(output)
assert wally.tx_get_num_inputs(tx) == 1
# Summary
args = ['--show-summary', ] + args
output = get_output(args)
summary = parse_summary(output)
assert len(summary) == 1
# Use scantxoutset instead of importmulti + listunspent
scantxoutset_result = {
'success': True,
'unspents': [{
'txid': '0ab5d70ef25a601de455155fdcb8c492d21a9b3063211dc8a969568d9d0fe15b',
'vout': 0,
'scriptPubKey': 'a91458ce12e1773dd078940a9dc855b94c3c9a343b8587',
'desc': 'addr(2N1LnKRLTCWr8H9UdwoREazuFDXHMEgZj9g)#ztm9gzsm',
'amount': 0.001,
'height': 0,
}],
}
mock_bitcoincore.return_value.scantxoutset = mock.Mock(return_value=scantxoutset_result)
# output not expired yet
mock_bitcoincore.return_value.getblockcount.return_value = 143
args = [
'--mnemonic-file={}'.format(datafile('mnemonic_1.txt')),
'--rpcuser=abc',
'--rpcpassword=<PASSWORD>',
'2of2-csv',
'--network=testnet',
'--key-search-depth={}'.format(key_depth),
'--search-subaccounts={}'.format(sub_depth),
'--ignore-mempool',
]
# Raw tx
raw_tx = get_output(args).strip()
assert raw_tx == ''
# output expired
mock_bitcoincore.return_value.getblockcount.return_value = 144
# Raw tx
output = get_output(args).strip()
assert output == open(datafile("signed_2of2_csv_1")).read().strip()
# Check replace by fee is set
tx = txutil.from_hex(output)
assert wally.tx_get_num_inputs(tx) == 1
# Summary
args = ['--show-summary', ] + args
output = get_output(args)
summary = parse_summary(output)
assert len(summary) == 1
| 1.914063 | 2 |
tests/test_e2e_03_Reporting_vertical.py | blue-monk/csv-diff-python2 | 0 | 12768357 | import sys
import textwrap
from src.csvdiff2 import csvdiff
def test_show_difference(lhs, rhs, capfd):
lhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035904, value4-3
102, value1-4, key2-1, 1004, 20210924T180521, value4-e
1003, value1-5, key2-1, 1005, 20210924T180528, value4-5
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-7, key2-3, 1007, 20210923T143258, value4-7
1003, value1-e, key2-4, 1008, 20210923T143259, value4-8
''').strip())
rhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-1, key2-1, 1001, 20210921T035901, value4-1
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035903, value4-3
102, value1-4e, key2-1, 1044, 20210924T180529, value4-4
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-8, key2-4, 1008, 20210923T143257, value4-e
''').strip())
sys.argv = ['csvdiff.py', lhs.strpath, rhs.strpath, '-k0:4,2', '-dv']
csvdiff.main()
out, err = capfd.readouterr()
assert err == ''
assert out == textwrap.dedent('''
============ Report ============
* Differences
--------------------------------------------------------------------------------
L left.csv
R right.csv
--------------------------------------------------------------------------------
> R 2 ['1', 'value1-1', 'key2-1', '1001', '20210921T035901', 'value4-1']
! @ [4]
L 3 ['1', 'value1-3', 'key2-3', '1003', '20210921T035904', 'value4-3']
R 4 ['1', 'value1-3', 'key2-3', '1003', '20210921T035903', 'value4-3']
! @ [1, 3, 4, 5]
L 4 ['102', 'value1-4', 'key2-1', '1004', '20210924T180521', 'value4-e']
R 5 ['102', 'value1-4e', 'key2-1', '1044', '20210924T180529', 'value4-4']
< L 5 ['1003', 'value1-5', 'key2-1', '1005', '20210924T180528', 'value4-5']
< L 7 ['1003', 'value1-7', 'key2-3', '1007', '20210923T143258', 'value4-7']
! @ [1, 4, 5]
L 8 ['1003', 'value1-e', 'key2-4', '1008', '20210923T143259', 'value4-8']
R 7 ['1003', 'value1-8', 'key2-4', '1008', '20210923T143257', 'value4-e']
''')
def test_show_difference_and_number_of_cases(lhs, rhs, capfd):
lhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035904, value4-3
102, value1-4, key2-1, 1004, 20210924T180521, value4-e
1003, value1-5, key2-1, 1005, 20210924T180528, value4-5
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-7, key2-3, 1007, 20210923T143258, value4-7
1003, value1-e, key2-4, 1008, 20210923T143259, value4-8
''').strip())
rhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-1, key2-1, 1001, 20210921T035901, value4-1
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035903, value4-3
102, value1-4e, key2-1, 1044, 20210924T180529, value4-4
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-8, key2-4, 1008, 20210923T143257, value4-e
''').strip())
sys.argv = ['csvdiff.py', lhs.strpath, rhs.strpath, '-k0:4,2', '-dvc']
csvdiff.main()
out, err = capfd.readouterr()
assert err == ''
assert out == textwrap.dedent('''
============ Report ============
* Differences
--------------------------------------------------------------------------------
L left.csv
R right.csv
--------------------------------------------------------------------------------
> R 2 ['1', 'value1-1', 'key2-1', '1001', '20210921T035901', 'value4-1']
! @ [4]
L 3 ['1', 'value1-3', 'key2-3', '1003', '20210921T035904', 'value4-3']
R 4 ['1', 'value1-3', 'key2-3', '1003', '20210921T035903', 'value4-3']
! @ [1, 3, 4, 5]
L 4 ['102', 'value1-4', 'key2-1', '1004', '20210924T180521', 'value4-e']
R 5 ['102', 'value1-4e', 'key2-1', '1044', '20210924T180529', 'value4-4']
< L 5 ['1003', 'value1-5', 'key2-1', '1005', '20210924T180528', 'value4-5']
< L 7 ['1003', 'value1-7', 'key2-3', '1007', '20210923T143258', 'value4-7']
! @ [1, 4, 5]
L 8 ['1003', 'value1-e', 'key2-4', '1008', '20210923T143259', 'value4-8']
R 7 ['1003', 'value1-8', 'key2-4', '1008', '20210923T143257', 'value4-e']
* Count & Row number
same lines : 2
left side only (<): 2 :-- Row Numbers -->: [5, 7]
right side only (>): 1 :-- Row Numbers -->: [2]
with differences (!): 3 :-- Row Number Pairs -->: [(3, 4), (4, 5), (8, 7)]
''')
def test_show_all_and_number_of_cases(lhs, rhs, capfd):
lhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035904, value4-3
102, value1-4, key2-1, 1004, 20210924T180521, value4-e
1003, value1-5, key2-1, 1005, 20210924T180528, value4-5
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-7, key2-3, 1007, 20210923T143258, value4-7
1003, value1-e, key2-4, 1008, 20210923T143259, value4-8
''').strip())
rhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-1, key2-1, 1001, 20210921T035901, value4-1
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035903, value4-3
102, value1-4e, key2-1, 1044, 20210924T180529, value4-4
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-8, key2-4, 1008, 20210923T143257, value4-e
''').strip())
sys.argv = ['csvdiff.py', lhs.strpath, rhs.strpath, '-k0:4,2', '-avc']
csvdiff.main()
out, err = capfd.readouterr()
assert err == ''
assert out == textwrap.dedent('''
============ Report ============
* All
--------------------------------------------------------------------------------
L left.csv
R right.csv
--------------------------------------------------------------------------------
> R 2 ['1', 'value1-1', 'key2-1', '1001', '20210921T035901', 'value4-1']
=
L 2 ['1', 'value1-2', 'key2-2', '1002', '20210921T035902', 'value4-2']
R 3 ['1', 'value1-2', 'key2-2', '1002', '20210921T035902', 'value4-2']
! @ [4]
L 3 ['1', 'value1-3', 'key2-3', '1003', '20210921T035904', 'value4-3']
R 4 ['1', 'value1-3', 'key2-3', '1003', '20210921T035903', 'value4-3']
! @ [1, 3, 4, 5]
L 4 ['102', 'value1-4', 'key2-1', '1004', '20210924T180521', 'value4-e']
R 5 ['102', 'value1-4e', 'key2-1', '1044', '20210924T180529', 'value4-4']
< L 5 ['1003', 'value1-5', 'key2-1', '1005', '20210924T180528', 'value4-5']
=
L 6 ['1003', 'value1-6', 'key2-2', '1006', '20210923T143259', 'value4-6']
R 6 ['1003', 'value1-6', 'key2-2', '1006', '20210923T143259', 'value4-6']
< L 7 ['1003', 'value1-7', 'key2-3', '1007', '20210923T143258', 'value4-7']
! @ [1, 4, 5]
L 8 ['1003', 'value1-e', 'key2-4', '1008', '20210923T143259', 'value4-8']
R 7 ['1003', 'value1-8', 'key2-4', '1008', '20210923T143257', 'value4-e']
* Count & Row number
same lines : 2
left side only (<): 2 :-- Row Numbers -->: [5, 7]
right side only (>): 1 :-- Row Numbers -->: [2]
with differences (!): 3 :-- Row Number Pairs -->: [(3, 4), (4, 5), (8, 7)]
''')
def test_show_all_and_number_of_cases_with_ignore_column(lhs, rhs, capfd):
lhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035904, value4-3
102, value1-4, key2-1, 1004, 20210924T180521, value4-e
1003, value1-5, key2-1, 1005, 20210924T180528, value4-5
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-7, key2-3, 1007, 20210923T143258, value4-7
1003, value1-e, key2-4, 1008, 20210923T143259, value4-8
''').strip())
rhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-1, key2-1, 1001, 20210921T035901, value4-1
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035903, value4-3
102, value1-4e, key2-1, 1044, 20210924T180529, value4-4
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-8, key2-4, 1008, 20210923T143257, value4-e
''').strip())
sys.argv = ['csvdiff.py', lhs.strpath, rhs.strpath, '-k0:4,2', '-avc', '-i1']
csvdiff.main()
out, err = capfd.readouterr()
assert err == ''
assert out == textwrap.dedent('''
============ Report ============
* All
--------------------------------------------------------------------------------
L left.csv
R right.csv
--------------------------------------------------------------------------------
> R 2 ['1', 'value1-1', 'key2-1', '1001', '20210921T035901', 'value4-1']
=
L 2 ['1', 'value1-2', 'key2-2', '1002', '20210921T035902', 'value4-2']
R 3 ['1', 'value1-2', 'key2-2', '1002', '20210921T035902', 'value4-2']
! @ [4]
L 3 ['1', 'value1-3', 'key2-3', '1003', '20210921T035904', 'value4-3']
R 4 ['1', 'value1-3', 'key2-3', '1003', '20210921T035903', 'value4-3']
! @ [3, 4, 5]
L 4 ['102', 'value1-4', 'key2-1', '1004', '20210924T180521', 'value4-e']
R 5 ['102', 'value1-4e', 'key2-1', '1044', '20210924T180529', 'value4-4']
< L 5 ['1003', 'value1-5', 'key2-1', '1005', '20210924T180528', 'value4-5']
=
L 6 ['1003', 'value1-6', 'key2-2', '1006', '20210923T143259', 'value4-6']
R 6 ['1003', 'value1-6', 'key2-2', '1006', '20210923T143259', 'value4-6']
< L 7 ['1003', 'value1-7', 'key2-3', '1007', '20210923T143258', 'value4-7']
! @ [4, 5]
L 8 ['1003', 'value1-e', 'key2-4', '1008', '20210923T143259', 'value4-8']
R 7 ['1003', 'value1-8', 'key2-4', '1008', '20210923T143257', 'value4-e']
* Count & Row number
same lines : 2
left side only (<): 2 :-- Row Numbers -->: [5, 7]
right side only (>): 1 :-- Row Numbers -->: [2]
with differences (!): 3 :-- Row Number Pairs -->: [(3, 4), (4, 5), (8, 7)]
''')
def test_show_all_and_number_of_cases_with_ignore_columns(lhs, rhs, capfd):
lhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035904, value4-3
102, value1-4, key2-1, 1004, 20210924T180521, value4-e
1003, value1-5, key2-1, 1005, 20210924T180528, value4-5
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-7, key2-3, 1007, 20210923T143258, value4-7
1003, value1-e, key2-4, 1008, 20210923T143259, value4-8
''').strip())
rhs.write(textwrap.dedent('''
head1, head2, head3, head4, head5, head6
1, value1-1, key2-1, 1001, 20210921T035901, value4-1
1, value1-2, key2-2, 1002, 20210921T035902, value4-2
1, value1-3, key2-3, 1003, 20210921T035903, value4-3
102, value1-4e, key2-1, 1044, 20210924T180529, value4-4
1003, value1-6, key2-2, 1006, 20210923T143259, value4-6
1003, value1-8, key2-4, 1008, 20210923T143257, value4-e
''').strip())
sys.argv = ['csvdiff.py', lhs.strpath, rhs.strpath, '-k0:4,2', '-avc', '-i1,4']
csvdiff.main()
out, err = capfd.readouterr()
assert err == ''
assert out == textwrap.dedent('''
============ Report ============
* All
--------------------------------------------------------------------------------
L left.csv
R right.csv
--------------------------------------------------------------------------------
> R 2 ['1', 'value1-1', 'key2-1', '1001', '20210921T035901', 'value4-1']
=
L 2 ['1', 'value1-2', 'key2-2', '1002', '20210921T035902', 'value4-2']
R 3 ['1', 'value1-2', 'key2-2', '1002', '20210921T035902', 'value4-2']
=
L 3 ['1', 'value1-3', 'key2-3', '1003', '20210921T035904', 'value4-3']
R 4 ['1', 'value1-3', 'key2-3', '1003', '20210921T035903', 'value4-3']
! @ [3, 5]
L 4 ['102', 'value1-4', 'key2-1', '1004', '20210924T180521', 'value4-e']
R 5 ['102', 'value1-4e', 'key2-1', '1044', '20210924T180529', 'value4-4']
< L 5 ['1003', 'value1-5', 'key2-1', '1005', '20210924T180528', 'value4-5']
=
L 6 ['1003', 'value1-6', 'key2-2', '1006', '20210923T143259', 'value4-6']
R 6 ['1003', 'value1-6', 'key2-2', '1006', '20210923T143259', 'value4-6']
< L 7 ['1003', 'value1-7', 'key2-3', '1007', '20210923T143258', 'value4-7']
! @ [5]
L 8 ['1003', 'value1-e', 'key2-4', '1008', '20210923T143259', 'value4-8']
R 7 ['1003', 'value1-8', 'key2-4', '1008', '20210923T143257', 'value4-e']
* Count & Row number
same lines : 3
left side only (<): 2 :-- Row Numbers -->: [5, 7]
right side only (>): 1 :-- Row Numbers -->: [2]
with differences (!): 2 :-- Row Number Pairs -->: [(4, 5), (8, 7)]
''')
| 2.265625 | 2 |
Chapter07/basic_svm.py | AIHZP/ROS-Robotics-Projects-published-by-Packt | 85 | 12768358 | from sklearn import svm
import numpy as np
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
model = svm.SVC(kernel='linear',C=1,gamma=1)
model.fit(X,y)
print(model.predict([[-0.8,-1]]))
| 3.078125 | 3 |
Python/Tests/TestData/AstAnalysis/Functions.py | techkey/PTVS | 695 | 12768359 | def f():
'''f'''
pass
def f1(): pass
f2 = f
if True:
def g(): pass
else:
def h(): pass
class C:
def i(self): pass
def j(self):
def j2(self):
pass
class C2:
def k(self): pass
| 2.921875 | 3 |
D_Network_analysis/solutions/ex1_2.py | oercompbiomed/CBM101 | 7 | 12768360 |
print(G.number_of_edges())
print(len(G.edges()))
# both are equally valid solutions | 1.492188 | 1 |
rantlib/core_application/ui/parts/page.py | AlgoRythm-Dylan/qtpy-rant | 1 | 12768361 | from types import SimpleNamespace
class Page:
def __init__(self):
self.root = None
self.ui = SimpleNamespace() | 2.171875 | 2 |
malcolm/modules/scanning/hooks.py | dinojugosloven/pymalcolm | 0 | 12768362 |
from annotypes import Anno, Any, Array, Mapping, TYPE_CHECKING, NO_DEFAULT, \
Sequence, Union
from scanpointgenerator import CompoundGenerator
from .infos import ParameterTweakInfo, Info
from malcolm.compat import OrderedDict
from malcolm.core import VMeta
from malcolm.modules import builtin
from .infos import ConfigureParamsInfo
if TYPE_CHECKING:
from typing import Dict, Callable, TypeVar
T = TypeVar("T")
with Anno("The Infos returned from other Parts"):
APartInfo = Mapping[str, Array[Info]]
UPartInfo = Union[APartInfo, Mapping[str, Sequence[Info]]]
with Anno("Infos about current Part status to be passed to other parts"):
AInfos = Array[Info]
with Anno("Generator instance providing specification for scan"):
AGenerator = CompoundGenerator
with Anno("List of axes in inner dimension of generator that should be moved"):
AAxesToMove = Array[str]
UAxesToMove = Union[AAxesToMove, Sequence[str]]
with Anno("Parameters that need to be changed to make them compatible"):
AParameterTweakInfos = Array[ParameterTweakInfo]
UInfos = Union[AInfos, Sequence[Info], Info, None]
UParameterTweakInfos = Union[AParameterTweakInfos, Sequence[ParameterTweakInfo],
ParameterTweakInfo, None]
with Anno("Directory to write data to"):
AFileDir = str
with Anno("Argument for fileTemplate, normally filename without extension"):
AFormatName = str
with Anno("""Printf style template to generate filename relative to fileDir.
Arguments are:
1) %s: the value of formatName"""):
AFileTemplate = str
with Anno("The demand exposure time of this scan, 0 for the maximum possible"):
AExposure = float
# Pull re-used annotypes into our namespace in case we are subclassed
APart = builtin.hooks.APart
AContext = builtin.hooks.AContext
# also bring in superclass which a subclasses may refer to
ControllerHook = builtin.hooks.ControllerHook
def check_array_info(anno, value):
# type: (T, Any) -> T
assert anno.is_array and issubclass(anno.typ, Info), \
"Expected Anno wrapping Array[something], got %s" % anno
ret = anno(value)
bad = [x for x in ret if not isinstance(x, anno.typ)]
assert not bad, \
"Passed objects %s that are not of type %s" % (bad, anno.typ)
return ret
class ValidateHook(ControllerHook[UParameterTweakInfos]):
"""Called at validate() to check parameters are valid"""
# Allow CamelCase for axesToMove as it must match ConfigureParams which
# will become a configure argument, so must be camelCase to match EPICS
# normative types conventions
# noinspection PyPep8Naming
def __init__(self,
part, # type: APart
context, # type: AContext
part_info, # type: UPartInfo
generator, # type: AGenerator
axesToMove, # type: AAxesToMove
**kwargs # type: Any
):
# type: (...) -> None
super(ValidateHook, self).__init__(
part, context, part_info=part_info, generator=generator,
axesToMove=axesToMove, **kwargs)
def validate_return(self, ret):
# type: (UParameterTweakInfos) -> AParameterTweakInfos
"""Check that all returned infos are ParameterTweakInfo that list
the parameters that need to be changed to make them compatible with
this part. ValidateHook will be re-run with the modified parameters."""
return check_array_info(AParameterTweakInfos, ret)
class ReportStatusHook(ControllerHook[UInfos]):
"""Called before Validate, Configure, PostRunArmed and Seek hooks to report
the current configuration of all parts"""
def validate_return(self, ret):
# type: (UInfos) -> AInfos
"""Check that all parts return Info objects relevant to other parts"""
return check_array_info(AInfos, ret)
with Anno("Number of steps already completed"):
ACompletedSteps = int
with Anno("Number of steps we should configure for"):
AStepsToDo = int
class PreConfigureHook(ControllerHook[None]):
"""Called before configure() to get the device into a suitable state to
report status and run configure. Typically will load a saved design."""
class ConfigureHook(ControllerHook[UInfos]):
"""Called at configure() to configure child block for a run"""
# Allow CamelCase for axesToMove as it must match ConfigureParams which
# will become a configure argument, so must be camelCase to match EPICS
# normative types conventions
# noinspection PyPep8Naming
def __init__(self,
part, # type: APart
context, # type: AContext
completed_steps, # type: ACompletedSteps
steps_to_do, # type: AStepsToDo
part_info, # type: APartInfo
generator, # type: AGenerator
axesToMove, # type: AAxesToMove
**kwargs # type: **Any
):
# type: (...) -> None
super(ConfigureHook, self).__init__(
part, context, completed_steps=completed_steps,
steps_to_do=steps_to_do, part_info=part_info, generator=generator,
axesToMove=axesToMove, **kwargs)
@classmethod
def create_info(cls, configure_func):
# type: (Callable) -> ConfigureParamsInfo
"""Create a `ConfigureParamsInfo` describing the extra parameters
that should be passed at configure"""
call_types = getattr(configure_func, "call_types",
{}) # type: Dict[str, Anno]
metas = OrderedDict()
required = []
defaults = OrderedDict()
for k, anno in call_types.items():
if k not in cls.call_types:
scls = VMeta.lookup_annotype_converter(anno)
metas[k] = scls.from_annotype(anno, writeable=True)
if anno.default is NO_DEFAULT:
required.append(k)
elif anno.default is not None:
defaults[k] = anno.default
return ConfigureParamsInfo(metas, required, defaults)
def validate_return(self, ret):
# type: (UInfos) -> AInfos
"""Check that all parts return Info objects for storing as attributes
"""
return check_array_info(AInfos, ret)
class PostConfigureHook(ControllerHook[None]):
"""Called at the end of configure() to store configuration info calculated
in the Configure hook"""
def __init__(self, part, context, part_info):
# type: (APart, AContext, APartInfo) -> None
super(PostConfigureHook, self).__init__(
part, context, part_info=part_info)
class PreRunHook(ControllerHook[None]):
"""Called at the start of run()"""
class RunHook(ControllerHook[None]):
"""Called at run() to start the configured steps running"""
class PostRunArmedHook(ControllerHook[None]):
"""Called at the end of run() when there are more steps to be run"""
# Allow CamelCase for axesToMove as it must match ConfigureParams which
# will become a configure argument, so must be camelCase to match EPICS
# normative types conventions
# noinspection PyPep8Naming
def __init__(self,
part, # type: APart
context, # type: AContext
completed_steps, # type: ACompletedSteps
steps_to_do, # type: AStepsToDo
part_info, # type: UPartInfo
generator, # type: AGenerator
axesToMove, # type: AAxesToMove
**kwargs # type: Any
):
# type: (...) -> None
super(PostRunArmedHook, self).__init__(
part, context, completed_steps=completed_steps,
steps_to_do=steps_to_do, part_info=part_info, generator=generator,
axesToMove=axesToMove, **kwargs)
class PostRunReadyHook(ControllerHook[None]):
"""Called at the end of run() when there are no more steps to be run"""
class PauseHook(ControllerHook[None]):
"""Called at pause() to pause the current scan before Seek is called"""
class SeekHook(ControllerHook[None]):
"""Called at seek() or at the end of pause() to reconfigure for a different
number of completed_steps"""
# Allow CamelCase for axesToMove as it must match ConfigureParams which
# will become a configure argument, so must be camelCase to match EPICS
# normative types conventions
# noinspection PyPep8Naming
def __init__(self,
part, # type: APart
context, # type: AContext
completed_steps, # type: ACompletedSteps
steps_to_do, # type: AStepsToDo
part_info, # type: APartInfo
generator, # type: AGenerator
axesToMove, # type: AAxesToMove
**kwargs # type: **Any
):
# type: (...) -> None
super(SeekHook, self).__init__(
part, context, completed_steps=completed_steps,
steps_to_do=steps_to_do, part_info=part_info, generator=generator,
axesToMove=axesToMove, **kwargs)
class AbortHook(ControllerHook[None]):
"""Called at abort() to stop the current scan"""
| 2.25 | 2 |
baekjoon/python/im_going_to_meet_18235.py | yskang/AlgorithmPractice | 0 | 12768363 |
# Title: 지금 만나러 갑니다 (I'm Going to Meet You)
# Link: https://www.acmicpc.net/problem/18235
import sys
from collections import deque
sys.setrecursionlimit(10 ** 6)
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def bfs(n: int, b: int):
queue = deque()
queue.append((b, 0))
pos = [0 for _ in range(n+1)]
t = 0
while queue:
ori, time = queue.popleft()
if t != time:
t += 1
pos[ori] = time
yield pos
offset = pow(2, time)
p = ori - offset
if 0 < p:
queue.append((p, time+1))
pos[p] = time+1
p = ori + offset
if p <= n:
queue.append((p, time+1))
pos[p] = time+1
yield -1
def solution(n: int, a: int, b: int):
queue = deque()
queue.append((a, 0))
bfs_co = bfs(n, b)
ori_b = next(bfs_co)
t = 0
while queue:
ori, time = queue.popleft()
if time != t:
t += 1
ori_b = next(bfs_co)
if ori_b == -1:
return -1
offset = pow(2, time)
p = ori - offset
if p > 0:
queue.append((p, time+1))
if ori_b[p] == time+1:
return time+1
p = ori + offset
if p <= n:
queue.append((p, time+1))
if ori_b[p] == time+1:
return time+1
return -1
def main():
n, a, b = read_list_int()
print(solution(n, a, b))
if __name__ == '__main__':
main() | 3.046875 | 3 |
rest_framework_simplejwt/state.py | SPKorhonen/django-rest-framework-simplejwt | 0 | 12768364 |
from django.contrib.auth import get_user_model
from .backends import TokenBackend
from .settings import api_settings
User = get_user_model()
token_backend = TokenBackend(
algorithm=api_settings.ALGORITHM,
signing_key=api_settings.SIGNING_KEY,
verifying_key=api_settings.VERIFYING_KEY,
secret_key=api_settings.JWT_SECRET_KEY,
get_user_secret_key=api_settings.GET_USER_SECRET_KEY,
audience=api_settings.AUDIENCE,
issuer=api_settings.ISSUER
)
| 1.78125 | 2 |
microlib/matcher/adaptergen_faster.py | Daniele/microtrim | 0 | 12768365 | '''
Faster brute-force adapter matcher
'''
import time
VERBOSE = False
BASES = ('A', 'C', 'G', 'T')
def addNewAdapterToSet(ad, adSet):
adSet.add(ad)
if VERBOSE:
print(f'Adding {ad}')
time.sleep(.1)
return adSet
def makeAdapters(adapter):
# Build the set of adapter variants to search for: the original adapter plus
# every single-base substitution, single-base insertion, one-base deletion and
# two-base deletion, each with its last 8 bases dropped before being stored.
adapters = set()
adapters.add(adapter[:-8])
for i, x in enumerate(adapter):
# single-base substitutions at position i
for j in BASES:
ad = adapter[:i] + j + adapter[i+1:]
adapters.add(ad[:-8])
# single-base insertions before position i
for l in BASES:
ad = adapter[:i] + l + adapter[i:]
adapters.add(ad[:-8])
# deletion of one base at position i
ad = adapter[:i] + adapter[i+1:]
adapters.add(ad[:-8])
# deletion of two bases starting at position i
ad = adapter[:i] + adapter[i+2:]
adapters.add(ad[:-8])
return adapters
def build(adapter, _):
'''
Build a brute force adapter macher with no parameters
'''
adapters = sorted(makeAdapters(adapter))
def match(line):
for adapter in adapters:
if adapter in line:
return line.find(adapter)
return match
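# Hypothetical usage sketch (the adapter sequence and read are made-up values):
#
#     match = build("AGATCGGAAGAGCACACGTCT", None)
#     pos = match("ACGTACGTAGATCGGAAGAGCACACGTCT")
#     # `pos` is the index where a stored adapter variant starts in the read,
#     # or None if no variant occurs in it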
| 3.21875 | 3 |
Functions/token_manipulation.py | VatsalGala/Semantic-Tree | 0 | 12768366 |
# Token Manipulation Functions --->
from collections import defaultdict, OrderedDict
import spacy
from . import global_variables as gv
from .logging import log, log_return
# Indexing of Tokens to a private system.
def set_index():
"""No-param function. Sets index <custom variable> for each token.
Uses named tuple "Index" of the format(pid, sid, wid)
Each successive paragraph, sentence, word recieves an incrementing ID
Returns: None
Note:
Sets Global Variable <SENT_RANGE, WORD_RANGE, DOC>
"""
gv.SENT_RANGE = len(list(gv.DOC.sents))
gv.WORD_RANGE = len(list(gv.DOC))
spacy.tokens.Token.set_extension('index', default=None, force=True)
pc, sc, wc = 0, 0, 0
for t in gv.DOC:
if t.text == '\n':
pc += 1
if t.is_sent_start:
sc += 1
if not t.is_punct and t.text != '\n':
t._.index = gv.Index(pc, sc, wc)
wc += 1
# Frequency Counts and Instance List and unique tokens
def set_extentions(doc):
"""No-param function. Sets 'frequency' and 'instance_list' variable for each token.
The frequency is calculated by lemma_.lower() word of the noun phrase.
And lemma_.lower() is used to add instance to instance list.
Returns: None
"""
freq_count = defaultdict(int)
instance_list = defaultdict(list)
for t in doc:
freq_count[t.lemma_.lower()] += 1
instance_list[t.lemma_.lower()].append((t))
def get_freq(t): return freq_count[t.lemma_.lower()]
def get_instance_list(t): return instance_list[t.lemma_.lower()]
spacy.tokens.Token.set_extension('frequency', getter=get_freq, force=True)
spacy.tokens.Token.set_extension('instance_list', getter=get_instance_list, force=True)
return doc
@log
def make_unique(tokens):
uniq = OrderedDict()
for t in tokens:
uniq.setdefault(t.lemma_.lower(), t)
return list(uniq.values())
| 2.625 | 3 |
qubiter/adv_applications/setup_autograd.py | yourball/qubiter | 3 | 12768367 | """
The purpose of this file is to install autograd and its dependencies and to
provide utility functions that are used when it is used in conjunction with
Qubiter.
When using autograd, one declares np to be the alias to module
numpy.autograd. If another file later declares np to be alias to numpy,
all sorts of error messages start cropping up. What I've done to avoid this
is to change the statements `import numpy as np` in some (not all, just the
ones called while using autograd) files by
import sys
if 'autograd.numpy' not in sys.modules:
import numpy as np
else:
import autograd.numpy as np
References
----------
1. https://github.com/HIPS/autograd/blob/master/docs/tutorial.md
2. https://github.com/HIPS/autograd/blob/master/docs/updateguide.md
"""
import autograd.numpy as np
from autograd import grad, jacobian
from autograd.extend import primitive, defvjp
import sys
print('np installed?', 'np' in sys.modules) # False
print('numpy installed?', 'numpy' in sys.modules) # True
print('autograd.numpy installed?', 'autograd.numpy' in sys.modules) # True
def sig_all():
"""
This method returns a numpy array of shape=(2, 2, 3) which contains the
3 Pauli matrices: sigx = sig_all[:, :, 0], sigy = sig_all[:, :, 1],
sigz = sig_all[:, :, 2].
Returns
-------
np.ndarray
shape = (2, 2, 3)
"""
sigx = np.array([[0, 1], [1, 0]])
sigy = np.array([[0, -1j], [1j, 0]])
sigz = np.array([[1, 0], [0, -1]])
all_paulis = np.vstack([sigx, sigy, sigz])
all_paulis = np.reshape(all_paulis, (3, 2, 2)).transpose(1, 2, 0)
return all_paulis
def u2_alt(*tlist):
"""
An alternative to OneBitGates.u2(). Both should return identical 2-dim
matrices for identical arguments.
Parameters
----------
tlist : list[float]
tlist = [rads0, rads1, rads2, rads3]
Returns
-------
np.ndarray
shape = (2, 2)
"""
assert len(tlist) == 4
t = np.sqrt(tlist[1]**2 + tlist[2]**2 + tlist[3]**2)
if abs(t) < 1e-6:
return np.exp(1j*tlist[0])*np.eye(2)
tvec = np.array([tlist[1], tlist[2], tlist[3]])/t
out = np.eye(2)*np.cos(t) + 1j*np.dot(sig_all(), tvec)*np.sin(t)
return np.exp(1j*tlist[0])*out
def d_u2(dwrt, *tlist):
"""
tlist is a list of 4 floats, and dwrt (which stands for "derivative with
respect to") is in range(4). This method returns the analytical (not
numerical, in terms of closed functions) derivative of u2(*tlist) with
respect to tlist[dwrt].
The output of this method has been verified by comparing it to same
derivatives calculated numerically with autograd.
Parameters
----------
dwrt : int
tlist : list[float]
Returns
-------
np.ndarray
shape = (2, 2)
"""
assert dwrt in range(4)
assert len(tlist) == 4
if dwrt == 0:
return 1j*u2_alt(*tlist)
dwrt -= 1
t = np.sqrt(tlist[1]**2 + tlist[2]**2 + tlist[3]**2)
if abs(t) < 1e-6:
# we already know dwrt !=0
return np.zeros((2, 2), dtype=complex)
tvec = np.array([tlist[1], tlist[2], tlist[3]])/t
dotted_vec = tvec*tvec[dwrt]*np.cos(t) +\
(np.sin(t)/t)*(-tvec*tvec[dwrt] + np.eye(3)[dwrt, :])
out = -np.sin(t)*tvec[dwrt]*np.eye(2) +\
1j*np.dot(sig_all(), dotted_vec)
return np.exp(1j*tlist[0])*out
def d_auto_u2(dwrt, *tlist):
"""
Returns the automatic (computed by backprop) derivative of 2-dim matrix
UnitaryMat.u2_alt. UnitaryMat.u2_alt is an alternative to
OneBitGates.u2. Both functions return same answer for identical input (
input is 4 real parameters in tlist).
Parameters
----------
dwrt : int
stands for 'derivative with respect to'. int in range(4)
tlist : list[float]
len = 4
Returns
-------
np.ndarray
shape=(2,2)
"""
def u2r(*tlist1):
return np.real(u2_alt(*tlist1))
def u2i(*tlist1):
return np.imag(u2_alt(*tlist1))
return jacobian(u2r, dwrt)(*tlist) + 1j*jacobian(u2i, dwrt)(*tlist)
@primitive
def pu2r(*tlist):
"""
Returns real part of u2, and registers it as being primitive.
Primitive means that its derivative will be provided in a defvjp (
def of vector-jacobian-product) so no need for autograd to calculate it
from the u2 definition.
Parameters
----------
tlist : list[float]
len = 4
Returns
-------
np.ndarray
shape=(2,2)
"""
return np.real(u2_alt(*tlist))
@primitive
def pu2i(*tlist):
"""
Returns imaginary part of u2, and registers it as being primitive.
Primitive means that its derivative will be provided in a defvjp (
def of vector-jacobian-product) so no need for autograd to calculate it
from the u2 definition.
Parameters
----------
tlist : list[float]
len = 4
Returns
-------
np.ndarray
shape=(2,2)
"""
return np.imag(u2_alt(*tlist))
def pu2(*tlist):
"""
Returns primitive u2 as (primitive real part of u2) + j*(primtive
imaginary part of u2).
Parameters
----------
tlist : list[float]
len = 4
Returns
-------
np.ndarray
shape=(2,2)
"""
# print('mmbbvv, pu2', pu2r(*tlist) +1j* pu2r(*tlist))
return pu2r(*tlist) + 1j*pu2i(*tlist)
defvjp(pu2r,
# defines vector-jacobian-product of pu2r
# g.shape == pu2r.shape
lambda ans, *tlist: lambda g: np.sum(
g*np.real(d_u2(0, *tlist))),
lambda ans, *tlist: lambda g: np.sum(
g*np.real(d_u2(1, *tlist))),
lambda ans, *tlist: lambda g: np.sum(
g*np.real(d_u2(2, *tlist))),
lambda ans, *tlist: lambda g: np.sum(
g*np.real(d_u2(3, *tlist))),
argnums=range(4))
defvjp(pu2i,
# defines vector-jacobian-product of pu2i
# g.shape == pu2i.shape
lambda ans, *tlist: lambda g: np.sum(
g*np.imag(d_u2(0, *tlist))),
lambda ans, *tlist: lambda g: np.sum(
g*np.imag(d_u2(1, *tlist))),
lambda ans, *tlist: lambda g: np.sum(
g*np.imag(d_u2(2, *tlist))),
lambda ans, *tlist: lambda g: np.sum(
g*np.imag(d_u2(3, *tlist))),
argnums=range(4))
def d_auto_pu2(dwrt, *tlist):
"""
Returns the automatic derivative of pu2. We have defined things so that
this derivative is stipulated analytically a priori rather than being
calculated by autograd from def of u2.
Parameters
----------
dwrt : int
stands for 'derivative with respect to'. int in range(4)
tlist : list[float]
len = 4
Returns
-------
np.ndarray
shape=(2,2)
"""
assert dwrt in range(4)
return jacobian(pu2r, dwrt)(*tlist) + 1j*jacobian(pu2i, dwrt)(*tlist)
if __name__ == "__main__":
from qubiter.OneBitGates import *
def main():
print("\nu2_alt example-------------")
ex = np.array([1, 0, 0])
ey = np.array([0, 1, 0])
ez = np.array([0, 0, 1])
all_paulis = sig_all()
sigx_ = np.dot(all_paulis, ex)
sigy_ = np.dot(all_paulis, ey)
sigz_ = np.dot(all_paulis, ez)
print('sigx_=\n', sigx_)
print('sigy_=\n', sigy_)
print('sigz_=\n', sigz_)
rads_list = [.1, .2, .3, .4]
err = np.linalg.norm(OneBitGates.u2(*rads_list) -
u2_alt(*rads_list))
print('err=', err)
tlist = [.3, 1.1, .7, .5]
for dwrt in range(4):
print('err=', np.linalg.norm(
d_auto_u2(dwrt, *tlist) - d_u2(dwrt, *tlist)))
for dwrt in range(4):
print('err=', np.linalg.norm(
d_auto_pu2(dwrt, *tlist) - d_u2(dwrt, *tlist)))
main()
| 2.546875 | 3 |
home/admin.py | mysteriousCoder9991/To-Do-List-Rahul | 0 | 12768368 |
from django.contrib import admin
from home.models import Task
# Register your models here.
admin.site.register(Task) | 1.195313 | 1 |
csi.py | purbe/Reinforcement_Learning_Team_Q_learnig_MARL_Multi_Agent_UAV_Spectrum_task | 20 | 12768369 | #################################
# CSI function
#################################
#########################################################
# import libraries
import scipy.spatial.distance as ssd
import numpy as np
import scipy.io as sio
#########################################################
# Function definition
###############################
# Load CSI
def load_csi(num_UAV, location, pthH, SaveFile):
"""
This function generates the CSI parameters based on the LOS propagation model and the locations of the nodes at
the beginning of the problem.
:param num_UAV: Number of UAVs.
:param location: A dictionary including all locations.
:param pthH: The directory in which to save the CSI parameters.
:param SaveFile: A flag (True, False) to save or load data.
:return: Returns a Numpy array including CSI parameters.
"""
if SaveFile:
X_U = location.get('X_U')
X_S = location.get('X_S')
X_F = location.get('X_F')
X_GT = location.get('X_GT')
X_GR = location.get('X_GR')
Y_U = location.get('Y_U')
Y_S = location.get('Y_S')
Y_F = location.get('Y_F')
Y_GT = location.get('Y_GT')
Y_GR = location.get('Y_GR')
Z_U = location.get('Z_U')
Z_S = location.get('Z_S')
Z_F = location.get('Z_F')
Z_GT = location.get('Z_GT')
Z_GR = location.get('Z_GR')
dist_S_uav = [ssd.euclidean([X_S, Y_S, Z_S], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_S_uav = np.asarray(dist_S_uav)
dist_uav_F = [ssd.euclidean([X_F, Y_F, Z_F], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_uav_F = np.asarray(dist_uav_F)
dist_GT_uav = [ssd.euclidean([X_GT, Y_GT, Z_GT], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_GT_uav = np.asarray(dist_GT_uav)
dist_uav_GR = [ssd.euclidean([X_GR, Y_GR, Z_GR], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_uav_GR = np.asarray(dist_uav_GR)
dist_S_uav_Norm = dist_S_uav/min(dist_S_uav)
dist_uav_F_Norm = dist_uav_F/min(dist_uav_F)
dist_GT_uav_Norm = dist_GT_uav/min(dist_GT_uav)
dist_uav_GR_Norm = dist_uav_GR/min(dist_uav_GR)
h_S_uav = np.multiply(1/(dist_S_uav_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_S_uav = h_S_uav.T
h_uav_F = np.multiply(1/(dist_uav_F_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_uav_F = h_uav_F.T
h_GT_uav = np.multiply(1/(dist_GT_uav_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_GT_uav = h_GT_uav.T
h_uav_GR = np.multiply(1/(dist_uav_GR_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_uav_GR = h_uav_GR.T
csi_h = np.zeros([num_UAV, 4, 1], dtype=complex)
csi_h[:, 0, :] = h_S_uav
csi_h[:, 1, :] = h_uav_F
csi_h[:, 2, :] = h_GT_uav
csi_h[:, 3, :] = h_uav_GR
sio.savemat(pthH, {'csi_h': csi_h})
else:
csi_h_dict = sio.loadmat(pthH)
csi_h = csi_h_dict.get('csi_h')
return csi_h
###############################
# GET CSI
def get_csi(num_UAV, location, x_u, y_u):
"""
This function updates the CSI location based on the changed location of drones.
:param num_UAV: Number of UAVs.
:param location: The initial location of drones and the fixed nodes.
:param x_u: The updated longitude of UAVs.
:param y_u: The updated latitude of UAVs.
:return: It returns an update numpy array for the CSI parameters.
"""
source_uav = 0
uav_fusion = 1
gtuser_uav = 2
uav_gruser = 3
X_U = x_u
X_S = location.get('X_S')
X_F = location.get('X_F')
X_GT = location.get('X_GT')
X_GR = location.get('X_GR')
Y_U = y_u
Y_S = location.get('Y_S')
Y_F = location.get('Y_F')
Y_GT = location.get('Y_GT')
Y_GR = location.get('Y_GR')
Z_U = location.get('Z_U')
Z_S = location.get('Z_S')
Z_F = location.get('Z_F')
Z_GT = location.get('Z_GT')
Z_GR = location.get('Z_GR')
dist_S_uav = [ssd.euclidean([X_S, Y_S, Z_S], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_S_uav = np.asarray(dist_S_uav)
dist_uav_F = [ssd.euclidean([X_F, Y_F, Z_F], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_uav_F = np.asarray(dist_uav_F)
dist_GT_uav = [ssd.euclidean([X_GT, Y_GT, Z_GT], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_GT_uav = np.asarray(dist_GT_uav)
dist_uav_GR = [ssd.euclidean([X_GR, Y_GR, Z_GR], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
dist_uav_GR = np.asarray(dist_uav_GR)
dist_S_uav_Norm = dist_S_uav
dist_uav_F_Norm = dist_uav_F
dist_GT_uav_Norm = dist_GT_uav
dist_uav_GR_Norm = dist_uav_GR
h_S_uav = np.multiply(1 / (dist_S_uav_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_S_uav = h_S_uav.T
h_uav_F = np.multiply(1 / (dist_uav_F_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_uav_F = h_uav_F.T
h_GT_uav = np.multiply(1 / (dist_GT_uav_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_GT_uav = h_GT_uav.T
h_uav_GR = np.multiply(1 / (dist_uav_GR_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
h_uav_GR = h_uav_GR.T
csi_h = np.zeros([num_UAV, 4], dtype=complex)
csi_h[:, source_uav] = np.squeeze(h_S_uav)
csi_h[:, uav_fusion] = np.squeeze(h_uav_F)
csi_h[:, gtuser_uav] = np.squeeze(h_GT_uav)
csi_h[:, uav_gruser] = np.squeeze(h_uav_GR)
return csi_h
| 2.84375 | 3 |
ena-dts/framework/gdb.py | amzn/amzn-ec2-ena-utilities | 7 | 12768370 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from exception import TimeoutException
from utils import GREEN, RED, BLUE
class GDB(object):
PROMPT = "(gdb)"
def __init__(self, host, app, app_prompt):
self.host = host
self.app = app
self.app_prompt = app_prompt
def start(self, p=""):
self.send("gdb --args {} {}".format(self.app, p))
self.send("set confirm off")
self.send("set pagination off")
def send_twice(self, command, prompt):
out = ""
try:
out = self.host.send_expect(command, prompt)
except TimeoutException as te:
out += self.send_app("")
return out
def send(self, command):
return self.send_twice(command, GDB.PROMPT)
def send_app(self, command):
return self.send_twice(command, self.app_prompt)
def test_path(self, f_in, action, f_ret, ret_value, call_number):
o = self.add_tbreak(f_in)
if o != 0:
self.exit_gdb()
return -1
if action is None:
self.send("run")
else:
self.interact(action)
for i in range(call_number):
o = self.add_tbreak(f_ret)
if o != 0:
self.exit_gdb()
return -1
self.send("continue")
self.send("backtrace")
self.send("return {}".format(ret_value))
return 0
def continue_app(self):
return self.send_app("continue")
def continue_gdb(self):
return self.send("continue")
def interact(self, action):
pass
def exit_gdb(self):
self.host.send_expect("quit", self.host.prompt)
def add_tbreak(self, func_name):
out = self.send("tbreak {}".format(func_name))
if "not defined" in out:
print(RED("Cannot find {}".format(func_name)))
return -1
return 0
class TestpmdGDB(GDB):
TESTPMD_PROMPT = "testpmd>"
APP = "./x86_64-native-linuxapp-gcc/app/testpmd -c 0x3 -n 2 -- " \
"--portmask=0x1 --forward-mode=icmpecho -i -a"
def __init__(self, host):
super(TestpmdGDB, self).__init__(host, self.APP, self.TESTPMD_PROMPT)
def exit(self):
out = self.send("quit") # Press enter to exit testpmd non interactive mode
self.exit_gdb()
return out
def interact(self, action):
self.send_twice("run", self.TESTPMD_PROMPT)
for a in action[:-1]:
self.send_twice(a, self.TESTPMD_PROMPT)
self.send(action[-1])
| 2.46875 | 2 |
rqalpha/core/strategy_universe.py | terencehk/rqalpha | 1 | 12768371 |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Ricequant, Inc
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import json
import copy
from rqalpha.events import EVENT, Event
from rqalpha.environment import Environment
from rqalpha.model.instrument import Instrument
class StrategyUniverse(object):
def __init__(self):
self._set = set()
Environment.get_instance().event_bus.prepend_listener(EVENT.AFTER_TRADING, self._clear_de_listed)
def get_state(self):
return json.dumps(sorted(self._set)).encode('utf-8')
def set_state(self, state):
l = json.loads(state.decode('utf-8'))
self.update(l)
def update(self, universe):
if isinstance(universe, (six.string_types, Instrument)):
universe = [universe]
new_set = set(universe)
if new_set != self._set:
self._set = new_set
Environment.get_instance().event_bus.publish_event(Event(EVENT.POST_UNIVERSE_CHANGED, universe=self._set))
def get(self):
return copy.copy(self._set)
def _clear_de_listed(self, event):
de_listed = set()
env = Environment.get_instance()
for o in self._set:
i = env.data_proxy.instruments(o)
if i.de_listed_date <= env.trading_dt:
de_listed.add(o)
if de_listed:
self._set -= de_listed
env.event_bus.publish_event(Event(EVENT.POST_UNIVERSE_CHANGED, universe=self._set))
| 1.921875 | 2 |
spydrnet/parsers/verilog/parser.py | ganeshgore/spydrnet | 34 | 12768372 | # Copyright 2021 <NAME>, BYU CCL
# please see the BYU CCL SpyDrNet license file for terms of usage.
from spydrnet.parsers.verilog.tokenizer import VerilogTokenizer
import spydrnet.parsers.verilog.verilog_tokens as vt
from spydrnet.ir import Netlist, Library, Definition, Port, Cable, Instance, OuterPin
from spydrnet.plugins import namespace_manager
import spydrnet as sdn
from functools import reduce
import re
class VerilogParser:
'''
Parse verilog files into spydrnet.
Higher level functions will always peek when deciding what lower level function to call.
within your own function call next instead to keep the flow moving.
the first token to expect in a function will be the token that starts that construct.
'''
#########################################################
# Note to contributors
#########################################################
# I have tried to follow the convention that each function
# parses all of the construct it is designed to parse
# for example the parse module function will parse the
# module keyword and then will call the parse
# instance function. It will not consume any of the tokens
# that belong to the instance instantations including the
# semi colon. (it just uses peek)
#
# I would suggest following this convention even on constructs
# where the first word is always the same.
# the small overhead of using the peek function to not consume
# the token has been well worth making it easier to maintain.
# --Dallin
#########################################################
# helper classes
#########################################################
class BlackboxHolder:
'''this is an internal class that helps manage
modules that are instanced before they are declared'''
def __init__(self):
self.name_lookup = dict()
self.defined = set()
def get_blackbox(self, name):
'''creates or returns the black box based on the name'''
if name in self.name_lookup:
return self.name_lookup[name]
else:
definition = sdn.Definition()
definition.name = name
self.name_lookup[name] = definition
return definition
def define(self, name):
'''adds the name to the defined set'''
self.defined.add(self.name_lookup[name])
def get_undefined_blackboxes(self):
'''return an iterable of all undefined blackboxes'''
undef = set()
for v in self.name_lookup.values():
if v not in self.defined:
undef.add(v)
return undef
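# Illustrative flow for the holder above (hypothetical module name "adder"):
#
#     holder = VerilogParser.BlackboxHolder()
#     d = holder.get_blackbox("adder")      # placeholder Definition for an instance seen early
#     holder.define("adder")                # called once `module adder` is actually parsed
#     holder.get_undefined_blackboxes()     # empty set, since "adder" was defined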
#######################################################
# setup functions
#######################################################
@staticmethod
def from_filename(filename):
parser = VerilogParser()
parser.filename = filename
return parser
@staticmethod
def from_file_handle(file_handle):
parser = VerilogParser()
parser.filename = file_handle
return parser
def __init__(self):
self.filename = None
self.tokenizer = None
self.netlist = None
self.current_library = None
self.current_definition = None
self.current_instance = None
self.primitives = None
self.work = None
self.assigns = None
self.assignment_count = 0
self.blackbox_holder = self.BlackboxHolder()
def parse(self):
''' parse a verilog netlist represented by verilog file
verilog_file can be a filename or stream
'''
self.initialize_tokenizer()
ns_default = namespace_manager.default
namespace_manager.default = "DEFAULT"
self.parse_verilog()
namespace_manager.default = ns_default
self.tokenizer.__del__()
return self.netlist
def initialize_tokenizer(self):
self.tokenizer = VerilogTokenizer(self.filename)
def peek_token(self):
token = self.peek_token_remove_comments()
if token[0] == '`':
t_split = token.split(maxsplit=1)
if len(t_split) > 1 and t_split[0] in [vt.IFDEF]:
while t_split[0] != vt.ENDIF:
token = self.next_token_remove_comments()
t_split = token.split(maxsplit=1)
token = self.peek_token_remove_comments()
if len(t_split) > 1 and t_split[0] == vt.DEFINE:
assert False, self.error_string(
"define not supported", "assumes all macros are undefined", vt.DEFINE)
return token
def next_token(self):
token = self.next_token_remove_comments()
if token[0] == '`':
t_split = token.split(maxsplit=1)
if len(t_split) > 1 and t_split[0] in [vt.IFDEF]:
while t_split[0] != vt.ENDIF:
token = self.next_token_remove_comments()
t_split = token.split(maxsplit=1)
token = self.next_token_remove_comments()
if len(t_split) > 1 and t_split[0] == vt.DEFINE:
assert False, self.error_string(
"define not supported", "assumes all macros are undefined", vt.DEFINE)
return token
def peek_token_remove_comments(self):
'''peeks at the tokenizer; this wrapper function exists to skip comment tokens'''
token = self.tokenizer.peek()
while len(token) >= 2 and token[0] == "/" and (token[1] == "/" or token[1] == "*"):
# this is a comment token skip it
self.tokenizer.next()
token = self.tokenizer.peek()
return token
def next_token_remove_comments(self):
'''gets the next token from the tokenizer; this wrapper function exists to skip comment tokens'''
token = self.tokenizer.next()
while len(token) >= 2 and (token[0:2] == vt.OPEN_LINE_COMMENT or token[0:2] == vt.OPEN_BLOCK_COMMENT):
# this is a comment token, skip it
token = self.tokenizer.next()
return token
#######################################################
# parsing functions
#######################################################
def parse_verilog(self):
self.netlist = sdn.Netlist()
self.netlist.name = "SDN_VERILOG_NETLIST"
self.work = self.netlist.create_library("work")
self.primitives = self.netlist.create_library("SDN.verilog_primitives")
self.current_library = self.work
preprocessor_defines = set()
star_properties = dict()
time_scale = None
primitive_cell = False
while self.tokenizer.has_next():
token = self.peek_token()
if token.split(maxsplit=1)[0] == vt.CELL_DEFINE:
primitive_cell = True
self.current_library = self.primitives
#token = token.split(maxsplit = 1)[1]
token = self.next_token()
elif token.split(maxsplit=1)[0] == vt.END_CELL_DEFINE:
primitive_cell = False
self.current_library = self.work
#token = token.split(maxsplit = 1)[1]
token = self.next_token()
elif token == vt.MODULE:
if primitive_cell:
self.parse_primitive()
else:
self.parse_module()
# go ahead and set the extra metadata that we collected to this point
if time_scale is not None:
self.current_definition["VERILOG.TimeScale"] = time_scale
if len(star_properties.keys()) > 0:
self.current_definition["VERILOG.InlineConstraints"] = star_properties
star_properties = dict()
elif token == vt.PRIMITIVE:
# self.parse_primitive()
# if time_scale is not None:
# self.current_definition["VERILOG.TimeScale"] = time_scale
# if len(star_properties.keys()) > 0:
# self.current_definition["VERILOG.InlineConstraints"] = star_properties
# star_properties = dict()
star_properties = dict()
while token != vt.END_PRIMITIVE:
token = self.next_token()
elif token == vt.DEFINE:
assert False, "Currently `define is not supported"
elif token == vt.IFDEF:
token = self.next_token()
token = self.next_token()
if token not in preprocessor_defines:
while token != vt.ENDIF:
token = self.next_token()
elif token == vt.OPEN_PARENTHESIS:
stars = self.parse_star_property()
for k, v in stars.items():
star_properties[k] = v
elif token.split(maxsplit=1)[0] == vt.TIMESCALE:
token = self.next_token()
time_scale = token.split(maxsplit=1)[1]
else:
pass
assert False, self.error_string(
"something at the top level of the file", "got unexpected token", token)
self.add_blackbox_definitions()
return self.netlist
def add_blackbox_definitions(self):
self.current_library = self.primitives
for d in self.blackbox_holder.get_undefined_blackboxes():
d["VERILOG.primitive"] = True
self.current_library.add_definition(d)
def parse_primitive(self):
'''similar to parse module but it will only look for the inputs and outputs to get an idea of how those things look'''
token = self.next_token()
assert token == vt.MODULE or token == vt.PRIMITIVE, self.error_string(
vt.MODULE, "to begin module statement", token)
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"identifier", "not a valid module name", token)
name = token
definition = self.blackbox_holder.get_blackbox(name)
self.blackbox_holder.define(name)
self.current_library.add_definition(definition)
self.current_definition = definition
# uses the same header parser because the primitives and regular cells have the same header.
self.parse_module_header()
self.parse_primitive_body()
def parse_primitive_body(self):
''' just look for port information, skip tasks and functions to help out.'''
token = self.peek_token()
while token != vt.END_MODULE and token != vt.END_PRIMITIVE:
token = self.peek_token()
if token == vt.FUNCTION: # these constructs may contain input output or inout
while token != vt.END_FUNCTION:
token = self.next_token()
elif token == vt.TASK: # these constructs may contain input output or inout
while token != vt.END_TASK:
token = self.next_token()
elif token in vt.PORT_DIRECTIONS:
self.parse_port_declaration(dict())
else:
token = self.next_token()
def parse_module(self):
token = self.next_token()
assert token == vt.MODULE, self.error_string(
vt.MODULE, "to begin module statement", token)
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"identifier", "not a valid module name", token)
name = token
definition = self.blackbox_holder.get_blackbox(name)
self.blackbox_holder.define(name)
self.current_library.add_definition(definition)
self.current_definition = definition
self.assignment_count = 0
if self.netlist.top_instance is None:
self.netlist.top_instance = sdn.Instance()
self.netlist.top_instance.name = definition.name + "_top"
self.netlist.top_instance.reference = definition
self.netlist.name = "SDN_VERILOG_NETLIST_" + definition.name
self.parse_module_header()
self.parse_module_body()
def parse_module_header(self):
'''parse a module header and add the parameter dictionary and port list to the current_definition'''
token = self.peek_token()
if token == "#":
self.parse_module_header_parameters()
token = self.peek_token()
assert token == "(", self.error_string("(", "for port mapping", token)
self.parse_module_header_ports()
token = self.next_token()
assert token == vt.CLOSE_PARENTHESIS, self.error_string(
vt.CLOSE_PARENTHESIS, "to end the module ports in the header", token)
token = self.next_token()
assert token == vt.SEMI_COLON, self.error_string(
vt.SEMI_COLON, "to end the module header section", token)
def parse_module_header_parameters(self):
'''parse a parameter block in a module header, add all parameters to the current definition'''
token = self.next_token()
assert token == vt.OCTOTHORP, self.error_string(
vt.OCTOTHORP, "to begin parameter map", token)
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "to begin parameter map", token)
token = self.next_token()
parameter_dictionary = dict()
while token != ")":
# this is happening twice for all but the first one.. could simplify
assert token == vt.PARAMETER, self.error_string(
vt.PARAMETER, "parameter declaration", token)
key = ""
token = self.peek_token()
if token == vt.OPEN_BRACKET:
left, right = self.parse_brackets()
if right != None:
key = "[" + str(left) + ":" + str(right) + "] "
else:
key = "[" + str(left) + "] "
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
'identifier', "in parameter list", token)
key += token
token = self.next_token()
if key == vt.INTEGER:
key += " " + token
token = self.next_token()
assert token == vt.EQUAL, self.error_string(
vt.EQUAL, "in parameter list", token)
token = self.next_token()
# not really sure what to assert here.
value = token
parameter_dictionary[key] = value
token = self.next_token()
if token == vt.COMMA: # just keep going
token = self.next_token()
assert token == vt.PARAMETER, self.error_string(
vt.PARAMETER, "after comma in parameter map", token)
else:
assert token == vt.CLOSE_PARENTHESIS, self.error_string(
vt.CLOSE_PARENTHESIS, "to end parameter declarations", token)
self.set_definition_parameters(
self.current_definition, parameter_dictionary)
def parse_module_header_ports(self):
'''parse port declarations in the module header and add them to the definition'''
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "to begin port declarations", token)
token = self.peek_token()
while token != ")":
# the first token could be a name or input output or inout
if token == ".":
self.parse_module_header_port_alias()
else:
self.parse_module_header_port()
token = self.peek_token()
if token != vt.CLOSE_PARENTHESIS:
assert token == vt.COMMA, self.error_string(
vt.COMMA, "to separate port declarations", token)
token = self.next_token() # consume the comma token
token = self.peek_token() # setup the next token
def parse_module_header_port_alias(self):
'''parse the port alias portion of the module header
this parses the port alias section so that the port name is only a port and the mapped wires are the cables names that connect to that port.
this requires that the cables names be kept in a dictionary to allow for setting the direction when the direction is given to the internal port names.
example syntax
.canale({\\canale[3] ,\\canale[2] ,\\canale[1] ,\\canale[0] }),'''
token = self.next_token()
assert token == vt.DOT, self.error_string(
vt.DOT, "for port aliasing", token)
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"identifier", "for port in port aliasing", token)
name = token
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "parenthesis to enclose port aliasing", token)
token = self.peek_token()
if token == vt.OPEN_BRACE:
wires = self.parse_cable_concatenation()
else:
cable, left, right = self.parse_variable_instantiation()
wires = self.get_wires_from_cable(cable, left, right)
token = self.next_token()
assert token == vt.CLOSE_PARENTHESIS, self.error_string(
vt.CLOSE_PARENTHESIS, "parenthesis to end port aliasing construct", token)
port = self.create_or_update_port(
name, left_index=len(wires)-1, right_index=0)
# connect the wires to the pins
assert len(port.pins) == len(
wires), "Internal Error: the pins in a created port and the number of wires in the aliased cable do not match up"
for i in range(len(port.pins)):
wires[i].connect_pin(port.pins[i])
def parse_cable_concatenation(self):
'''parse a concatenation structure of cables, create the cables mentioned, and deal with indices
return a list of ordered wires that represents the cable concatenation
example syntax
{wire1, wire2, wire3, wire4}'''
token = self.next_token()
assert token == vt.OPEN_BRACE, self.error_string(
vt.OPEN_BRACE, "to start cable concatenation", token)
token = self.peek_token()
wires = []
while token != vt.CLOSE_BRACE:
cable, left, right = self.parse_variable_instantiation()
wires_temp = self.get_wires_from_cable(cable, left, right)
for w in wires_temp:
wires.append(w)
token = self.next_token()
if token != vt.COMMA:
assert token == vt.CLOSE_BRACE, self.error_string(
vt.CLOSE_BRACE, "to end cable concatenation", token)
return wires
def parse_module_header_port(self):
'''parse the port declaration in the module header'''
token = self.peek_token()
direction = None
defining = False
if token in vt.PORT_DIRECTIONS:
token = self.next_token()
direction = vt.string_to_port_direction(token)
token = self.peek_token()
defining = True
left = None
right = None
if token == vt.OPEN_BRACKET:
left, right = self.parse_brackets()
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"identifier", "for port declaration", token)
name = token
port = self.create_or_update_port(
name, left_index=left, right_index=right, direction=direction, defining=defining)
# get the left and right out of the port (in case we got more information out of an instance?)
if left == None and right == None:
left = port.lower_index + len(port.pins) - 1
right = port.lower_index
if not port.is_downto:
temp = left
left = right
right = temp
cable = self.create_or_update_cable(
name, left_index=left, right_index=right, defining=defining)
# wire together the cables and the port
assert len(port.pins) == len(cable.wires), self.error_string(
"the pins in a created port and the number of wires in its cable do not match up", "wires: " + str(len(cable.wires)), "pins: " + str(len(port.pins)))
for i in range(len(port.pins)):
cable.wires[i].connect_pin(port.pins[i])
def parse_module_body(self):
'''
parse through a module body
module bodies consist of port declarations,
wire and reg declarations
and instantiations
expects port declarations to start with the direction and then include the cable type if provided
'''
direction_tokens = [vt.INPUT, vt.OUTPUT, vt.INOUT]
variable_tokens = [vt.WIRE, vt.REG, vt.TRI0, vt.TRI1]
token = self.peek_token()
params = dict()
while token != vt.END_MODULE:
if token in direction_tokens:
self.parse_port_declaration(params)
params = dict()
elif token in variable_tokens:
self.parse_cable_declaration(params)
params = dict()
elif token == vt.ASSIGN:
o_cable, o_left, o_right, i_cable, i_left, i_right = self.parse_assign()
self.connect_wires_for_assign(
o_cable, o_left, o_right, i_cable, i_left, i_right)
elif token == vt.DEFPARAM:
self.parse_defparam_parameters()
elif vt.is_valid_identifier(token):
self.parse_instantiation(params)
params = dict()
elif token == vt.OPEN_PARENTHESIS:
stars = self.parse_star_property()
for k, v in stars.items():
params[k] = v
else:
assert False, self.error_string(
"direction, reg, wire, star_properties, or instance identifier", "in module body", token)
token = self.peek_token()
token = self.next_token()
assert token == vt.END_MODULE, self.error_string(
vt.END_MODULE, "to end the module body", token)
def parse_port_declaration(self, properties):
'''parse the port declaration post port list.'''
token = self.next_token()
assert token in vt.PORT_DIRECTIONS, self.error_string(
"direction keyword", "to define port", token)
direction = vt.string_to_port_direction(token)
token = self.peek_token()
if token in [vt.REG, vt.WIRE]:
var_type = token
token = self.next_token()
else:
var_type = None
token = self.peek_token()
if token == vt.OPEN_BRACKET:
left, right = self.parse_brackets()
else:
left = None
right = None
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"port identifier", "identify port", token)
names = []
names.append(token)
token = self.next_token()
while token == vt.COMMA:
token = self.next_token()
names.append(token)
token = self.next_token()
assert token == vt.SEMI_COLON, self.error_string(
vt.SEMI_COLON, "to end port declaration", token)
for name in names:
cable = self.create_or_update_cable(
name, left_index=left, right_index=right, var_type=var_type, defining=True)
port_list = self.get_all_ports_from_wires(
self.get_wires_from_cable(cable, left, right))
assert len(port_list) > 0, self.error_string(
"port name defined in the module header", "to declare a port", cable.name)
if len(port_list) > 1:
for p in port_list:
port = self.create_or_update_port(
p.name, direction=direction)
port["VERILOG.InlineConstraints"] = properties
else:
port = self.create_or_update_port(port_list.pop(
).name, left_index=left, right_index=right, direction=direction, defining=True)
if len(cable.wires) > 1:
self.connect_resized_port_cable(cable, port)
def parse_cable_declaration(self, properties):
token = self.next_token()
assert token in [vt.REG, vt.WIRE, vt.TRI0, vt.TRI1], self.error_string(
"reg, tri1, tri0, or wire", "for cable declaration", token)
var_type = token
token = self.peek_token()
if token == vt.OPEN_BRACKET:
left, right = self.parse_brackets()
else:
left = None
right = None
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"valid cable identifier", "identify the cable", token)
name = token
cable = self.create_or_update_cable(
name, left_index=left, right_index=right, var_type=var_type)
cable["VERILOG.InlineConstraints"] = properties
token = self.next_token()
assert token == vt.SEMI_COLON, self.error_string(
vt.SEMI_COLON, "to end cable declaration", token)
def parse_instantiation(self, properties):
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"module identifier", "for instantiation", token)
def_name = token
parameter_dict = dict()
token = self.peek_token()
if token == vt.OCTOTHORP:
parameter_dict = self.parse_parameter_mapping()
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"instance name", "for instantiation", token)
name = token
token = self.peek_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "to start port to cable mapping", token)
instance = self.current_definition.create_child()
self.current_instance = instance
instance.name = name
instance.reference = self.blackbox_holder.get_blackbox(def_name)
instance["VERILOG.InlineConstraints"] = properties
self.parse_port_mapping()
self.set_instance_parameters(instance, parameter_dict)
token = self.next_token()
assert token == vt.SEMI_COLON, self.error_string(
vt.SEMI_COLON, "to end instantiation", token)
def parse_defparam_parameters(self):
'''parse a defparam structure and add the parameters to the associated instance
this looks like:
defparam \\avs_s1_readdata[12]~output .bus_hold = "false"; //single backslash to escape name
and must come after the associated instance (I'm not sure this is the verilog spec but
it is the way quartus wrote my example and is much simpler)
'''
params = dict()
token = self.next_token()
assert token == vt.DEFPARAM, self.error_string(vt.DEFPARAM, "to begin defparam statement", token)
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string("valid identifier", "of an instance to apply the defparam to", token)
instance_name = token
if self.current_instance.name == instance_name:
instance = self.current_instance
else:
instance = next(self.current_definition.get_instances(instance_name), None)
assert instance is not None, self.error_string("identifier of existing instance", "within the current definition", instance_name)
token = self.next_token()
assert token == vt.DOT, self.error_string(vt.DOT, "to separate the parameter key from the instance name", token)
token = self.next_token()
key = token
token = self.next_token()
assert token == vt.EQUAL, self.error_string(vt.EQUAL, "separate the key from the value in a defparam statement", token)
token = self.next_token()
value = token
params[key] = value  # record the parsed key/value pair; without this the defparam would be silently dropped
token = self.next_token()
assert token == vt.SEMI_COLON, self.error_string(vt.SEMI_COLON, "to end the defparam statement", token)
self.set_instance_parameters(instance, params)
def parse_parameter_mapping(self):
params = dict()
token = self.next_token()
assert token == vt.OCTOTHORP, self.error_string(
vt.OCTOTHORP, "to begin parameter mapping", token)
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "after # to begin parameter mapping", token)
while token != vt.CLOSE_PARENTHESIS:
k, v = self.parse_parameter_map_single()
params[k] = v
token = self.next_token()
assert token in [vt.CLOSE_PARENTHESIS, vt.COMMA], self.error_string(
vt.COMMA + " or " + vt.CLOSE_PARENTHESIS, "to separate parameters or end parameter mapping", token)
assert token == vt.CLOSE_PARENTHESIS, self.error_string(
vt.CLOSE_PARENTHESIS, "to terminate the parameter mapping", token)
return params
def parse_parameter_map_single(self):
# syntax looks like .identifier(value)
token = self.next_token()
assert token == vt.DOT, self.error_string(
vt.DOT, "to begin parameter mapping", token)
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"valid parameter identifier", "in parameter mapping", token)
k = token
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "after identifier in parameter mapping", token)
token = self.next_token()
v = token
token = self.next_token()
assert token == vt.CLOSE_PARENTHESIS, self.error_string(
vt.CLOSE_PARENTHESIS, "to close the parameter mapping value", token)
return k, v
def parse_port_mapping(self):
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "to start the port mapping", token)
while token != vt.CLOSE_PARENTHESIS:
self.parse_port_map_single()
token = self.next_token()
assert token in [vt.COMMA, vt.CLOSE_PARENTHESIS], self.error_string(
vt.COMMA + " or " + vt.CLOSE_PARENTHESIS, "between port mapping elements or to end the port mapping", token)
def parse_port_map_single(self):
'''actually does the mapping of the pins'''
token = self.next_token()
assert token == vt.DOT, self.error_string(
vt.DOT, "to start a port mapping instance", token)
token = self.next_token()
assert vt.is_valid_identifier(token), self.error_string(
"valid port identifier", "for port in instantiation port map", token)
port_name = token
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "to encapsulate cable name in port mapping", token)
token = self.peek_token()
if token != vt.CLOSE_PARENTHESIS:
if token == vt.OPEN_BRACE:
wires = self.parse_cable_concatenation()
else:
cable, left, right = self.parse_variable_instantiation()
wires = self.get_wires_from_cable(cable, left, right)
pins = self.create_or_update_port_on_instance(
port_name, len(wires))
assert len(pins) >= len(wires), self.error_string(
"pins length to match or exceed cable.wires length", "INTERNAL ERROR", str(len(pins)) + "!=" + str(len(wires)))
# there can be unconnected pins at the end of the port.
for i in range(len(wires)):
wires[i].connect_pin(pins[i])
token = self.next_token()
else:
# consume the )
token = self.next_token()
# the port is intentionally left unconnected.
self.create_or_update_port_on_instance(port_name, 1)
assert token == vt.CLOSE_PARENTHESIS, self.error_string(
vt.CLOSE_PARENTHESIS, "to end cable name in port mapping", token)
def parse_assign(self):
token = self.next_token()
assert token == vt.ASSIGN, self.error_string(
vt.ASSIGN, "to begin assignment statement", token)
l_cable, l_left, l_right = self.parse_variable_instantiation()
token = self.next_token()
assert token == vt.EQUAL, self.error_string(
vt.EQUAL, "in assignment statement", token)
r_cable, r_left, r_right = self.parse_variable_instantiation()
token = self.next_token()
assert token == vt.SEMI_COLON, self.error_string(
vt.SEMI_COLON, "to terminate assign statement", token)
return l_cable, l_left, l_right, r_cable, r_left, r_right
def parse_variable_instantiation(self):
'''parse the cable name and its indices if any
if we are in Intel land then 2 other things can happen.
the "cable" is a constant,
attach it to the \\<const0> or \\<const1> cable.
the cable is inverted,
create a new cable and an inverter block similar to the assign but with an inversion in the block
'''
token = self.next_token()
if token[0] == "1":
assert token[1] == vt.SINGLE_QUOTE, self.error_string(vt.SINGLE_QUOTE, "in the constant", token)
assert token[2] == 'b', self.error_string('b', "in the constant", token)
assert token[3] in ["0", "1", "x", "X", "z", "Z"], self.error_string("one of 0, 1, x, X, z, Z", "represent the constant value after '", token)
name = "\\<const" + token[3] + "> "  # token[3] is the constant bit value, giving \<const0> or \<const1>
elif vt.is_numeric(token[0]):
assert False, self.error_string("single bit constant", "multibit constants not supported", token)
else:
name = token
assert vt.is_valid_identifier(name), self.error_string(
"valid port identifier", "for port in instantiation port map", name)
token = self.peek_token()
left = None
right = None
if token == vt.OPEN_BRACKET:
left, right = self.parse_brackets()
cable = self.create_or_update_cable(
name, left_index=left, right_index=right)
return cable, left, right
def parse_brackets(self):
'''returns 2 integer values or 1 integer value and none'''
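# e.g. "[3:0]" yields (3, 0) while "[5]" yields (5, None)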
token = self.next_token()
assert token == vt.OPEN_BRACKET, self.error_string(
"[", "to begin array slice", token)
token = self.next_token()
assert self.is_numeric(token), self.error_string(
"number", "after [", token)
left = int(token)
token = self.next_token()
if token == "]":
return left, None
else:
assert(token == vt.COLON), self.error_string(
"] or :", "in array slice", token)
token = self.next_token()
assert(self.is_numeric(token)), self.error_string(
"number", "after : in array slice", token)
right = int(token)
token = self.next_token()
assert token == vt.CLOSE_BRACKET, self.error_string(
"]", "to terminate array slice", token)
return left, right
def parse_star_property(self):
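# Parses a Verilog attribute instance such as (* DONT_TOUCH = "true", keep *) into a
# dict of key -> value (value is None when no '=' is given). The attribute names in
# this example are illustrative only.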
token = self.next_token()
assert token == vt.OPEN_PARENTHESIS, self.error_string(
vt.OPEN_PARENTHESIS, "to begin star property", token)
token = self.next_token()
assert token == vt.STAR, self.error_string(
vt.STAR, "to begin star property", token)
properties_dict = dict()
token = self.next_token()
while token != vt.STAR:
assert vt.is_valid_identifier(token)
key = token
token = self.next_token()
assert token in [vt.EQUAL, vt.STAR, vt.COMMA], self.error_string(
vt.EQUAL + " or " + vt.STAR + " or " + vt.COMMA, "to set a star parameter", token)
if token == vt.EQUAL:
token = self.next_token()
value = ""
while token != vt.STAR and token != vt.COMMA:
value += token
token = self.next_token()
else:
value = None
properties_dict[key] = value
if token != vt.STAR:
token = self.next_token()
assert token == vt.STAR, self.error_string(
vt.STAR, "to start the ending of a star property", token)
token = self.next_token()
assert token == vt.CLOSE_PARENTHESIS, self.error_string(
vt.CLOSE_PARENTHESIS, "to end the star property", token)
return properties_dict
#######################################################
# assignment helpers
#######################################################
def get_assignment_library(self):
'''create or return a previously created assignment library'''
if self.assigns == None:
self.assigns = self.netlist.create_library(
name="SDN_VERILOG_ASSIGNMENT")
return self.assigns
def get_assignment_definition(self, width):
'''get the definition of the specified width for assignments'''
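# The generated definition is a simple pass-through "module": an input port i, an
# output port o, and an internal cable named "through" wiring bit i[k] to o[k].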
proposed_name = "SDN_VERILOG_ASSIGNMENT_" + str(width)
library = self.get_assignment_library()
definition = next(library.get_definitions(proposed_name), None)
if definition == None:
definition = library.create_definition(name=proposed_name)
in_port = definition.create_port("i")
out_port = definition.create_port("o")
in_port.create_pins(width)
out_port.create_pins(width)
in_port.direction = sdn.Port.Direction.IN
out_port.direction = sdn.Port.Direction.OUT
cable = definition.create_cable("through")
cable.create_wires(width)
for i in range(width):
cable.wires[i].connect_pin(in_port.pins[i])
cable.wires[i].connect_pin(out_port.pins[i])
return definition
def create_assignment_instance(self, width):
'''create a new assign instance of the specified width on the current definition'''
definition = self.get_assignment_definition(width)
instance_name = definition.name + "_" + str(self.assignment_count)
self.assignment_count += 1
instance = self.current_definition.create_child(instance_name)
instance.reference = definition
return instance
def connect_wires_for_assign(self, l_cable, l_left, l_right, r_cable, r_left, r_right):
'''connect the wires in r_left to the wires in l_left'''
out_wires = self.get_wires_from_cable(l_cable, l_left, l_right)
in_wires = self.get_wires_from_cable(r_cable, r_left, r_right)
# use min: only as many bits as both sides share can actually be assigned, so extra pins are not needed.
width = min(len(out_wires), len(in_wires))
instance = self.create_assignment_instance(width)
in_port = next(instance.reference.get_ports('i'), None)
out_port = next(instance.reference.get_ports('o'), None)
in_pins = self.get_pins_by_port_from_instance(instance, in_port)
out_pins = self.get_pins_by_port_from_instance(instance, out_port)
for i in range(width):
out_wires[i].connect_pin(out_pins[i])
in_wires[i].connect_pin(in_pins[i])
#######################################################
# helper functions
#######################################################
def get_pins_by_port_from_instance(self, instance, port):
pin_lookup = instance.pins
pins_out = []
for p in port.pins:
pins_out.append(pin_lookup[p])
return pins_out
def set_instance_parameters(self, instance, params):
for k, v in params.items():
#self.set_single_parameter(instance.reference, k, None)
self.set_single_parameter(instance, k, v)
def set_definition_parameters(self, definition, params):
for k, v in params.items():
self.set_single_parameter(definition, k, v)
def set_single_parameter(self, var, k, v):
if "VERILOG.Parameters" not in var:
var["VERILOG.Parameters"] = dict()
if k not in var["VERILOG.Parameters"] or var["VERILOG.Parameters"][k] is None:
var["VERILOG.Parameters"][k] = v
def get_all_ports_from_wires(self, wires):
'''gets all ports associated with a set of wires'''
ports = set()
for w in wires:
for p in w.pins:
if isinstance(p, sdn.InnerPin):
ports.add(p.port)
return ports
def get_wires_from_cable(self, cable, left, right):
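# left/right are the indices as written in the Verilog source; they are converted to
# positions in cable.wires by subtracting cable.lower_index. With both given, the
# wires come back in left-to-right order (reversed when left > right); with neither,
# the whole cable is returned.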
wires = []
if left != None and right != None:
left = left - cable.lower_index
right = right - cable.lower_index
temp_wires = cable.wires[min(left, right): max(left, right) + 1]
if left > right:
temp_wires = reversed(temp_wires)
for w in temp_wires:
wires.append(w)
elif left != None or right != None:
if left != None:
index = left - cable.lower_index
else:
index = right - cable.lower_index
wires.append(cable.wires[index])
else:
for w in cable.wires:
wires.append(w)
return wires
def convert_string_to_port_direction(self, token):
if token == vt.INPUT:
return sdn.Port.Direction.IN
if token == vt.INOUT:
return sdn.Port.Direction.INOUT
if token == vt.OUTPUT:
return sdn.Port.Direction.OUT
else:
return sdn.Port.Direction.UNDEFINED
########################################################################################
# Port and cable creation and update management
########################################################################################
'''I'm handed a few different possible scenarios
module name(port1, port2,...);
input [3:0] port1
output [3:0] port2
endmodule
or
module name
(
input [3:0] port1,
output[3:0] port2,
...
);
additionally i need to be aware of the possibility that something like this happens
module name(.port1({cable1, cable2}));
input [1:0] cable1;
output [1:0] cable2;
'''
def connect_resized_port_cable(self, resized_cable, resized_port):
'''One-to-one connector. Don't use with alias statements. This expects that the given cable completely fills the port.
After a cable that is attached to a port has been resized, the port may also need to be resized and its pins reconnected to the wires.'''
assert len(resized_cable.wires) == len(resized_port.pins), self.error_string("cable and port to have same size",
"to reconnect expanded cables and ports", "wires: " + str(len(resized_cable.wires)) + " pins: " + str(len(resized_port.pins)))
for i in range(len(resized_port.pins)):
# I think these should be lined up right?
if resized_port.pins[i] not in resized_cable.wires[i].pins:
resized_cable.wires[i].connect_pin(resized_port.pins[i])
def create_or_update_cable(self, name, left_index=None, right_index=None, var_type=None, defining=False):
cable_generator = self.current_definition.get_cables(name)
cable = next(cable_generator, None)
if cable == None:
cable = self.current_definition.create_cable()
self.populate_new_cable(
cable, name, left_index, right_index, var_type)
return cable
assert cable.name == name
cable_lower = cable.lower_index
# -1 so that it is the same number if the width is 1
cable_upper = cable.lower_index + len(cable.wires) - 1
if left_index is not None and right_index is not None:
in_lower = min(left_index, right_index)
in_upper = max(left_index, right_index)
elif left_index is not None:
in_lower = left_index
in_upper = left_index
elif right_index is not None:
in_upper = right_index
in_lower = right_index
else:
in_upper = None
in_lower = None
if defining and in_lower is not None: # if the cable width is being defined then recenter the cable
cable.lower_index = in_lower
cable_lower = cable.lower_index
cable_upper = cable.lower_index + len(cable.wires) - 1
if in_upper is not None and in_lower is not None:
if in_lower < cable_lower:
prepend = cable_lower - in_lower
self.prepend_wires(cable, prepend)
if in_upper > cable_upper:
postpend = in_upper - cable_upper
self.postpend_wires(cable, postpend)
if var_type is not None:
cable["VERILOG.CableType"] = var_type
return cable
def populate_new_cable(self, cable, name, left_index, right_index, var_type):
cable.name = name
if left_index is not None and right_index is not None:
cable.is_downto = right_index <= left_index
cable.create_wires(max(left_index, right_index) -
min(left_index, right_index) + 1)
cable.lower_index = min(left_index, right_index)
elif left_index is not None:
cable.lower_index = left_index
cable.create_wire()
elif right_index is not None:
cable.lower_index = right_index
cable.create_wire()
else:
cable.lower_index = 0
cable.create_wire()
if var_type:
cable["VERILOG.CableType"] = var_type
return cable
def prepend_wires(self, cable, count):
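# Grow the cable at the low end: the newly created wires are rotated to the front of
# cable.wires and lower_index is decreased so the existing wires keep their indices.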
orig_count = len(cable.wires)
cable.create_wires(count)
cable.wires = cable.wires[orig_count:] + cable.wires[:orig_count]
cable.lower_index = cable.lower_index - count
def postpend_wires(self, cable, count):
cable.create_wires(count)
def create_or_update_port_on_instance(self, name, width):
'''returns the set of pins associated with the port on the instance'''
pins = []
port = self.create_or_update_port(
name, left_index=width - 1, right_index=0, definition=self.current_instance.reference)
for pin in self.current_instance.pins:
if pin.inner_pin in port.pins:
pins.append(pin)
return pins
def create_or_update_port(self, name, left_index=None, right_index=None, direction=None, definition=None, defining=False):
if definition == None:
definition = self.current_definition
port_generator = definition.get_ports(name)
port = next(port_generator, None)
if port == None:
port = definition.create_port()
self.populate_new_port(
port, name, left_index, right_index, direction)
return port
assert port.name == name
# figure out what we need to do with the indices
port_lower = port.lower_index
# -1 so that it is the same number if the width is 1
port_upper = port.lower_index + len(port.pins) - 1
if left_index is not None and right_index is not None:
in_lower = min(left_index, right_index)
in_upper = max(left_index, right_index)
elif left_index is not None:
in_lower = left_index
in_upper = left_index
elif right_index is not None:
in_upper = right_index
in_lower = right_index
else:
in_upper = None
in_lower = None
if defining and in_lower is not None: # if the port width is being defined then recenter the port
port.lower_index = in_lower
port_lower = port.lower_index
port_upper = port.lower_index + len(port.pins) - 1
if in_upper is not None and in_lower is not None:
if in_lower < port_lower:
prepend = port_lower - in_lower
self.prepend_pins(port, prepend)
if in_upper > port_upper:
postpend = in_upper - port_upper
self.postpend_pins(port, postpend)
if direction is not None:
port.direction = direction
return port
def populate_new_port(self, port, name, left_index, right_index, direction):
port.name = name
if left_index is not None and right_index is not None:
port.is_downto = right_index <= left_index
port.create_pins(max(left_index, right_index) -
min(left_index, right_index) + 1)
port.lower_index = min(left_index, right_index)
elif left_index is not None:
port.lower_index = left_index
port.create_pin()
elif right_index is not None:
port.lower_index = right_index
port.create_pin()
else:
port.lower_index = 0
port.create_pin()
if direction is not None:
port.direction = direction
return port
def prepend_pins(self, port, count):
orig_count = len(port.pins)
port.create_pins(count)
port.pins = port.pins[orig_count:] + port.pins[:orig_count]
port.lower_index = port.lower_index - count
def postpend_pins(self, port, count):
port.create_pins(count)
def is_numeric(self, token):
first = True
for c in token:
if first:
first = False
if c == "-":
continue
if c not in vt.NUMBERS:
return False
return True
def is_alphanumeric(self, token):
for c in token:
if c not in vt.NUMBERS and c not in vt.LETTERS:
return False
return True
def error_string(self, expected, why, result):
'''put in the expectation and then the reason or location and the actual result'''
return "expected " + str(expected) + " " + why + " but got " + str(result) + " Line: " + str(self.tokenizer.line_number)
| 2.25 | 2 |
scripts/vrf_scripts/request_randomness.py | coozebra/chainlink-mix | 9 | 12768373 | #!/usr/bin/python3
import os
from brownie import VRFConsumer, accounts, config
STATIC_SEED = 123
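# STATIC_SEED is simply the user-supplied seed forwarded to getRandomNumber() in main() below.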
def main():
dev = accounts.add(os.getenv(config['wallets']['from_key']))
# Get the most recently deployed VRFConsumer contract (the last entry in Brownie's deployment history)
vrf_contract = VRFConsumer[len(VRFConsumer) - 1]
vrf_contract.getRandomNumber(STATIC_SEED, {'from': dev})
| 2.09375 | 2 |
furnace/datasets/voc/voc.py | Yongjin-colin-choi/TorchSemiSeg | 1,439 | 12768374 | <filename>furnace/datasets/voc/voc.py
#!/usr/bin/env python3
# encoding: utf-8
# @Time : 2017/12/16 8:41 PM
# @Author : yuchangqian
# @Contact : <EMAIL>
# @File : mclane.py
from datasets.BaseDataset import BaseDataset
class VOC(BaseDataset):
@classmethod
def get_class_colors(*args):
return [[0, 0, 0], [0, 0, 128], [0, 128, 0], [0, 128, 128],
        [128, 0, 0], [128, 0, 128], [128, 128, 0], [128, 128, 128],
        [0, 0, 64], [0, 0, 192], [0, 128, 64], [0, 128, 192],
        [128, 0, 64], [128, 0, 192], [128, 128, 64], [128, 128, 192],
        [0, 64, 0], [0, 64, 128], [0, 192, 0], [0, 192, 128],
        [128, 64, 0]]
@classmethod
def get_class_names(*args):
return ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
        'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
        'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tv/monitor']
if __name__ == "__main__":
data_setting = {'img_root': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG/',
'gt_root': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG',
'train_source': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG/config/train.txt',
'eval_source': '/unsullied/sharefs/g:research_detection/GeneralDetection/VOC/VOC/VOC2012_AUG/config/val.txt'}
voc = VOC(data_setting, 'train', None)
print(voc.get_class_names())
print(voc.get_length())
print(next(iter(voc)))
| 2.390625 | 2 |
examples/shared/dqn/networks.py | MushroomRL/mushroom-rl-meta | 0 | 12768375 | <reponame>MushroomRL/mushroom-rl-meta
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class AtariNetwork(nn.Module):
n_features = 512
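# Multi-task DQN-style network: one convolutional tower per game (self._h1.._h3), a
# single fully connected layer shared across all games (self._h4), and one output
# head per game (self._h5), each sized to the largest action space.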
def __init__(self, input_shape, _, n_actions_per_head, use_cuda, n_games,
features, dropout):
super().__init__()
self._n_input = input_shape
self._n_games = n_games
self._max_actions = max(n_actions_per_head)[0]
self._features = features
self._use_cuda = use_cuda
self._n_shared = 2
self._h1 = nn.ModuleList(
[nn.Conv2d(self._n_input[0], 32, kernel_size=8, stride=4) for _ in range(
self._n_games)]
)
self._h2 = nn.ModuleList(
[nn.Conv2d(32, 64, kernel_size=4, stride=2) for _ in range(
self._n_games)]
)
self._h3 = nn.ModuleList(
[nn.Conv2d(64, 64, kernel_size=3, stride=1) for _ in range(
self._n_games)]
)
self._h4 = nn.Linear(3136, self.n_features)
self._h5 = nn.ModuleList(
[nn.Linear(self.n_features, self._max_actions) for _ in range(
self._n_games)]
)
nn.init.xavier_uniform_(self._h4.weight,
gain=nn.init.calculate_gain('relu'))
for i in range(self._n_games):
nn.init.xavier_uniform_(self._h1[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h2[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h3[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h5[i].weight,
gain=nn.init.calculate_gain('linear'))
def forward(self, state, action=None, idx=None):
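# idx gives, for every sample in the batch, the index of the game it belongs to; each
# sample is routed through its game-specific conv tower, and the matching head is
# finally selected from the stacked per-game Q-values.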
state = state.float() / 255.
h = list()
for i in np.unique(idx):
idxs = np.argwhere(idx == i).ravel()
h_f = F.relu(
self._h1[i](state[idxs, :self._n_input[0]])
)
h_f = F.relu(self._h2[i](h_f))
h.append(F.relu(self._h3[i](h_f)))
cat_h3 = torch.cat(h)
if self._features == 'relu':
h_f = F.relu(self._h4(cat_h3.view(-1, 3136)))
elif self._features == 'sigmoid':
h_f = torch.sigmoid(self._h4(cat_h3.view(-1, 3136)))
else:
raise ValueError
q = [self._h5[i](h_f) for i in range(self._n_games)]
q = torch.stack(q, dim=1)
if action is not None:
action = action.long()
q_acted = torch.squeeze(
q.gather(2, action.repeat(1, self._n_games).unsqueeze(-1)), -1)
q = q_acted
if idx is not None:
idx = torch.from_numpy(idx)
if self._use_cuda:
idx = idx.cuda()
if q.dim() == 2:
q_idx = q.gather(1, idx.unsqueeze(-1))
else:
q_idx = q.gather(1, idx.view(-1, 1).repeat(
1, self._max_actions).unsqueeze(1))
q = torch.squeeze(q_idx, 1)
return q
def get_shared_weights(self):
p1 = list()
for p in self._h4.parameters():
p1.append(p.data.detach().cpu().numpy())
return p1
def set_shared_weights(self, weights):
w1 = weights
for p, w in zip(self._h4.parameters(), w1):
w_tensor = torch.from_numpy(w).type(p.data.dtype)
if self._use_cuda:
w_tensor = w_tensor.cuda()
p.data = w_tensor
def freeze_shared_weights(self):
for p in self._h4.parameters():
p.requires_grad = False
def unfreeze_shared_weights(self):
for p in self._h4.parameters():
p.requires_grad = True
class GymNetwork(nn.Module):
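# Fully connected counterpart of AtariNetwork: per-game input layers (self._h1), two
# hidden layers shared across games (self._h2, self._h3), and per-game output heads
# (self._h4).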
def __init__(self, input_shape, _, n_actions_per_head, use_cuda, features,
dropout, n_features=80):
super().__init__()
self._n_input = input_shape
self._n_games = len(n_actions_per_head)
self._max_actions = max(n_actions_per_head)[0]
self._use_cuda = use_cuda
self._n_shared = 4
self._features = features
self._h1 = nn.ModuleList(
[nn.Linear(self._n_input[i][0], n_features) for i in range(
len(input_shape))]
)
self._h2 = nn.Linear(n_features, n_features)
self._h3 = nn.Linear(n_features, n_features)
self._h4 = nn.ModuleList(
[nn.Linear(n_features, self._max_actions) for _ in range(
self._n_games)]
)
nn.init.xavier_uniform_(self._h2.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h3.weight,
gain=nn.init.calculate_gain('relu'))
for i in range(self._n_games):
nn.init.xavier_uniform_(self._h1[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h4[i].weight,
gain=nn.init.calculate_gain('linear'))
def forward(self, state, action=None, idx=None):
state = state.float()
h1 = list()
for i in np.unique(idx):
idxs = np.argwhere(idx == i).ravel()
h1.append(F.relu(self._h1[i](state[idxs, :self._n_input[i][0]])))
cat_h1 = torch.cat(h1)
h_f = F.relu(self._h2(cat_h1))
if self._features == 'relu':
h_f = F.relu(self._h3(h_f))
elif self._features == 'sigmoid':
h_f = torch.sigmoid(self._h3(h_f))
else:
raise ValueError
q = [self._h4[i](h_f) for i in range(self._n_games)]
q = torch.stack(q, dim=1)
if action is not None:
action = action.long()
q_acted = torch.squeeze(
q.gather(2, action.repeat(1, self._n_games).unsqueeze(-1)), -1)
q = q_acted
if idx is not None:
idx = torch.from_numpy(idx)
if self._use_cuda:
idx = idx.cuda()
if q.dim() == 2:
q_idx = q.gather(1, idx.unsqueeze(-1))
else:
q_idx = q.gather(1, idx.view(-1, 1).repeat(
1, self._max_actions).unsqueeze(1))
q = torch.squeeze(q_idx, 1)
return q
def get_shared_weights(self):
p2 = list()
p3 = list()
for p in self._h2.parameters():
p2.append(p.data.detach().cpu().numpy())
for p in self._h3.parameters():
p3.append(p.data.detach().cpu().numpy())
return p2, p3
def set_shared_weights(self, weights):
w2, w3 = weights
for p, w in zip(self._h2.parameters(), w2):
w_tensor = torch.from_numpy(w).type(p.data.dtype)
if self._use_cuda:
w_tensor = w_tensor.cuda()
p.data = w_tensor
for p, w in zip(self._h3.parameters(), w3):
w_tensor = torch.from_numpy(w).type(p.data.dtype)
if self._use_cuda:
w_tensor = w_tensor.cuda()
p.data = w_tensor
def freeze_shared_weights(self):
for p in self._h2.parameters():
p.requires_grad = False
for p in self._h3.parameters():
p.requires_grad = False
def unfreeze_shared_weights(self):
for p in self._h2.parameters():
p.requires_grad = True
for p in self._h3.parameters():
p.requires_grad = True
| 2.34375 | 2 |
ANNClassification.py | UniqueSud/AffectiveFilmStimuliDataset | 0 | 12768376 | <filename>ANNClassification.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import tensorflow as tf
#from tensorflow import keras
#from tensorflow.keras.layers import Dense, Dropout, GaussianNoise
#from tensorflow.keras import regularizers
import random
from sklearn.metrics import confusion_matrix, f1_score
import os
import pdb
videoPrefix = 'WithThirtyVideos_'
if videoPrefix == 'WithThirtyVideos_':
clipDire = '/mnt/7CBFA0EC210FC340/ExperimentRelatedData/FromUbuntuAcerSystem/Experiment/block_For_30_Stimuli'
else:
clipDire = os.path.join(sourceDir, 'all_clips')  # note: sourceDir is never defined in this script, so this branch needs it to be set elsewhere
#if emotionWise
DF=pd.read_csv(os.path.join(clipDire, videoPrefix+'ForClassificaion_AllMultimediaCalculatedPCADimensionsValenceArousal.csv'), index_col=0)
#DF=pd.read_csv(os.path.join(sourceDir, 'all_clips/ForClassificaion_AllMultimediaCalculatedPCADimensionsValenceArousalGB.csv'), index_col=0)
# baseline cnn model for mnist
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.model_selection import KFold
'''from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD'''
# load train and test dataset
def load_dataset():
# load dataset
(trainX, trainY), (testX, testY) = mnist.load_data()
# reshape dataset to have a single channel
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
testX = testX.reshape((testX.shape[0], 28, 28, 1))
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
# convert from integers to floats
train_norm = train.astype('float32')
test_norm = test.astype('float32')
# normalize to range 0-1
train_norm = train_norm / 255.0
test_norm = test_norm / 255.0
# return normalized images
return train_norm, test_norm
# define cnn model
def define_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
# compile model
opt = SGD(lr=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
scores, histories = list(), list()
# prepare cross validation
kfold = KFold(n_folds, shuffle=True, random_state=1)
# enumerate splits
for train_ix, test_ix in kfold.split(dataX):
# define model
model = define_model()
# select rows for train and test
trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
# fit model
history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
# evaluate model
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
# stores scores
scores.append(acc)
histories.append(history)
return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
for i in range(len(histories)):
# plot loss
pyplot.subplot(2, 1, 1)
pyplot.title('Cross Entropy Loss')
pyplot.plot(histories[i].history['loss'], color='blue', label='train')
pyplot.plot(histories[i].history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(2, 1, 2)
pyplot.title('Classification Accuracy')
pyplot.plot(histories[i].history['accuracy'], color='blue', label='train')
pyplot.plot(histories[i].history['val_accuracy'], color='orange', label='test')
pyplot.show()
# summarize model performance
def summarize_performance(scores):
# print summary
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
# box and whisker plots of results
pyplot.boxplot(scores)
pyplot.show()
# run the test harness for evaluating a model
def DeepLearning(trainX,trainY,testX, testY, classList):
# load dataset
# prepare pixel data
#trainX, testX = prep_pixels(trainX, testX)
# evaluate model
pdb.set_trace()
scores, histories = evaluate_model(trainX, trainY)
# learning curves
summarize_diagnostics(histories)
# summarize estimated performance
summarize_performance(scores)
def svmClass(X, y, X_test, y_test, classList):
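# What this helper does (summarising the code below): fit an SVC with three C settings
# (default, 0.5 and 0.75), then over 20 random balanced subsamples of the test set
# (every class down-sampled to the size of the smallest one) keep the best setting per
# repeat and average its per-class F1 scores, confusion matrix and overall accuracy.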
from sklearn import svm
# kernels: kernel='rbf', kernel='linear', kernel='sigmoid'
# the callers pass the number of classes rather than a list of labels, so accept both forms
if isinstance(classList, (int, np.integer)):
    classList = np.arange(classList)
no_classes = len(classList)
clf = svm.SVC()
clf.fit(X, y)
overallAccMax = 0
flag = 0
for iter_ in np.arange(20):
minn = 1000
for class_ in classList:
if minn > sum(y_test==class_):
minn = sum(y_test==class_)
for class_ in classList:
# minn is a 1-element array here (the builtin sum over the (n, 1) boolean array reduces along axis 0), so minn[0] is the scalar count
sampIdxs = random.sample(np.where(y_test==class_)[0].tolist(), int(minn[0]))
if class_ == 0:
newTestSet = X_test[sampIdxs, :]
newTargSet = y_test[sampIdxs]
else:
newTestSet = np.concatenate((newTestSet, X_test[sampIdxs, :]), axis=0)
newTargSet = np.concatenate((newTargSet, y_test[sampIdxs]), axis=0)
print(f' Test Target 0 = {sum(newTargSet.reshape(-1)==0)}')
print(f' Test Target 1 = {sum(newTargSet.reshape(-1)==1)}')
print(f' Test Target 2 = {sum(newTargSet.reshape(-1)==2)}')
pre_labels = clf.predict(newTestSet).reshape(-1,1)
res = f1_score(newTargSet, pre_labels, average=None).reshape(1,no_classes)
cm = confusion_matrix(newTargSet, pre_labels)
overallAccuracy = [sum(cm[np.arange(no_classes),np.arange(no_classes)])/np.sum(cm)]
cmArr = cm.reshape(1,no_classes,no_classes)
clf = svm.SVC(C=0.5)
clf.fit(X, y)
pre_labels = clf.predict(newTestSet).reshape(-1,1)
res = np.concatenate((res, f1_score(newTargSet, pre_labels, average=None).reshape(1,no_classes)), axis=0)
cm = confusion_matrix(newTargSet, pre_labels)
cmArr = np.concatenate((cmArr, cm.reshape(1,no_classes,no_classes)), axis=0)
overallAccuracy.extend([sum(cm[np.arange(no_classes),np.arange(no_classes)])/np.sum(cm)])
clf = svm.SVC(C=0.75)
clf.fit(X, y)
pre_labels = clf.predict(newTestSet).reshape(-1,1)
res = np.concatenate((res, f1_score(newTargSet, pre_labels, average=None).reshape(1,no_classes)), axis=0)
cm = confusion_matrix(newTargSet, pre_labels)
cmArr = np.concatenate((cmArr, cm.reshape(1,no_classes,no_classes)), axis=0)
overallAccuracy.extend([sum(cm[np.arange(no_classes),np.arange(no_classes)])/np.sum(cm)])
index = np.argmax(overallAccuracy)
if flag == 0:
newOverallAcc = overallAccuracy[index]
newF1Score = np.reshape(res[index], (1, no_classes)) #
newCMatrix = np.reshape(cmArr[index], (1,no_classes,no_classes)) #
flag = 1
else:
newOverallAcc = newOverallAcc+overallAccuracy[index] #
newF1Score = np.concatenate((newF1Score, np.reshape(res[index], (1, no_classes))), axis=0)
newCMatrix = np.concatenate((newCMatrix, np.reshape(cmArr[index], (1,no_classes,no_classes))), axis=0)
'''if flag == 0:
if max(overallAccuracy) > overallAccMax:
index = np.argmax(overallAccuracy) # Finding that out of three settings in which setting the accuracy is maximum
overallAccMax = overallAccuracy[index] #
newOverallAcc = overallAccuracy #
newF1Score = res[index] #
newCMatrix = cmArr[index] #
flag = 1
if flag == 1:
if max(overallAccuracy) > overallAccMax:
index = np.argmax(overallAccuracy) # Finding that out of three settings in which setting the accuracy is maximum
overallAccMax = overallAccuracy[index] #
newOverallAcc = newOverallAcc+overallAccuracy #
newF1Score = newF1Score + res[index] #
newCMatrix = newCMatrix + cmArr[index] #'''
print(newOverallAcc/20)
print(np.mean(newF1Score, axis=0))
print(np.mean(newCMatrix, axis=0))
index = np.argmax(overallAccuracy)
# return the index of the best SVC setting (0, 1 or 2) last; the callers use it as a key into svmDict
return list([np.mean(newF1Score, axis=0), np.mean(newCMatrix, axis=0), newOverallAcc/20, index])
def with4Quadrants():
############################# Data Cleaning
noFeature = len(DF.keys())-3
DF['index'] = np.arange(len(DF))
Experiment_ids = DF.index.values
DF.set_index('index', drop=True, inplace=True)
# Find out the duplicate data.
duplicateIndexes = np.where(DF.duplicated().values)[0]
DF.drop(duplicateIndexes, axis=0, inplace=True)
DF.reset_index(drop=True, inplace=True)
target = DF.iloc[:, noFeature+2].values.reshape(-1, 1)
############################################
#### If you want to remove 4 class (class with very less samples) selection remove4Class = 1; no_classes = 3, else remove4Class = 0; no_classes = 4
remove4Class = 1
no_classes = 3
####### Removing HVLA Class. Since, it has less number of samples ############
if remove4Class == 1:
indexToRemove = DF.index.values[np.where(target==3)[0]]
DF.drop(indexToRemove, axis=0, inplace=True)
DF.reset_index(drop=True, inplace=True)
#DF['Experiment_id'] = Experiment_ids
#DF.set_index('Experiment_id', drop=True, inplace=True)
##################################
#### Here I am selecting the random features. It was an attempt to reduce the overfitting problem ############
randomFeature = random.sample(np.arange(noFeature).tolist(), noFeature)
np.save('randomFeatures.npy', randomFeature)
#########################################################
ValArlDim = DF.iloc[:, randomFeature].values
target = DF.iloc[:, noFeature+2].values.reshape(-1, 1)
# Data split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(ValArlDim, target, test_size = 0.2, random_state = 0)
print(sum(y_train==0))
print(sum(y_train==1))
print(sum(y_train==2))
if remove4Class == 0:
print(sum(y_train==3))
minSamples = np.min([sum(y_train==0), sum(y_train==1), sum(y_train==2), sum(y_train==3)])
class1Samples = np.where(y_train==0)[0]
class2Samples = np.where(y_train==1)[0]
class3Samples = np.where(y_train==2)[0]
class4Samples = np.where(y_train==3)[0]
else:
minSamples = np.min([sum(y_train==0), sum(y_train==1), sum(y_train==2)])
class1Samples = np.where(y_train==0)[0]
class2Samples = np.where(y_train==1)[0]
class3Samples = np.where(y_train==2)[0]
maxRes = [0]
svmAcc = 0
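# Balance the training data each repeat by down-sampling every class to the size of
# the smallest class; only the repeat with the best overall SVM accuracy gets saved.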
for repeat in np.arange(50):
class1Idx = random.sample(class1Samples.tolist(), minSamples)
class2Idx = random.sample(class2Samples.tolist(), minSamples)
class3Idx = random.sample(class3Samples.tolist(), minSamples)
trainingData = X_train[class1Idx,:]
trainingData = np.concatenate((trainingData, X_train[class2Idx,:]), axis=0)
trainingData = np.concatenate((trainingData, X_train[class3Idx,:]), axis=0)
if remove4Class == 0:
trainingData = np.concatenate((trainingData, X_train[class4Samples,:]), axis=0)
#### Adding Noise here
#noise = np.random.normal(0, 1, trainingData.shape)
#trainingData = trainingData + noise
#trainingData = np.concatenate((trainingData, noise), axis=0)
allTargets = y_train[class1Idx]
allTargets = np.concatenate((allTargets, y_train[class2Idx]), axis=0)
allTargets = np.concatenate((allTargets, y_train[class3Idx]), axis=0)
if remove4Class == 0:
allTargets = np.concatenate((allTargets, y_train[class4Samples]), axis=0)
#allTargets = np.concatenate((allTargets, allTargets), axis=0) ### THese are the labels for noise.
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
trainingData = sc.fit_transform(trainingData)
X_test = sc.fit_transform(X_test)
transDataTrain = trainingData.copy()
transDataTest = X_test.copy()
svmDict = {0:'rbf', 1:'rbf_with_C-0.5', 2:'rbf_with_C-0.75'}
#################### SVM Classification #############
#res = svmClass(trainingData, allTargets, X_test, y_test)
res = svmClass(transDataTrain, allTargets, transDataTest, y_test, no_classes)
if res[2] > svmAcc:
np.save(videoPrefix+'no_classes-%s_' %str(no_classes)+svmDict[res[3]]+'_svm_f1_score.npy', res[0])
np.save(videoPrefix+'no_classes-%s_' %str(no_classes)+svmDict[res[3]]+'_svm_conf_mat.npy', res[1])
np.save(videoPrefix+'no_classes-%s_' %str(no_classes)+svmDict[res[3]]+'_svm_over_acc.npy', res[2])
svmAcc = res[2]
############################################## Classification in valence and arousal only ########################################################
def with2Classes():
############################# Data Cleaning
valenceFlag = 1 # if 1: valence calculation, else Arousal Calculation
no_classes = 2
DF['index'] = np.arange(len(DF))
Experiment_ids = DF.index.values
pdb.set_trace()
DF.set_index('index', drop=True, inplace=True)
# Find out the duplicate data.
duplicateIndexes = np.where(DF.duplicated().values)[0]
DF.drop(duplicateIndexes, axis=0, inplace=True)
DF.reset_index(drop=True, inplace=True)
################ For valence
valenceFeatColumns = ['max_rhythm_comp', 'min_rhythm_comp', 'mean_rhythm_comp', 'std_rhythm_comp', 'rhythm_comp_percent_1', 'rhythm_comp_percent_2', 'rhythm_comp_percent_3', 'max_bright_array', 'min_bright_array', 'mean_bright_array', 'std_bright_array', 'bright_array_percent_1', 'bright_array_percent_2', 'bright_array_percent_3']
valenceFeatColumns.extend(['val-'+str(i) for i in np.arange(15)])
valFeatFrame = DF.loc[:, valenceFeatColumns]
valFeatFrame['target'] = DF['target']
valFeatFrame["target"].replace({0:0, 3:0, 1:1, 2:1}, inplace=True)
valtarget = valFeatFrame.loc[:, 'target'].values.reshape(-1, 1)
valFeatDim = valFeatFrame.drop('target', axis=1).values
arousalFeatColumns = ['max_motion_comp', 'min_motion_comp', 'mean_motion_comp', 'std_motion_comp', 'motion_percent_1', 'motion_percent_2', 'motion_percent_3', 'max_shotRate', 'min_shotRate', 'mean_shotRate', 'std_shotRate', 'shotRate_percent_1', 'shotRate_percent_2', 'shotRate_percent_3']
arousalFeatColumns.extend(['arl-'+str(i) for i in np.arange(15)])
arlFeatFrame = DF.loc[:, arousalFeatColumns]
arlFeatFrame['target'] = DF['target']
arlFeatFrame["target"].replace({0:0, 1:0, 2:1, 3:1}, inplace=True)
arltarget = arlFeatFrame.loc[:, 'target'].values.reshape(-1, 1)
arlFeatDim = arlFeatFrame.drop('target', axis=1).values
############################################
if valenceFlag == 1:
featValues = valFeatDim.copy()
targetVal = valtarget.copy()
prefix = 'valence'
else:
featValues = arlFeatDim.copy()
targetVal = arltarget.copy()
prefix = 'arousal'
# Data split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(featValues, targetVal, test_size = 0.2, random_state = 0)
print(sum(y_train==0))
print(sum(y_train==1))
minSamples = np.min([sum(y_train==0), sum(y_train==1)])
class1Samples = np.where(y_train==0)[0]
class2Samples = np.where(y_train==1)[0]
svmAcc = 0
for repeat in np.arange(50):
class1Idx = random.sample(class1Samples.tolist(), minSamples)
class2Idx = random.sample(class2Samples.tolist(), minSamples)
trainingData = X_train[class1Idx,:]
trainingData = np.concatenate((trainingData, X_train[class2Idx,:]), axis=0)
allTargets = y_train[class1Idx]
allTargets = np.concatenate((allTargets, y_train[class2Idx]), axis=0)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
trainingData = sc.fit_transform(trainingData)
X_test = sc.fit_transform(X_test)
svmDict = {0:'rbf', 1:'rbf_with_C-0.5', 2:'rbf_with_C-0.75'}
#################### SVM Classification #############
#res = svmClass(trainingData, allTargets, X_test, y_test)
res = svmClass(trainingData, allTargets, X_test, y_test, no_classes)
if res[2] > svmAcc:
np.save(videoPrefix+prefix+'-'+svmDict[res[3]]+'_svm_f1_score.npy', res[0])
np.save(videoPrefix+prefix+'-'+svmDict[res[3]]+'_svm_conf_mat.npy', res[1])
np.save(videoPrefix+prefix+'-'+svmDict[res[3]]+'_svm_over_acc.npy', res[2])
svmAcc = res[2]
pdb.set_trace()
'''
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder()
allTargets = onehotencoder.fit_transform(allTargets).toarray()
# Model building
classifier = keras.Sequential()
#add input layer and first hidden layer
#classifier.add(Dropout(0.5, name='dropout1'))
classifier.add(Dense(30, kernel_regularizer=regularizers.l2(0.001), activation = 'relu', input_dim = noFeature))
#classifier.add(GaussianNoise(0.5))
#classifier.add(Dropout(0.5, name='dropout2'))
#classifier.add(Dense(30, kernel_regularizer=regularizers.l2(0.001), activation = 'relu'))
#classifier.add(Dropout(0.1, name='dropout1'))
#add 2nd hidden layer
#classifier.add(Dense(output_dim = 6, init = ‘uniform’, activation = ‘relu’))
if remove4Class == 0:
classifier.add(Dense(4, activation = 'softmax'))
else:
classifier.add(Dense(3, activation = 'softmax'))
# Model compiling
batchsize = 20 #int(trainingData.shape[0]/100)
classifier.compile(optimizer = 'Adam', loss ='categorical_crossentropy', metrics = ['accuracy'])
print(trainingData.shape)
print(allTargets.shape)
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=sourceDir, save_weights_only=True, monitor='val_accuracy', mode='max', save_best_only=True)
history = classifier.fit(trainingData, allTargets, batch_size = batchsize, epochs = 1000, validation_split=0.1)
# Model prediction
y_pred = classifier.predict(X_test)
y_prid_ = np.array([np.where(y_pred[i,:]==np.max(y_pred[i]))[0][0] for i in np.arange(len(y_pred))]).reshape(-1,1)
# confusion_matrix
res = f1_score(y_test, y_prid_, average=None)
if sum(res) > sum(maxRes):
print(sum(res))
max_history = history.history.copy()
maxRes = res
pdb.set_trace()
tf.keras.utils.plot_model(classifier, to_file='AfterBalancing_model_%s.png' %str(maxRes), show_shapes=True, show_dtype=True)
print(max_history.keys())
# summarize history for accuracy
plt.plot(max_history['accuracy'])
plt.plot(max_history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('AfterBalancing_AccuracyTrace_%s.png' %str(maxRes))
plt.savefig('AfterBalancing_AccuracyTrace_%s.pdf' %str(maxRes))
# summarize history for loss
plt.plot(max_history['loss'])
plt.plot(max_history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('AfterBalancing_LossTrace_%s.png' %str(maxRes))
plt.savefig('AfterBalancing_LossTrace_%s.pdf' %str(maxRes))'''
#pdb.set_trace()
#cm = confusion_matrix(y_test, y_prid_)
#pdb.set_trace()
'''################################# Trying to reduce the overfit
1. I can try regularization technique.
2.
''' | 2.640625 | 3 |
src/solve_all_sudokus.py | Lgt2x/PIT_doku | 0 | 12768377 | #!/usr/bin/env python3
from grid import SudokuGrid
from solver import SudokuSolver
import os.path
import time
import multiprocessing
def solve_all(running_times):
for l in range(1, 245):
        g = SudokuGrid.from_file(os.path.join(os.path.dirname(__file__), "..", "sudoku_db.txt"), l)  # fixed unbalanced parentheses; assumes from_file(path, puzzle_index)
start = time.monotonic()
solver = SudokuSolver(g)
solver.solve()
running_times.append(1000 * (time.monotonic() - start))
print("\r[{: <40}] ({:.0%})".format('='*int(40 * l / 244), l / 244), end='')
if __name__ == "__main__":
manager = multiprocessing.Manager()
running_times = manager.list()
p = multiprocessing.Process(target=solve_all, args=(running_times,))
print("Starting solver on all 244 instances in 'sudoku_db.txt' with a time-out of 5min...")
p.start()
p.join(300)
if p.is_alive():
print("\nTime-out!")
p.terminate()
p.join()
else:
print()
n_runs = len(running_times)
print("Number of completed run: {}".format(n_runs))
print("Running times statistics: min = {:.3f}ms, average = {:.3f}ms, max = {:.3f}ms".format(
min(running_times), sum(running_times) / n_runs, max(running_times)))
| 3.125 | 3 |
respa_o365/sync_operations.py | tansionline/respa | 0 | 12768378 | <gh_stars>0
import logging
from enum import Enum
logger = logging.getLogger(__name__)
class SyncOperations:
def __init__(self, changes, sync_actions):
self._changes = changes
self.sync_actions = sync_actions
def get_sync_operations(self):
"""Returns list of operations that would synchronise the given changes between systems.
        Changes is expected to be a list of id-state-tuple pairs. Each tuple represents the
        item in either Respa (first one) or in the other system (second one). Caller is expected to
align dependant items together.
[
((respa id, status), (other id,status)), # Item exist in both systems
(None, (other id,status)), # Item exist in remote system only
((respa id, status), None), # Item exist in Respa only
...
]
        Returned operations are actions which can be interpreted using SyncActionVisitor
"""
ops = []
for pair in self._changes:
respa_id, respa_state = pair[0] if pair[0] else (None, None)
remote_id, remote_state = pair[1] if pair[1] else (None, None)
try:
fn = self.sync_actions[respa_state][remote_state]
result = fn(respa_id, remote_id)
logger.info("{} ({}) + ({}) {} -> {}".format(respa_id, respa_state, remote_state, remote_id, result))
if result:
ops.append(result)
except KeyError:
pass
return ops
class SyncActionVisitor:
"""Visitor for sync actions. Implement this protocol to perform synchronisation operations (SyncAction)."""
def create_event(self, target, source_id):
pass
def delete_event(self, target, target_id):
pass
def update_event(self, target, source_id, target_id):
pass
def remove_mapping(self, respa_id, remote_id):
pass
class SyncAction:
def __eq__(self, other):
"""Action is equal when internal fields are equal"""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __str__(self):
"""Creates string representation that looks like this:
Class{'field1': 'value2', 'field2': 'value2'}
"""
class_name = str(self.__class__.__name__)
return class_name + str({k: v for k, v in self.__dict__.items()})
class CreateEvent(SyncAction):
def __init__(self, target, source_system_id):
self.__target = target
self.__source_id = source_system_id
def accept(self, visitor):
visitor.create_event(target=self.__target, source_id=self.__source_id)
class UpdateEvent(SyncAction):
def __init__(self, target, target_system_id, source_system_id):
self.__target = target
self.__target_id = target_system_id
self.__source_id = source_system_id
def accept(self, visitor):
visitor.update_event(target=self.__target, target_id=self.__target_id, source_id=self.__source_id)
class DeleteEvent(SyncAction):
def __init__(self, target, target_system_id):
self.__target = target
self.__target_id = target_system_id
def accept(self, visitor):
visitor.delete_event(target=self.__target, target_id=self.__target_id)
class RemoveMapping(SyncAction):
def __init__(self, respa_id, remote_id):
self.__respa_id = respa_id
self.__remote_id = remote_id
def accept(self, visitor):
visitor.remove_mapping(respa_id=self.__respa_id, remote_id=self.__remote_id)
class ChangeType(Enum):
NO_CHANGE = 1
CREATED = 2
UPDATED = 3
DELETED = 4
class TargetSystem(Enum):
RESPA = 1
REMOTE = 2
class SyncActionFactory:
@staticmethod
def to(target, fn):
def wrapper(*args):
return fn(target, *args)
return wrapper
@staticmethod
def nop(respa_id, remote_id):
pass
@staticmethod
def delete(target, respa_id, remote_id):
if target == TargetSystem.RESPA:
return DeleteEvent(TargetSystem.RESPA, target_system_id=respa_id)
else:
return DeleteEvent(TargetSystem.REMOTE, target_system_id=remote_id)
@staticmethod
def create(target, respa_id, remote_id):
if target == TargetSystem.RESPA:
return CreateEvent(TargetSystem.RESPA, source_system_id=remote_id)
else:
return CreateEvent(TargetSystem.REMOTE, source_system_id=respa_id)
@staticmethod
def update(target, respa_id, remote_id):
if target == TargetSystem.RESPA:
return UpdateEvent(TargetSystem.RESPA, source_system_id=remote_id, target_system_id=respa_id)
else:
return UpdateEvent(TargetSystem.REMOTE, source_system_id=respa_id, target_system_id=remote_id)
@staticmethod
def removeMapping(respa_id, remote_id):
return RemoveMapping(respa_id, remote_id)
def build_reservation_sync_actions_dict():
REMOTE = TargetSystem.REMOTE
RESPA = TargetSystem.RESPA
s = SyncActionFactory
statesToAction = {
None: {
ChangeType.NO_CHANGE: s.to(RESPA, s.create),
ChangeType.CREATED: s.to(RESPA, s.create),
ChangeType.UPDATED: s.to(RESPA, s.create),
ChangeType.DELETED: s.nop},
ChangeType.NO_CHANGE: {
None: s.to(REMOTE, s.create),
ChangeType.NO_CHANGE: s.nop,
ChangeType.CREATED: s.to(RESPA, s.update),
ChangeType.UPDATED: s.to(RESPA, s.update),
ChangeType.DELETED: s.to(RESPA, s.delete)},
ChangeType.CREATED: {
None: s.to(REMOTE, s.create),
ChangeType.NO_CHANGE: s.to(REMOTE, s.update),
ChangeType.CREATED: s.to(REMOTE, s.update),
ChangeType.UPDATED: s.to(REMOTE, s.update),
ChangeType.DELETED: s.to(REMOTE, s.create)},
ChangeType.UPDATED: {
None: s.to(REMOTE, s.create),
ChangeType.NO_CHANGE: s.to(REMOTE, s.update),
ChangeType.CREATED: s.to(REMOTE, s.update),
ChangeType.UPDATED: s.to(REMOTE, s.update),
ChangeType.DELETED: s.to(REMOTE, s.create)},
ChangeType.DELETED: {
None: s.nop,
ChangeType.NO_CHANGE: s.to(REMOTE, s.delete),
ChangeType.CREATED: s.to(REMOTE, s.delete),
ChangeType.UPDATED: s.to(REMOTE, s.delete),
ChangeType.DELETED: s.removeMapping}
}
return statesToAction
def build_availability_sync_actions_dict():
REMOTE = TargetSystem.REMOTE
RESPA = TargetSystem.RESPA
s = SyncActionFactory
statesToAction = {
None: {
ChangeType.NO_CHANGE: s.to(RESPA, s.create),
ChangeType.CREATED: s.to(RESPA, s.create),
ChangeType.UPDATED: s.to(RESPA, s.create),
ChangeType.DELETED: s.nop},
ChangeType.NO_CHANGE: {
None: s.to(RESPA, s.delete),
ChangeType.NO_CHANGE: s.nop,
ChangeType.CREATED: s.to(RESPA, s.update),
ChangeType.UPDATED: s.to(RESPA, s.update),
ChangeType.DELETED: s.to(RESPA, s.delete)},
ChangeType.CREATED: {
None: s.to(RESPA, s.delete),
ChangeType.NO_CHANGE: s.to(RESPA, s.update),
ChangeType.CREATED: s.to(RESPA, s.update),
ChangeType.UPDATED: s.to(RESPA, s.update),
ChangeType.DELETED: s.to(RESPA, s.delete)},
ChangeType.UPDATED: {
None: s.to(RESPA, s.delete),
ChangeType.NO_CHANGE: s.to(RESPA, s.update),
ChangeType.CREATED: s.to(RESPA, s.update),
ChangeType.UPDATED: s.to(RESPA, s.update),
ChangeType.DELETED: s.to(RESPA, s.delete)},
ChangeType.DELETED: {
None: s.nop,
ChangeType.NO_CHANGE: s.to(RESPA, s.create),
ChangeType.CREATED: s.to(RESPA, s.create),
ChangeType.UPDATED: s.to(RESPA, s.create),
ChangeType.DELETED: s.removeMapping}
}
return statesToAction
reservationSyncActions = build_reservation_sync_actions_dict()
availabilitySyncActions = build_availability_sync_actions_dict()
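# Illustrative usage sketch (not part of the original module). The ids and change
# states below are made-up example data; only SyncOperations, ChangeType and
# reservationSyncActions come from the code above.
if __name__ == '__main__':
    example_changes = [
        ((1, ChangeType.CREATED), None),                           # exists in Respa only
        (None, ('abc', ChangeType.CREATED)),                       # exists in remote system only
        ((2, ChangeType.NO_CHANGE), ('def', ChangeType.UPDATED)),  # remote side was updated
        ((3, ChangeType.DELETED), ('ghi', ChangeType.DELETED)),    # deleted on both sides
    ]
    for op in SyncOperations(example_changes, reservationSyncActions).get_sync_operations():
        print(op)  # CreateEvent{...}, UpdateEvent{...}, RemoveMapping{...}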
| 3.015625 | 3 |
allmychanges/auth/pipeline.py | AllMyChanges/allmychanges.com | 46 | 12768379 | <gh_stars>10-100
from ..models import Changelog
def add_default_package(strategy, is_new=None, user=None, *args, **kwargs):
if is_new:
try:
changelog = Changelog.objects.get(
namespace='web', name='allmychanges')
except Changelog.DoesNotExist:
changelog = Changelog.objects.create(
namespace='web', name='allmychanges',
source='https://allmychanges.com/CHANGELOG.md')
user.track(changelog)
| 1.734375 | 2 |
setup.py | mahinlma/PYNQ-experiment | 69 | 12768380 | <gh_stars>10-100
# Copyright (c) 2019, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Xilinx"
__email__ = "<EMAIL>"
from setuptools import setup, find_packages
import os
import platform
import re
from pynq.utils import build_py
# global variables
module_name = "pynq_helloworld"
data_files = []
current_platform = ""
# parse version number
def find_version(file_path):
with open(file_path, 'r') as fp:
version_file = fp.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise NameError("Version string must be defined in {}.".format(file_path))
# extend package
def extend_package(path):
if os.path.isdir(path):
data_files.extend(
[os.path.join("..", root, f)
for root, _, files in os.walk(path) for f in files]
)
elif os.path.isfile(path):
data_files.append(os.path.join("..", path))
# get current platform: either edge or pcie
def get_platform():
cpu = platform.processor()
if cpu in ['armv7l', 'aarch64']:
return "edge"
elif cpu in ['x86_64']:
return "pcie"
else:
raise OSError("Platform is not supported.")
pkg_version = find_version('{}/__init__.py'.format(module_name))
with open("README.md", encoding='utf-8') as fh:
readme_lines = fh.readlines()[2:6]
long_description = (''.join(readme_lines))
extend_package(os.path.join(module_name, "notebooks"))
setup(
name=module_name,
version=pkg_version,
description="PYNQ example design supporting edge and PCIE boards",
long_description=long_description,
long_description_content_type='text/markdown',
author='Xilinx PYNQ Development Team',
author_email="<EMAIL>",
url='https://github.com/Xilinx/PYNQ-HelloWorld.git',
license='BSD 3-Clause License',
packages=find_packages(),
package_data={
"": data_files,
},
python_requires=">=3.6.0",
install_requires=[
"pynq"
],
extras_require={
':python_version<"3.6"': [
'matplotlib<3.1',
'ipython==7.9'
],
':python_version>="3.6"': [
'matplotlib'
]
},
entry_points={
"pynq.notebooks": [
"pynq-helloworld = {}.notebooks.{}".format(
module_name, get_platform())
]
},
cmdclass={"build_py": build_py}
)
| 1.21875 | 1 |
pyiacsun/util/wigner.py | aasensio/pyiacsun | 5 | 12768381 | <gh_stars>1-10
from __future__ import division
from scipy.misc import factorial#, floor, sqrt
from numpy import arange, floor, sqrt, round
__all__ = ['wigner3j', 'wigner6j']
def TriaCoeff(a,b,c):
# Calculating the triangle coefficient
return factorial(a+b-c)*factorial(a-b+c)*factorial(-a+b+c)/(factorial(a+b+c+1))
def wigner3j(j1,j2,j3,m1,m2,m3):
"""Return the value of the 3-j symbol for the given values of j and m using the Racah formula [1].
/ j1 j2 j3 \
| |
\ m1 m2 m3 /
Reference: Wigner 3j-Symbol entry of Eric Weinstein's Mathworld:
http://mathworld.wolfram.com/Wigner3j-Symbol.html
Usage
wigner = Wigner3j(j1,j2,j3,m1,m2,m3)
Args:
j1 (float): j1
j2 (float): j2
j3 (float): j3
m1 (float): m1
m2 (float): m2
m3 (float): m3
Returns:
float: value of the 3-j symbol
"""
# Error checking
if ( ( 2*j1 != floor(2*j1) ) | ( 2*j2 != floor(2*j2) ) | ( 2*j3 != floor(2*j3) ) | ( 2*m1 != floor(2*m1) ) | ( 2*m2 != floor(2*m2) ) | ( 2*m3 != floor(2*m3) ) ):
print('All arguments must be integers or half-integers.')
return -1
# Additional check if the sum of the second row equals zero
if ( m1+m2+m3 != 0 ):
print('3j-Symbol unphysical')
return 0
if ( j1 - m1 != floor ( j1 - m1 ) ):
print('2*j1 and 2*m1 must have the same parity')
return 0
if ( j2 - m2 != floor ( j2 - m2 ) ):
print('2*j2 and 2*m2 must have the same parity')
        return 0
if ( j3 - m3 != floor ( j3 - m3 ) ):
print('2*j3 and 2*m3 must have the same parity')
return 0
if ( j3 > j1 + j2) | ( j3 < abs(j1 - j2) ):
print('j3 is out of bounds.')
return 0
if abs(m1) > j1:
print('m1 is out of bounds.')
return 0
if abs(m2) > j2:
print('m2 is out of bounds.')
return 0
if abs(m3) > j3:
print('m3 is out of bounds.')
return 0
t1 = j2 - m1 - j3
t2 = j1 + m2 - j3
t3 = j1 + j2 - j3
t4 = j1 - m1
t5 = j2 + m2
tmin = max( 0, max( t1, t2 ) )
tmax = min( t3, min( t4, t5 ) )
tvec = arange(tmin, tmax+1, 1)
wigner = 0
for t in tvec:
wigner += (-1)**t / ( factorial(t) * factorial(t-t1) * factorial(t-t2) * factorial(t3-t) * factorial(t4-t) * factorial(t5-t) )
return wigner * (-1)**(j1-j2-m3) * sqrt( factorial(j1+j2-j3) * factorial(j1-j2+j3) * factorial(-j1+j2+j3) / factorial(j1+j2+j3+1) * factorial(j1+m1) * factorial(j1-m1) * factorial(j2+m2) * factorial(j2-m2) * factorial(j3+m3) * factorial(j3-m3) )
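# Quick sanity check (illustrative, not part of the original module; it relies on an
# older SciPy where scipy.misc.factorial still exists). The reference value is a
# standard tabulated symbol, quoted from memory rather than from a tested source.
if __name__ == '__main__':
    # 3-j symbol (1 1 0; 0 0 0) should equal -1/sqrt(3) ~ -0.5774
    print(wigner3j(1, 1, 0, 0, 0, 0))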
def wigner6j(j1,j2,j3,J1,J2,J3):
"""Return the value of the 6-j symbol for the given values of j and m using the Racah formula.
/ j1 j2 j3 \
< >
\ J1 J2 J3 /
Based upon Wigner3j.m from <NAME>, Raytheon
Reference: http://mathworld.wolfram.com/Wigner6j-Symbol.html
Usage
wigner = Wigner6j(j1,j2,j3,J1,J2,J3)
Args:
j1 (float): j1
j2 (float): j2
j3 (float): j3
J1 (float): J1
J2 (float): J2
J3 (float): J3
Returns:
float: value of the 6-j symbol
"""
# Check that the js and Js are only integer or half integer
if ( ( 2*j1 != round(2*j1) ) | ( 2*j2 != round(2*j2) ) | ( 2*j2 != round(2*j2) ) | ( 2*J1 != round(2*J1) ) | ( 2*J2 != round(2*J2) ) | ( 2*J3 != round(2*J3) ) ):
print('All arguments must be integers or half-integers.')
return -1
# Check if the 4 triads ( (j1 j2 j3), (j1 J2 J3), (J1 j2 J3), (J1 J2 j3) ) satisfy the triangular inequalities
if ( ( abs(j1-j2) > j3 ) | ( j1+j2 < j3 ) | ( abs(j1-J2) > J3 ) | ( j1+J2 < J3 ) | ( abs(J1-j2) > J3 ) | ( J1+j2 < J3 ) | ( abs(J1-J2) > j3 ) | ( J1+J2 < j3 ) ):
print('6j-Symbol is not triangular!')
return 0
# Check if the sum of the elements of each traid is an integer
if ( ( 2*(j1+j2+j3) != round(2*(j1+j2+j3)) ) | ( 2*(j1+J2+J3) != round(2*(j1+J2+J3)) ) | ( 2*(J1+j2+J3) != round(2*(J1+j2+J3)) ) | ( 2*(J1+J2+j3) != round(2*(J1+J2+j3)) ) ):
print('6j-Symbol is not triangular!')
return 0
# Arguments for the factorials
t1 = j1+j2+j3
t2 = j1+J2+J3
t3 = J1+j2+J3
t4 = J1+J2+j3
t5 = j1+j2+J1+J2
t6 = j2+j3+J2+J3
t7 = j1+j3+J1+J3
# Finding summation borders
tmin = max(0, max(t1, max(t2, max(t3,t4))))
tmax = min(t5, min(t6,t7))
tvec = arange(tmin,tmax+1,1)
# Calculation the sum part of the 6j-Symbol
WignerReturn = 0
for t in tvec:
WignerReturn += (-1)**t*factorial(t+1)/( factorial(t-t1)*factorial(t-t2)*factorial(t-t3)*factorial(t-t4)*factorial(t5-t)*factorial(t6-t)*factorial(t7-t) )
# Calculation of the 6j-Symbol
return WignerReturn*sqrt( TriaCoeff(j1,j2,j3)*TriaCoeff(j1,J2,J3)*TriaCoeff(J1,j2,J3)*TriaCoeff(J1,J2,j3) ) | 3.578125 | 4 |
copy_crop_weights.py | kerryvernebegeman/Kerry-Verne-Begeman | 0 | 12768382 | <reponame>kerryvernebegeman/Kerry-Verne-Begeman
# ~~~ aydao ~~~~ 2020 ~~~
#
#
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import sys, getopt, os
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib import tfutil
from dnnlib.tflib.autosummary import autosummary
from training import dataset
from training import misc
import pickle
import argparse
# Note well that the argument order is target then source
def copy_and_crop_trainables_from(target_net, source_net) -> None:
source_trainables = source_net.trainables.keys()
target_trainables = target_net.trainables.keys()
names = [pair for pair in zip(source_trainables, target_trainables)]
skip = []
for pair in names:
source_name, target_name = pair
x = source_net.get_var(source_name)
y = target_net.get_var(target_name)
source_shape = x.shape
target_shape = y.shape
if source_shape != target_shape:
update = x
index = None
if 'Dense' in source_name:
index = 0
gap = source_shape[index] - target_shape[index]
start = abs(gap) // 2
end = start + target_shape[index]
update = update[start:end,:]
else:
if source_shape[2] != target_shape[2]:
index = 2
gap = source_shape[index] - target_shape[index]
start = abs(gap) // 2
end = start + target_shape[index]
update = update[:,:,start:end,:]
if source_shape[3] != target_shape[3]:
index = 3
gap = source_shape[index] - target_shape[index]
start = abs(gap) // 2
end = start + target_shape[index]
update = update[:,:,:,start:end]
target_net.set_var(target_name, update)
skip.append(source_name)
weights_to_copy = {target_net.vars[pair[1]]: source_net.vars[pair[0]] for pair in names if pair[0] not in skip}
tfutil.set_vars(tfutil.run(weights_to_copy))
def main(args):
source_pkl = args.source_pkl
target_pkl = args.target_pkl
output_pkl = args.output_pkl
tflib.init_tf()
with tf.Session() as sess:
with tf.device('/gpu:0'):
sourceG, sourceD, sourceGs = pickle.load(open(source_pkl, 'rb'))
targetG, targetD, targetGs = pickle.load(open(target_pkl, 'rb'))
print('Source:')
sourceG.print_layers()
sourceD.print_layers()
sourceGs.print_layers()
print('Target:')
targetG.print_layers()
targetD.print_layers()
targetGs.print_layers()
copy_and_crop_trainables_from(targetG, sourceG)
copy_and_crop_trainables_from(targetD, sourceD)
copy_and_crop_trainables_from(targetGs, sourceGs)
misc.save_pkl((targetG, targetD, targetGs), os.path.join('./', output_pkl))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Copy and crop weights from one StyleGAN pkl to another', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('source_pkl', help='Path to the source pkl (weights copied from this one). This will *not* be overwritten or modified.')
parser.add_argument('target_pkl', help='Path to the target pkl (weights copied onto this one). This will *not* be overwritten or modified.')
parser.add_argument('--output_pkl', default='network-copyover.pkl', help='Path to the output pkl (source_pkl weights copied into target_pkl architecture)')
args = parser.parse_args()
main(args) | 2.078125 | 2 |
pydhs/controller_stored_procedures.py | 00krishna-tools/pydhs | 0 | 12768383 | """
Controller Class
-----------------
This class contains the controller logic for the application. This takes
input from the table and other interface objects and then manages change of
state for the database. It will also pass the state changes to the table
objects.
"""
__author__ = 'krishnab'
__version__ = '0.1.0'
from pydhs.Database import DatabasePsycopg2
from pydhs.Database import DatabaseSqlalchemy
## Initialize Constants
TABLENAMES = ["union_table", "intersection_table"]
class Controller_stored_procedures():
def __init__(self, dbname):
## create a database object inside the controller to manage state changes
# to the database.
self.db = DatabasePsycopg2(dbname,
'krishnab',
'3kl4vx71',
'localhost',
5433)
self.conn_sqlalchemy = DatabaseSqlalchemy(dbname,
'krishnab',
'3kl4vx71',
'localhost',
5433)
self.database_table_fields = {}
def add_four_digit_function(self):
query = """
create or replace function four_digit_date(dt TEXT)
returns TEXT
as
$$
DECLARE
intDate INT;
newDate TEXT;
BEGIN
intDate = dt::INT;
IF (intDate > 1000) THEN
RETURN dt;
END IF;
IF (intDate < 18) THEN
intDate = intDate + 2000;
ELSIF (intDate > 18) THEN
intDate = intDate + 1900;
END IF;
RETURN intDate::TEXT;
END;
$$
LANGUAGE plpgsql;
"""
self.db.get_regular_cursor_query_no_return(query)
def add_wealth_v190_recode_function(self):
query = """
create or replace function wealth_v190_recode(val TEXT)
returns TEXT
as
$$
BEGIN
CASE val
WHEN '1.0', '2.0', '3.0', '4.0', '5.0' THEN
RETURN CAST(CAST(CAST(val as FLOAT )as INT) as TEXT);
WHEN 'lowest quintile' THEN
RETURN '1';
WHEN 'second quintile' THEN
RETURN '2';
WHEN 'middle quintile' THEN
RETURN '3';
WHEN 'fourth quintile' THEN
RETURN '4';
WHEN 'highest quintile' THEN
RETURN '5';
WHEN '1', '2', '3', '4', '5' THEN
RETURN val;
ELSE
RETURN val;
END CASE;
END;
$$
LANGUAGE plpgsql;
"""
self.db.get_regular_cursor_query_no_return(query)
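# Illustrative usage sketch (not part of the original class). 'mydhs' is a made-up
# database name; the hard-coded credentials in __init__ must point at a reachable
# PostgreSQL instance for these calls to succeed.
if __name__ == '__main__':
    controller = Controller_stored_procedures('mydhs')
    controller.add_four_digit_function()              # installs four_digit_date(dt TEXT)
    controller.add_wealth_v190_recode_function()      # installs wealth_v190_recode(val TEXT)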
| 3.328125 | 3 |
source-code/Delete_Node_in_a_BST.py | ttungl/Coding-Interview-Challenge | 0 | 12768384 | # 450. Delete_Node_in_a_BST
# <EMAIL>
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
# // search key in the tree, if key is found, return root.
# // if key found at node n:
# // + node has no left/right: return null
# // + node has either left/right: return right/left
# // + node has both left and right:
# // + find minval of right.
# // + set minval to current node found
# // + delete min in right.
# // time complexity: O(height of tree)
# // space complexity: O(n)
if root is None:
return None
# search the key
if root.val > key:
root.left = self.deleteNode(root.left, key)
elif root.val < key:
root.right = self.deleteNode(root.right, key)
else: # key is found
if root.left is None:
return root.right
elif root.right is None:
return root.left
minValue = root.right
while minValue.left: # find min value
minValue = minValue.left
# replace current found
minValue.left = root.left
return root.right
return root
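# Illustrative check (not part of the original solution). A minimal TreeNode is
# defined locally because the definition above is only a commented-out stub.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    # build   5        and delete the key 3
    #        / \
    #       3   6
    #      / \   \
    #     2   4   7
    root = TreeNode(5)
    root.left, root.right = TreeNode(3), TreeNode(6)
    root.left.left, root.left.right = TreeNode(2), TreeNode(4)
    root.right.right = TreeNode(7)
    root = Solution().deleteNode(root, 3)
    def inorder(n):
        return inorder(n.left) + [n.val] + inorder(n.right) if n else []
    print(inorder(root))  # expected: [2, 4, 5, 6, 7]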
| 4 | 4 |
applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/bad_dirname.py | schinmayee/nimbus | 20 | 12768385 | #!/usr/bin/python
# Copyright (C) <NAME> 2003. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
# Regression test: when directory of project root contained regex metacharacters,
# Boost.Build failed to work. Bug reported by <NAME>
from BoostBuild import Tester, List
t = Tester()
t.write("bad[abc]dirname/Jamfile", """
""")
t.write("bad[abc]dirname/project-root.jam", """
""")
t.run_build_system(subdir="bad[abc]dirname")
t.cleanup()
| 1.578125 | 2 |
koku/api/common/permissions/test/test_ocp_all_access.py | rubik-ai/koku | 157 | 12768386 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
from itertools import chain
from itertools import combinations
from unittest.mock import Mock
from django.test import TestCase
from api.common.permissions.openshift_all_access import OpenshiftAllAccessPermission
from api.iam.models import User
from api.provider.models import Provider
ACCESS_KEYS = {
Provider.PROVIDER_AWS: {"aws.account": {"read": ["*"]}},
Provider.PROVIDER_AZURE: {"azure.subscription_guid": {"read": ["*"]}},
Provider.PROVIDER_OCP: {"openshift.cluster": {"read": ["*"]}},
}
class OCPAllAccessPermissionTest(TestCase):
"""Test the OCP-on-All access permissions."""
def test_has_perm_with_access_on_get(self):
"""Test that a user with at least 1 access can execute."""
accessPerm = OpenshiftAllAccessPermission()
s = ACCESS_KEYS.keys()
for key in chain.from_iterable(combinations(s, r) for r in range(1, len(s) + 1)):
with self.subTest(permission=key):
access = {}
for k in key:
access.update(ACCESS_KEYS[k])
user = Mock(spec=User, access=access, admin=False)
req = Mock(user=user, method="GET")
result = accessPerm.has_permission(request=req, view=None)
self.assertTrue(result)
| 2.40625 | 2 |
lifelong_rl/trainers/multi_trainer.py | nakamotoo/lifelong_rl | 67 | 12768387 | <reponame>nakamotoo/lifelong_rl<filename>lifelong_rl/trainers/multi_trainer.py
from collections import OrderedDict
from lifelong_rl.core.rl_algorithms.torch_rl_algorithm import TorchTrainer
class MultiTrainer(TorchTrainer):
"""
Interface for combining multiple trainers into one trainer.
"""
def __init__(
self,
trainers, # List of trainers
trainer_steps, # List of number of steps to call each trainer per call of MultiTrainer
trainer_names=None, # Optionally, specify the names (used for printing/logging)
):
super().__init__()
assert len(trainers) == len(trainer_steps), 'Must specify number of steps for each trainer'
self.trainers = trainers
self.trainer_steps = trainer_steps
if trainer_names is None:
self.trainer_names = ['trainer_%d' % i for i in range(1, len(trainers)+1)]
else:
self.trainer_names = trainer_names
while len(self.trainer_names) < len(trainers):
self.trainer_names.append('trainer_%d' % (len(self.trainer_names)+1))
self.eval_statistics = OrderedDict()
def train_from_torch(self, batch):
for i in range(len(self.trainers)):
self.trainers[i].train_from_torch(batch)
for k, v in self.trainers[i].get_diagnostics().items():
self.eval_statistics[self.trainer_names[i] + '/' + k] = v
def get_diagnostics(self):
return self.eval_statistics
def end_epoch(self, epoch):
for trainer in self.trainers:
trainer.end_epoch(epoch)
@property
def networks(self):
networks = []
for trainer in self.trainers:
networks.extend(trainer.networks)
return networks
def get_snapshot(self):
snapshot = dict()
for i in range(len(self.trainers)):
for k, v in self.trainers[i].get_diagnostics().items():
snapshot[self.trainer_names[i] + '/' + k] = v
return snapshot
| 2.234375 | 2 |
precourse.py | cdhorn515/ML-Precourse | 0 | 12768388 | # Machine Learning/Data Science Precourse Work
# ###
# LAMBDA SCHOOL
# ###
# MIT LICENSE
# ###
# Free example function definition
# This function passes one of the 11 tests contained inside of test.py. Write the rest, defined in README.md, here,
# and execute python test.py to test. Passing this precourse work will greatly increase your odds of acceptance
# into the program.
import math
import numpy as np
def f(x):
return x**2
def f_2(x):
return x**3
def f_3(x):
return (x**3) + (5*x)
def d_f(x):
return 2*x
def d_f_2(x):
return 3*(x**2)
def d_f_3(x):
return 3*(x**2) + 5
# for all values of x, return x + y (element-wise, over the full vectors)
def vector_sum(x, y):
    return [x[i] + y[i] for i in range(len(x))]
# for all values of x, return x - y (element-wise, over the full vectors)
def vector_less(x, y):
    return [x[i] - y[i] for i in range(len(x))]
def vector_magnitude(v):
sqvector = 0
for vector in v:
sqvector += vector**2
return math.sqrt(sqvector)
def vec5():
return np.array([1, 1, 1, 1, 1])
def vec3():
return np.array([0, 0, 0])
def vec2_1():
return np.array([1, 0])
def vec2_2():
return np.array([0, 1])
def matrix_multiply(vec, matrix):
return np.dot(vec, matrix)
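# Small self-check (illustrative, not part of the graded precourse work); the
# expected values in the comments follow directly from the definitions above.
if __name__ == '__main__':
    print(d_f(3))                                    # 6
    print(d_f_2(2))                                  # 12
    print(vector_sum([1, 2, 3], [4, 5, 6]))          # [5, 7, 9]
    print(vector_magnitude([3, 4]))                  # 5.0
    print(matrix_multiply(vec2_1(), np.array([[1, 2], [3, 4]])))  # [1 2]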
| 3.859375 | 4 |
climateeconomics/tests/l2_test_witness_coarse_cache.py | os-climate/witness-core | 1 | 12768389 | <reponame>os-climate/witness-core
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from climateeconomics.sos_processes.iam.witness.witness_coarse_optim_process.usecase_witness_optim_invest_distrib import Study as witness_proc_usecase
import unittest
from energy_models.core.energy_study_manager import DEFAULT_COARSE_TECHNO_DICT
from energy_models.core.energy_process_builder import INVEST_DISCIPLINE_OPTIONS
from gemseo.utils.compare_data_manager_tooling import compare_dict
class WitnessCoarseCache(unittest.TestCase):
def test_01_cache_on_witness_coarse_optim_with_unconverged_mda(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
builder = self.ee.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'witness_coarse_optim_process', techno_dict=DEFAULT_COARSE_TECHNO_DICT,
invest_discipline=INVEST_DISCIPLINE_OPTIONS[1])
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
usecase = witness_proc_usecase(
bspline=True, execution_engine=self.ee, techno_dict=DEFAULT_COARSE_TECHNO_DICT, invest_discipline=INVEST_DISCIPLINE_OPTIONS[1])
usecase.study_name = self.name
values_dict = usecase.setup_usecase()
full_values_dict = {}
for dict_v in values_dict:
full_values_dict.update(dict_v)
algo_options = {"ftol_rel": 3e-16,
"normalize_design_space": False,
"maxls": 2 * 55,
"maxcor": 55,
"pg_tol": 1.e-8,
"max_iter": 2,
"disp": 110}
full_values_dict['Test.WITNESS_MDO.algo_options'] = algo_options
full_values_dict['Test.WITNESS_MDO.WITNESS_Eval.warm_start'] = False
full_values_dict['Test.WITNESS_MDO.max_iter'] = 1
full_values_dict['Test.WITNESS_MDO.WITNESS_Eval.sub_mda_class'] = 'MDAGaussSeidel'
full_values_dict['Test.WITNESS_MDO.WITNESS_Eval.max_mda_iter'] = 1
# execute optim without cache and retrieve dm
self.ee.load_study_from_input_dict(full_values_dict)
self.ee.execute()
dm_without_cache = self.ee.dm.get_data_dict_values()
# execute optim with SimpleCache and retrieve dm
self.ee2 = ExecutionEngine(self.name)
builder = self.ee2.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'witness_coarse_optim_process', techno_dict=DEFAULT_COARSE_TECHNO_DICT,
invest_discipline=INVEST_DISCIPLINE_OPTIONS[1])
self.ee2.factory.set_builders_to_coupling_builder(builder)
self.ee2.configure()
for cache_type_key in self.ee2.dm.get_all_namespaces_from_var_name('cache_type'):
full_values_dict[cache_type_key] = 'SimpleCache'
self.ee2.load_study_from_input_dict(full_values_dict)
self.ee2.execute()
dm_with_simple_cache = self.ee2.dm.get_data_dict_values()
# remove cache_type keys from dm_with_simple_cache and dm_without_cache
for cache_type_key in self.ee.dm.get_all_namespaces_from_var_name('cache_type') + self.ee.dm.get_all_namespaces_from_var_name('residuals_history'):
dm_with_simple_cache.pop(cache_type_key)
dm_without_cache.pop(cache_type_key)
optim_output_df_simple_cache = dm_with_simple_cache.pop(
'Test.WITNESS_MDO.WITNESS_Eval.FunctionsManager.optim_output_df')
optim_output_df_simple_cache = optim_output_df_simple_cache.iloc[-1].drop(
'iteration')
optim_output_df_without_cache = dm_without_cache.pop(
'Test.WITNESS_MDO.WITNESS_Eval.FunctionsManager.optim_output_df')
optim_output_df_without_cache = optim_output_df_without_cache.iloc[-1].drop(
'iteration')
# compare values in dm_with_simple_cache and dm_without_cache
dict_error = {}
compare_dict(dm_with_simple_cache,
dm_without_cache, '', dict_error)
self.assertDictEqual(dict_error, {})
self.assertTrue(optim_output_df_simple_cache.equals(
optim_output_df_without_cache))
def test_02_cache_on_witness_coarse_optim(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
builder = self.ee.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'witness_coarse_optim_process', techno_dict=DEFAULT_COARSE_TECHNO_DICT,
invest_discipline=INVEST_DISCIPLINE_OPTIONS[1])
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
usecase = witness_proc_usecase(
bspline=True, execution_engine=self.ee, techno_dict=DEFAULT_COARSE_TECHNO_DICT, invest_discipline=INVEST_DISCIPLINE_OPTIONS[1])
usecase.study_name = self.name
values_dict = usecase.setup_usecase()
full_values_dict = {}
for dict_v in values_dict:
full_values_dict.update(dict_v)
algo_options = {"ftol_rel": 3e-16,
"normalize_design_space": False,
"maxls": 2 * 55,
"maxcor": 55,
"pg_tol": 1.e-8,
"max_iter": 2,
"disp": 110}
full_values_dict['Test.WITNESS_MDO.algo_options'] = algo_options
full_values_dict['Test.WITNESS_MDO.WITNESS_Eval.warm_start'] = False
full_values_dict['Test.WITNESS_MDO.max_iter'] = 2
full_values_dict['Test.WITNESS_MDO.WITNESS_Eval.sub_mda_class'] = 'MDAGaussSeidel'
full_values_dict['Test.WITNESS_MDO.WITNESS_Eval.max_mda_iter'] = 10
# execute optim without cache and retrieve dm
self.ee.load_study_from_input_dict(full_values_dict)
self.ee.execute()
dm_without_cache = self.ee.dm.get_data_dict_values()
# execute optim with SimpleCache and retrieve dm
self.ee2 = ExecutionEngine(self.name)
builder = self.ee2.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'witness_coarse_optim_process', techno_dict=DEFAULT_COARSE_TECHNO_DICT,
invest_discipline=INVEST_DISCIPLINE_OPTIONS[1])
self.ee2.factory.set_builders_to_coupling_builder(builder)
self.ee2.configure()
for cache_type_key in self.ee2.dm.get_all_namespaces_from_var_name('cache_type'):
full_values_dict[cache_type_key] = 'SimpleCache'
self.ee2.load_study_from_input_dict(full_values_dict)
self.ee2.execute()
dm_with_simple_cache = self.ee2.dm.get_data_dict_values()
# remove cache_type keys from dm_with_simple_cache and dm_without_cache
for cache_type_key in self.ee.dm.get_all_namespaces_from_var_name('cache_type') + self.ee.dm.get_all_namespaces_from_var_name('residuals_history'):
dm_with_simple_cache.pop(cache_type_key)
dm_without_cache.pop(cache_type_key)
optim_output_df_simple_cache = dm_with_simple_cache.pop(
'Test.WITNESS_MDO.WITNESS_Eval.FunctionsManager.optim_output_df')
optim_output_df_simple_cache = optim_output_df_simple_cache.iloc[-1].drop(
'iteration')
optim_output_df_without_cache = dm_without_cache.pop(
'Test.WITNESS_MDO.WITNESS_Eval.FunctionsManager.optim_output_df')
optim_output_df_without_cache = optim_output_df_without_cache.iloc[-1].drop(
'iteration')
# compare values in dm_with_simple_cache and dm_without_cache
dict_error = {}
compare_dict(dm_with_simple_cache,
dm_without_cache, '', dict_error)
self.assertDictEqual(dict_error, {})
self.assertTrue(optim_output_df_simple_cache.equals(
optim_output_df_without_cache))
if '__main__' == __name__:
cls = WitnessCoarseCache()
# cls.test_01_cache_on_witness_coarse_optim_with_unconverged_mda()
cls.test_02_cache_on_witness_coarse_optim()
| 1.507813 | 2 |
main.py | jack-cummings/cleanmytweets | 0 | 12768390 | import tweepy
import pandas as pd
import re
import json
import os
import datetime
import stripe
import time
from fastapi import FastAPI, Request, BackgroundTasks, Response, Cookie
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from fastapi.responses import RedirectResponse
from typing import Optional
from sqlalchemy import create_engine
## Configs
if os.environ['MODE'] == 'dev':
import uvicorn
if os.environ['STRIPE_MODE'] == 'prod':
stripe.api_key = os.environ['STRIPE_KEY_PROD']
price = "price_1L0We3CsKWtKuHp02UYDbhBF"
else:
stripe.api_key = os.environ['STRIPE_KEY_DEV']
price = "price_1KeQ1PCsKWtKuHp0PIYQ1AnH"
# if os.environ['PAY_MODE'] == 'pay':
# return_path = "create-checkout-session"
# else:
# return_path = 'free_mode'
def HtmlIntake(path):
with open(path) as f:
lines = f.readlines()
return ''.join(lines)
def loadWords(mode):
f = open("references/profane_words.json", 'r')
bad_words = json.load(f)
bad_words_pattern = ' | '.join(bad_words)
return bad_words_pattern, bad_words
def flagDFProces(df):
df['Profane Words'] = df['Text'].apply(lambda x: ' , '.join(re.findall(bad_words_pattern, x)))
df['occurance'] = df['Profane Words'].apply(lambda x: 1 if len(x) > 0 else 0)
df['Date'] = df['date_full'].apply(lambda x: datetime.datetime.date(x))
return df
def inituserOauth(basepath):
oauth2_user_handler = tweepy.OAuth2UserHandler(
client_id=os.getenv('CLIENT_ID'),
redirect_uri=f'{basepath}/return-get',
scope=["tweet.read", "tweet.write", "users.read"],
# Client Secret is only necessary if using a confidential client
client_secret=os.getenv('CLIENT_SECRET'))
return oauth2_user_handler
def setBasePath(mode):
if mode.lower() == 'dev':
basepath = 'http://0.0.0.0:4242'
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
elif mode.lower() == 'prod':
basepath = "https://www.cleanmytweets.com"
# basepath = 'https://cleanmytweets.herokuapp.com'
return basepath
def getTweets(user_id, client, username):
# Collect user timeline
twitter_client = client
tweets_out = []
for tweet in tweepy.Paginator(twitter_client.get_users_tweets, id=user_id,
tweet_fields=['id', 'text', 'created_at'], max_results=100).flatten(limit=3000):
tweets_out.append([tweet.id, tweet.text, tweet.created_at])
timeline_df = pd.DataFrame(tweets_out, columns=['Delete?', 'Text', 'date_full'])
# Run scan for flag words
out_df = flagDFProces(timeline_df)
total_count = out_df.shape[0]
prof_df = pd.DataFrame(out_df[out_df['occurance'] == 1])
prof_df['Text'] = prof_df['Text'].apply(lambda x: x.encode('utf-8', 'ignore'))
prof_df['username'] = username
prof_df['total_count'] = total_count
# Check length of prof_df
if len(prof_df) == 0:
prof_df.loc[1] = [0, "Great work, we've found no controversial tweets in your timeline!",
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S+00:00'), ' ', 1,
datetime.datetime.now().strftime('%Y-%m-%d'), username, 0]
user_df = pd.DataFrame([[username, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S+00:00')]],
columns=['Name', 'Insert_DT'])
# write to sql
prof_df.to_sql('tweets', con=db_engine, if_exists='append') # 'replace'
user_df.to_sql('users', con=db_engine, if_exists='append')
print('Processing Complete')
# initialization
mode = os.environ['MODE']
bad_words_pattern, bad_words = loadWords(mode)
# init DB
db_engine = create_engine(os.environ['DB_URL'], echo=False)
app = FastAPI()
basepath = setBasePath(mode)
oauth2_handler = inituserOauth(basepath)
app.auth = oauth2_handler
templates = Jinja2Templates(directory='templates/jinja')
@app.get("/")
async def home(request: Request):
try:
authorization_url = app.auth.get_authorization_url()
return templates.TemplateResponse('index_j.html', {"request": request, "user_auth_link": authorization_url})
except:
return templates.TemplateResponse('error.html', {"request": request})
@app.get('/return-get', response_class=RedirectResponse)
async def results(request: Request, background_tasks: BackgroundTasks):
try:
access_token = app.auth.fetch_token(str(request.url))
client = tweepy.Client(access_token['access_token'])
except Exception as e:
print(e)
print(request.url)
return templates.TemplateResponse('auth_failed.html', {"request": request})
user = client.get_me(user_auth=False)
username = user.data.username
user_id = user.data.id
# response.set_cookie(key="user_id", value=user_id)
response = RedirectResponse(url="/return-get_2")
response.set_cookie("username", str(username))
response.set_cookie(key="access_token", value=access_token['access_token'])
# Begin Timeline scrape
print(f'beginning scrape: {username}')
background_tasks.add_task(getTweets, user_id=user_id, client=client, username=username)
return response
@app.get('/return-get_2')
async def results(request: Request, username: Optional[str] = Cookie(None)):
return templates.TemplateResponse('account_val.html', {"request": request, "user": username,
"pc_msg": ''})
@app.post("/checkout")
async def userInput(request: Request, username: Optional[str] = Cookie(None)):
try:
# Collect User Input
body = await request.body()
inputPC = body.decode('UTF-8').split('=')[1].strip()
approvedPCs = os.environ['PROMO_CODES'].split(',')
# Check if promocode entered
if len(inputPC) > 0:
if inputPC in approvedPCs:
return templates.TemplateResponse('payment_val.html', {"request": request, "user": Cookie('user')})
else:
return templates.TemplateResponse('account_val.html', {"request": request, "user": username,
"pc_msg": 'Incorrect promocode. Please try again.'})
# If no promocode, then stripe checkout
else:
checkout_session = stripe.checkout.Session.create(
success_url=basepath + "/success?session_id={CHECKOUT_SESSION_ID}",
cancel_url=basepath,
payment_method_types=["card"],
mode="payment",
line_items=[{
"price": price,
"quantity": 1
}], )
return RedirectResponse(checkout_session.url, status_code=303)
except Exception as e:
print(e)
return templates.TemplateResponse('error.html', {"request": request})
@app.get("/success")
async def success(request: Request):
return templates.TemplateResponse('payment_val.html', {"request": request, "user": Cookie('user')})
@app.get("/free_mode")
async def success(request: Request):
return templates.TemplateResponse('free_mode.html', {"request": request})
@app.get("/learn_more")
async def read(request: Request, response: Response, ):
return templates.TemplateResponse('learn_more.html', {"request": request})
@app.get('/create-checkout-session')
async def create_checkout_session(request: Request):
checkout_session = stripe.checkout.Session.create(
success_url=basepath + "/success?session_id={CHECKOUT_SESSION_ID}",
cancel_url=basepath,
payment_method_types=["card"],
mode="payment",
line_items=[{
"price": price,
"quantity": 1
}],
)
return RedirectResponse(checkout_session.url, status_code=303)
@app.get("/scan_tweets")
async def scan_tweets(request: Request, username: Optional[str] = Cookie(None)):
# pull rows
query = (f"""
SELECT *
FROM tweets
WHERE username = '{username}'""")
df = pd.read_sql_query(query, db_engine)
# delete from DB
db_engine.execute(f"DELETE FROM tweets WHERE username = '{username}'")
try:
df['Text'] = df['Text'].apply(lambda x: bytes.fromhex(x[2:]).decode('utf-8'))
except ValueError:
pass
df = df.drop_duplicates()
check_box = r"""<input type="checkbox" id="\1" name="tweet_id" value="\1">
<label for="\1"> </label><br>"""
out_table_html = str(re.sub(r'(\d{18,19})', check_box,
df.drop(columns=['date_full', 'occurance', 'username', 'total_count', 'index'],
axis=1).to_html(index=False).replace(
'<td>', '<td align="center">').replace(
'<tr style="text-align: right;">', '<tr style="text-align: center;">').replace(
'<table border="1" class="dataframe">', '<table class="table">')))
    try:
        tc = str(df['total_count'].values[0])
    except:
        tc = str(0)
    try:
        return templates.TemplateResponse('returnPage_j.html', {"request": request,
                                                                "p_count": str(df.shape[0]),
                                                                'table': out_table_html,
                                                                'total_count': tc,
                                                                'user': username})
    except:
        return templates.TemplateResponse('error.html', {"request": request})
@app.post('/selectTweets')
async def selectTweets(request: Request, access_token: Optional[str] = Cookie(None)):
try:
client = tweepy.Client(access_token)
body = await request.body()
values = body.decode("utf-8").replace('tweet_id=', '').split(',')
if values == [""]:
pass
elif len(values) < 17:
delete_failed_flag = False
for v in values:
try:
twitter_client = client
twitter_client.delete_tweet(v, user_auth=False)
except:
delete_failed_flag = True
if delete_failed_flag:
return templates.TemplateResponse('delete_failed.html', {'request': request})
else:
return templates.TemplateResponse('Tweets_deleted.html', {'request': request,
'count': str(len(values))})
elif len(values) >= 17:
return templates.TemplateResponse('over_15.html', {'request': request})
except:
return templates.TemplateResponse('error.html', {"request": request})
if __name__ == '__main__':
if os.environ['MODE'] == 'dev':
uvicorn.run(app, port=4242, host='0.0.0.0')
| 2.1875 | 2 |
src/PlugIns/PE/ResourceEntriesPlug.py | codexgigassys/codex-backend | 161 | 12768391 | <gh_stars>100-1000
# Copyright (C) 2016 <NAME>.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
from PlugIns.PlugIn import PlugIn
from Modules.PEFileModule import PEFileModule
import pefile
from Utils.InfoExtractor import *
class ResourceEntriesPlug(PlugIn):
def __init__(self, sample=None):
PlugIn.__init__(self, sample)
def getPath(self):
return "particular_header.res_entries"
def getName(self):
return "res_entries"
def getVersion(self):
return 6
def process(self):
pelib = self._getLibrary(PEFileModule().getName())
if(pelib is None):
return ""
ret = []
if hasattr(pelib, 'DIRECTORY_ENTRY_RESOURCE'):
i = 0
for resource_type in pelib.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
name = "%s" % resource_type.name
else:
name = "%s" % pefile.RESOURCE_TYPE.get(
resource_type.struct.Id)
if name is None:
name = "%d" % resource_type.struct.Id
if hasattr(resource_type, 'directory'):
for resource_id in resource_type.directory.entries:
if hasattr(resource_id, 'directory'):
for resource_lang in resource_id.directory.entries:
try:
data = pelib.get_data(
resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
# fd=open(name,'wb')
# fd.write(data)
# (data)
except pefile.PEFormatError:
return "corrupt"
filetype = MIME_TYPE(data, False)
lang = pefile.LANG.get(
resource_lang.data.lang, 'unknown')
sublang = pefile.get_sublang_name_for_lang(
resource_lang.data.lang, resource_lang.data.sublang)
entry = {}
entry["name"] = self._normalize(name)
entry["rva"] = self._normalize(
hex(resource_lang.data.struct.OffsetToData))
entry["size"] = self._normalize(
hex(resource_lang.data.struct.Size))
entry["type"] = self._normalize(filetype)
entry["lang"] = self._normalize(lang)
entry["sublang"] = self._normalize(sublang)
entry["sha1"] = SHA1(data)
ret.append(entry)
return ret
| 1.976563 | 2 |
Aula 19/ex095.py | alaanlimaa/Python_CVM1-2-3 | 0 | 12768392 | <reponame>alaanlimaa/Python_CVM1-2-3
from random import randint
jogador = {}
listagols = []
time = []
while True:
    jogador['Nome'] = str(input('Name: ')).title()
    jogos = int(input('Number of matches: '))
somagols = 0
for p in range(1, jogos + 1):
gols = randint(1, 3)
listagols.append(gols)
somagols += gols
        print(f' Scored {gols} goals in match {p}!')
jogador['Gols'] = listagols[:]
jogador['Total'] = somagols
time.append(jogador.copy())
listagols.clear()
resp = ' '
while resp not in 'SN':
        resp = str(input('Do you want to continue? [S/N]: ')).strip().upper()[0]
if resp == 'N':
break
print('-=' * 30)
print('Cod. ', end=' ')
for i in jogador.keys():
print(f'{i:<15}', end='')
print()
print('--' * 30)
for i, v in enumerate(time):
print(f'{i:<3} ', end='')
for d in v.values():
print(f'{str(d):<15} ', end='')
print()
print('==== INDIVIDUAL DATA ===')
while True:
    busca = int(input('Which player do you want to review? [999 to stop]: '))
if busca == 999:
break
if busca > len(time):
        print('ERROR! That player does not exist!!!')
else:
        print(f'==== PLAYER {time[busca]["Nome"]}:')
for i, g in enumerate(time[busca]['Gols']):
            print(f' - Scored {g} goals in match {i+1}! ')
print('PROGRAM FINISHED')
| 3.671875 | 4 |
main.py | Pyzekiel/Pygame-Line-Graph | 0 | 12768393 | <reponame>Pyzekiel/Pygame-Line-Graph<filename>main.py<gh_stars>0
import pygame
import json
import asset
import random
with open('settings.json', 'r') as src:
prp = json.load(src)
pygame.init()
screen_size = screen_width, screen_height = prp['window']['screen']['width'], prp['window']['screen']['height']
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption(prp['window']['caption'])
# Objects
container = asset.cnr(screen_width/2, screen_height/2, screen_width, screen_height, 'black')
pointer = asset.makeLine(container.x, container.y, 3, container.h, 'red')
# pointY = asset.makeLine(container.x, container.y, container.w, 3, 'green')
if prp['graph']['debug']:
pointY = asset.makeLine(container.x, container.y, container.w, 3, 'green')
else:
pointY = asset.makeLine(container.x, container.y, container.w, 0, container.c)
# Groups
objects = pygame.sprite.Group()
objects.add(container)
objects.add(pointer)
objects.add(pointY)
# Add Objects ^There
lx = pointer.rect.x-container.x+pointer.w/2
ly = pointY.rect.y-container.y+pointY.h/2
steps = container.w/prp['graph']['steps']
tpsY = prp['graph']['tpsY']
colorList = prp['graph']['colors']
cc = random.choice(colorList)
ticker = prp['graph']['TPS']
running = True
while running:
screen.fill(prp['window']['screen']['color'])
objects.draw(screen)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
running = False
if pointer.rect.x >= container.x + container.w-pointer.w:
pointer.rect.x -= container.w
cc = random.choice(colorList)
container.clear()
lx = 0
ly = random.randint(container.y, container.y+container.h)
if tpsY <= 1:
pointY.rect.y = random.randint(container.y, container.y+container.h)
tpsY = prp['graph']['tpsY']
else:
tpsY -= 1
if ticker <= 1:
pygame.draw.circle(container.image, cc, (pointer.rect.x-container.x+26, pointY.rect.y-container.y+pointY.h/2), 3)
pygame.draw.line(container.image, cc, (lx, ly), (pointer.rect.x-container.x+26, pointY.rect.y-container.y+pointY.h/2), prp['graph']['lineWidth'])
lx = pointer.rect.x-container.x+26
ly = pointY.rect.y-container.y+pointY.h/2
pointer.move(steps, 0)
pygame.display.set_caption(f"Value: {pointY.rect.y-container.y+pointY.h/2}")
ticker = prp['graph']['TPS']
else:
ticker -= 1
pygame.display.flip()
pygame.quit()
exit()
| 2.78125 | 3 |
setup.py | Zwork101/engage | 0 | 12768394 | <reponame>Zwork101/engage
from setuptools import setup
setup(
name="engage",
description="Sphinx autodoc templater",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/Zwork101/engage",
license="MIT",
packages=['engage'],
version="1.0.0",
install_requires=["astor"]
)
| 1.03125 | 1 |
PART06/12_df_merge.py | arti1117/python-machine-learning-pandas-data-analytics | 1 | 12768395 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 22:19:36 2020
@author: arti
"""
import pandas as pd
pd.set_option('display.max_columns', 10)
pd.set_option('display.max_colwidth', 20)
pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.read_excel('./stock price.xlsx')
df2 = pd.read_excel('./stock valuation.xlsx')
print(df1.head())
print('--')
print(df2.head())
print('--')
merge_inner = pd.merge(df1, df2)
print(merge_inner)
print('--')
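# how='outer' keeps rows from both frames even when there is no matching 'id'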
merge_outer = pd.merge(df1, df2, how='outer', on='id')
print(merge_outer)
print('--')
merge_left = pd.merge(df1, df2, how='left',
left_on='stock_name', right_on='name')
print(merge_left)
print('--')
merge_right = pd.merge(df1, df2, how='right',
left_on='stock_name', right_on='name')
print(merge_right)
print('--')
price = df1[df1['price'] < 50000]
print(price.head())
print('--')
value = pd.merge(price, df2)
print(value)
| 3.0625 | 3 |
utils/utils.py | bvy007/vae | 92 | 12768396 | import os
import numpy as np
import theano
import theano.tensor as T
### Misc ###
def sharedX(value, name=None, borrow=True, keep_on_cpu=False):
""" Transform value into a shared variable of type floatX """
if keep_on_cpu:
return T._shared(theano._asarray(value, dtype=theano.config.floatX),name=name, borrow=borrow)
return theano.shared(theano._asarray(value, dtype=theano.config.floatX), name=name, borrow=borrow)
def mkdirs(path):
try:
os.makedirs(path)
except:
pass
return path
### Weights initializers ###
def guess_init_scale(shape):
""" Provides appropriate scale for initialization of the weights. """
if len(shape) == 2:
# For feedforward networks (see http://deeplearning.net/tutorial/mlp.html#going-from-logistic-regression-to-mlp)
return np.sqrt(6. / (shape[0] + shape[1]))
elif len(shape) == 4:
# For convnet (see http://deeplearning.net/tutorial/lenet.html)
fan_in = np.prod(shape[1:])
fan_out = shape[0] * np.prod(shape[2:])
return np.sqrt(6. / (fan_in + fan_out))
else:
raise ValueError("Don't know what to do in this case!")
def init_params_zeros(shape=None, values=None, name=None):
if values is None:
values = np.zeros(shape, dtype=theano.config.floatX)
return theano.shared(value=values, name=name)
def init_params_randn(rng, shape=None, sigma=0.01, values=None, name=None):
if sigma is None:
sigma = guess_init_scale(shape)
if values is None:
values = sigma * rng.randn(*shape)
return sharedX(values, name=name)
def init_params_uniform(rng, shape=None, scale=None, values=None, name=None):
if scale is None:
scale = guess_init_scale(shape)
if values is None:
values = rng.uniform(-scale, scale, shape)
return sharedX(values, name=name)
def init_params_orthogonal(rng, shape=None, values=None, name=None):
if values is None:
# initialize w/ orthogonal matrix. code taken from:
# https://github.com/mila-udem/blocks/blob/master/blocks/initialization.py
M = np.asarray(rng.standard_normal(size=shape), dtype=theano.config.floatX)
Q, R = np.linalg.qr(M)
values = Q * np.sign(np.diag(R)) * 0.01
return sharedX(values, name=name)
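

# Illustrative usage (shapes and names are hypothetical):
#   rng = np.random.RandomState(1234)
#   W = init_params_uniform(rng, shape=(784, 500), name='W')  # scale chosen by guess_init_scale
#   b = init_params_zeros(shape=(500,), name='b')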
| 2.59375 | 3 |
JR.py | samsalemi/OpenSim-Python-Simulation | 2 | 12768397 | # August 21st 2018
# Author: <NAME>
# University of Guelph Masters Graduate
# This module is an OpenSim tool that runs a Joint Reaction analysis on Static Optimization or Computed Muscle Control results to obtain joint reaction forces and loads in the model

import os


def run(setup, resultsDirectory):
import os
import re
import shutil
import opensim as osim
import directories
allDir = list(directories.main(directories))
paramsDir = allDir[1]
subID = allDir[4]
subResultsDir = allDir[5]
# ikResultsDir = allDir[6]
# idResultsDir = allDir[7]
# soResultsDir = allDir[8]
# cmcResultsDir = allDir[10]
# jrResultsDir = allDir[11]
# # actuatorFile = paramsDir + "/soActuators.xml"
# # genericSetupSO = paramsDir + "/" + "setupSO.xml"
# ikFileName = "subject01_walk1_ik.mot"
# ikFile = ikResultsDir + "/" + ikFileName
# # soForces = soResultsDir + "/" + "subject01_walk1_StaticOptimization_force.sto"
# if os.path.exists(jrResultsDir):
# shutil.rmtree(jrResultsDir, ignore_errors=True)
# if not os.path.exists(jrResultsDir):
# os.mkdir(jrResultsDir)
# # Load Model
aModel = osim.Model(subResultsDir + "/" + subID + ".osim")
# # initialize system
aModel.initSystem()
# # Initialize External Loads File from Generic File
# extLoads = idResultsDir + "/subject01_walk1_extLoads.xml"
# # Get .mot data to determine time range
# motCoordsData = osim.Storage(ikFile)
# # Get initial and final time
# initial_time = motCoordsData.getFirstTime()
# final_time = motCoordsData.getLastTime()
# Analyze Tool Setup for Static Optimization
analyzeTool = osim.AnalyzeTool(setup)
analyzeTool.setModel(aModel)
analyzeTool.setResultsDir(resultsDirectory)
analyzeTool.run()
# analyzeTool = osim.AnalyzeTool(cmcJrSetup)
# analyzeTool.setExternalLoadsFileName(extLoads)
# analyzeTool.setInitialTime(initial_time)
# analyzeTool.setFinalTime(final_time)
# analyzeTool.setLowpassCutoffFrequency(6)
# analyzeTool.setOutputPrecision(20)
# myForceSet = osim.ForceSet(aModel, actuatorFile)
# for i in range(myForceSet.getSize()):
# aModel.updForceSet().append(myForceSet.get(i))
# print(aModel.getForceSet().getSize())
# analysisSet = analyzeTool.getAnalysisSet()
# myForceSetArray = analyzeTool.getForceSetFiles()
# myForceSetArray.set(0, "")
# analyzeTool.setReplaceForceSet(False)
# analyzeTool.setForceSetFiles(myForceSetArray)
# # Joint Reaction Analysis
# jrTool = osim.JointReaction(jrSetup)
# analysisSet.cloneAndAppend(jrTool)
# # Set coordinates
# coordtype = "mot"
# if coordtype == "mot":
# analyzeTool.setStatesFileName("")
# analyzeTool.setCoordinatesFileName(ikFile)
# elif coordtype == "states":
# analyzeTool.setStatesFileName(ikFile)
# analyzeTool.setCoordinatesFileName("")
# analyzeTool.verifyControlsStates()
# analyzeTool.setResultsDir(jrResultsDir)
# # analyzeTool.printToXML(paramsDir +"/setupJR.xml")
# analyzeTool.run()
return()
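
# Illustrative call (file names are hypothetical): an AnalyzeTool setup XML and an output folder.
# run('setupJR.xml', 'results/JR')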
os.system('cls' if os.name == 'nt' else 'clear')
| 2.15625 | 2 |
rlgym/envs/__init__.py | syKevinPeng/rocket-league-gym | 63 | 12768398 | from .environment import Environment
from .match import Match | 1.171875 | 1 |
novelsave/migrations/commands.py | mHaisham/novelsave | 15 | 12768399 | from pathlib import Path
from alembic.command import upgrade
from alembic.config import Config
def make_config(dir_: Path, url_: str, config_='alembic.ini'):
"""
:param dir_: migrations script directory
:param url_: sqlalchemy database url
:param config_: config
:return:
"""
# retrieves config file path
config_file = dir_ / config_
config = Config(file_=config_file)
config.set_main_option('script_location', str(dir_))
config.set_main_option('sqlalchemy.url', url_)
config.attributes['configure_logger'] = False
return config
def migrate(url: str):
config = make_config(Path(__file__).parent, url, 'alembic.ini')
# upgrade the database to the latest revision
upgrade(config, 'head')
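

# Illustrative usage (the database URL is hypothetical):
# migrate('sqlite:///novelsave.sqlite')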
| 2.59375 | 3 |
powParse.py | johnrobisoniv/powDayTexter | 0 | 12768400 | # Opens file with OTS.com's "resBox" table data created by powScrape.py
# Parse through data - append relevant data and resort to dictionary?
# Save dictionary as data file named resData-DATE.txt
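
# A minimal sketch of the workflow described above. The exact column layout of the scraped
# "resBox" table is not shown here, so the parsing step is a placeholder and the input/output
# file names are assumptions.
import json
from datetime import date


def parse_res_box(in_path="resBox.txt"):
    """Read the scraped table dump and collect the relevant rows into a dictionary."""
    data = {}
    with open(in_path) as src:
        for i, line in enumerate(src):
            line = line.strip()
            if line:  # placeholder rule: keep every non-empty row, keyed by its position
                data[i] = line
    return data


def save_res_data(data, out_dir="."):
    """Save the parsed dictionary as resData-DATE.txt (JSON encoded)."""
    out_path = "{}/resData-{}.txt".format(out_dir, date.today().isoformat())
    with open(out_path, "w") as dst:
        json.dump(data, dst, indent=2)
    return out_path


if __name__ == "__main__":
    save_res_data(parse_res_box())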
| 2.140625 | 2 |
tests/test_api.py | billyweinberger/nightfall-python-sdk | 1 | 12768401 | import json
import os
from freezegun import freeze_time
import pytest
import responses
import time
from nightfall.api import Nightfall, NightfallUserError
from nightfall.detection_rules import DetectionRule, Detector, LogicalOp, Confidence, ExclusionRule, ContextRule, \
WordList, MatchType, RedactionConfig, MaskConfig, Regex
from nightfall.findings import Finding, Range
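

# The fixture below (used by the tests marked "integration") requires the NIGHTFALL_API_KEY
# environment variable; the file-scan integration test additionally needs WEBHOOK_ENDPOINT.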
@pytest.fixture
def nightfall():
yield Nightfall(os.environ['NIGHTFALL_API_KEY'])
@pytest.mark.integration
def test_scan_text_detection_rules_v3(nightfall):
result, redactions = nightfall.scan_text(
["4916-6734-7572-5015 is my credit card number, 489-36-8350 ssn"],
detection_rules=[
DetectionRule(logical_op=LogicalOp.ANY, detectors=[
Detector(min_confidence=Confidence.LIKELY,
min_num_findings=1,
display_name="Credit Card Number",
nightfall_detector="CREDIT_CARD_NUMBER",
context_rules=[ContextRule(regex=Regex("fake regex", is_case_sensitive=False),
window_before=10, window_after=10,
fixed_confidence=Confidence.VERY_UNLIKELY)],
exclusion_rules=[ExclusionRule(MatchType.FULL,
word_list=WordList(["never", "match"],
is_case_sensitive=True))],
redaction_config=RedactionConfig(remove_finding=False,
mask_config=MaskConfig(masking_char='👀',
num_chars_to_leave_unmasked=3,
chars_to_ignore=["-"])),
),
Detector(min_confidence=Confidence.LIKELY, nightfall_detector="US_SOCIAL_SECURITY_NUMBER")])],
context_bytes=10,
default_redaction_config=RedactionConfig(remove_finding=False, substitution_phrase="[REDACTED]")
)
assert len(result) == 1
assert len(result[0]) == 2
def finding_orderer(f):
return f.codepoint_range.start
result[0].sort(key=finding_orderer)
assert result[0][0] == Finding(
"4916-6734-7572-5015",
"491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀",
None, " is my cre",
"Credit Card Number",
result[0][0].detector_uuid,
Confidence.VERY_LIKELY,
Range(0, 19), Range(0, 19), "",
[], ["Inline Detection Rule #1"])
assert result[0][1] == Finding(
"489-36-8350",
"[REDACTED]",
"d number, ", " ssn",
"",
result[0][1].detector_uuid,
Confidence.VERY_LIKELY,
Range(46, 57), Range(46, 57), "",
[], ["Inline Detection Rule #1"])
assert len(redactions) == 1
assert redactions[0] == "491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀 is my credit card number, [REDACTED] ssn"
@pytest.mark.filetest
@pytest.mark.integration
def test_scan_file_detection_rules(nightfall, tmpdir):
file = tmpdir.mkdir("test_data").join("file.txt")
file.write("4916-6734-7572-5015 is my credit card number")
id, message = nightfall.scan_file(
file,
os.environ['WEBHOOK_ENDPOINT'],
detection_rules=[DetectionRule(logical_op=LogicalOp.ANY, detectors=[
Detector(min_confidence=Confidence.LIKELY, min_num_findings=1,
display_name="Credit Card Number", nightfall_detector="CREDIT_CARD_NUMBER")])]
)
assert id is not None
assert message == 'scan initiated'
@responses.activate
def test_scan_text():
nightfall = Nightfall("NF-NOT_REAL")
responses.add(responses.POST, 'https://api.nightfall.ai/v3/scan',
json={
"findings":
[
[
{
"finding": "4916-6734-7572-5015",
"redactedFinding": "491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀",
"afterContext": " is my cre",
"detector":
{
"name": "Credit Card Number",
"uuid": "74c1815e-c0c3-4df5-8b1e-6cf98864a454"
},
"confidence": "VERY_LIKELY",
"location":
{
"byteRange":
{
"start": 0,
"end": 19
},
"codepointRange":
{
"start": 0,
"end": 19
}
},
"redactedLocation":
{
"byteRange":
{
"start": 0,
"end": 19
},
"codepointRange":
{
"start": 0,
"end": 19
}
},
"matchedDetectionRuleUUIDs":
[],
"matchedDetectionRules":
[
"Inline Detection Rule #1"
]
},
{
"finding": "489-36-8350",
"redactedFinding": "[REDACTED]",
"beforeContext": "d number, ",
"afterContext": " ssn",
"detector":
{
"name": "",
"uuid": "e30d9a87-f6c7-46b9-a8f4-16547901e069"
},
"confidence": "VERY_LIKELY",
"location":
{
"byteRange":
{
"start": 46,
"end": 57
},
"codepointRange":
{
"start": 46,
"end": 57
}
},
"redactedLocation":
{
"byteRange":
{
"start": 46,
"end": 56
},
"codepointRange":
{
"start": 46,
"end": 56
}
},
"matchedDetectionRuleUUIDs":
[],
"matchedDetectionRules":
[
"Inline Detection Rule #1"
]
}
]
],
"redactedPayload":
[
"491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀 is my credit card number, [REDACTED] ssn"
]
})
result, redactions = nightfall.scan_text(
["4916-6734-7572-5015 is my credit card number, 489-36-8350 ssn"],
detection_rules=[
DetectionRule(logical_op=LogicalOp.ANY, detectors=[
Detector(min_confidence=Confidence.LIKELY,
min_num_findings=1,
display_name="Credit Card Number",
nightfall_detector="CREDIT_CARD_NUMBER",
context_rules=[ContextRule(regex=Regex("fake regex", is_case_sensitive=False),
window_before=10, window_after=10,
fixed_confidence=Confidence.VERY_UNLIKELY)],
exclusion_rules=[ExclusionRule(MatchType.FULL,
word_list=WordList(["never", "match"],
is_case_sensitive=True))],
redaction_config=RedactionConfig(remove_finding=False,
mask_config=MaskConfig(masking_char='👀',
num_chars_to_leave_unmasked=3,
chars_to_ignore=["-"])),
),
Detector(min_confidence=Confidence.LIKELY, nightfall_detector="US_SOCIAL_SECURITY_NUMBER")])],
context_bytes=10,
default_redaction_config=RedactionConfig(remove_finding=False, substitution_phrase="[REDACTED]")
)
assert len(responses.calls) == 1
assert responses.calls[0].request.headers.get("Authorization") == "Bearer NF-NOT_REAL"
assert json.loads(responses.calls[0].request.body) == {
"payload":
[
"4916-6734-7572-5015 is my credit card number, 489-36-8350 ssn"
],
"policy":
{
"detectionRules":
[
{
"detectors":
[
{
"minConfidence": "LIKELY",
"minNumFindings": 1,
"nightfallDetector": "CREDIT_CARD_NUMBER",
"detectorType": "NIGHTFALL_DETECTOR",
"displayName": "Credit Card Number",
"contextRules":
[
{
"regex":
{
"pattern": "fake regex",
"isCaseSensitive": False
},
"proximity":
{
"windowBefore": 10,
"windowAfter": 10
},
"confidenceAdjustment":
{
"fixedConfidence": "VERY_UNLIKELY"
}
}
],
"exclusionRules":
[
{
"matchType": "FULL",
"wordList":
{
"values":
[
"never",
"match"
],
"isCaseSensitive": True
},
"exclusionType": "WORD_LIST"
}
],
"redactionConfig":
{
"removeFinding": False,
"maskConfig":
{
"maskingChar": "👀",
"numCharsToLeaveUnmasked": 3,
"maskRightToLeft": False,
"charsToIgnore":
[
"-"
]
}
}
},
{
"minConfidence": "LIKELY",
"minNumFindings": 1,
"nightfallDetector": "US_SOCIAL_SECURITY_NUMBER",
"detectorType": "NIGHTFALL_DETECTOR"
}
],
"logicalOp": "ANY"
}
],
"contextBytes": 10,
"defaultRedactionConfig":
{
"removeFinding": False,
"substitutionConfig":
{
"substitutionPhrase": "[REDACTED]"
}
}
}
}
assert len(result) == 1
assert len(result[0]) == 2
assert result[0][0] == Finding(
"4916-6734-7572-5015",
'491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀',
None, " is my cre",
"Credit Card Number",
result[0][0].detector_uuid,
Confidence.VERY_LIKELY,
Range(0, 19), Range(0, 19), "",
[], ["Inline Detection Rule #1"])
assert result[0][1] == Finding(
"489-36-8350",
"[REDACTED]",
"d number, ", " ssn",
"",
result[0][1].detector_uuid,
Confidence.VERY_LIKELY,
Range(46, 57), Range(46, 57), "",
[], ["Inline Detection Rule #1"])
assert len(redactions) == 1
assert redactions[0] == "491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀 is my credit card number, [REDACTED] ssn"
@responses.activate
def test_scan_text_with_policy_uuids():
nightfall = Nightfall("NF-NOT_REAL")
responses.add(responses.POST, 'https://api.nightfall.ai/v3/scan',
json={
"findings":
[
[
{
"finding": "4916-6734-7572-5015",
"redactedFinding": "491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀",
"afterContext": " is my cre",
"detector":
{
"name": "Credit Card Number",
"uuid": "74c1815e-c0c3-4df5-8b1e-6cf98864a454"
},
"confidence": "VERY_LIKELY",
"location":
{
"byteRange":
{
"start": 0,
"end": 19
},
"codepointRange":
{
"start": 0,
"end": 19
}
},
"redactedLocation":
{
"byteRange":
{
"start": 0,
"end": 19
},
"codepointRange":
{
"start": 0,
"end": 19
}
},
"matchedDetectionRuleUUIDs":
["0d8efd7b-b87a-478b-984e-9cf5534a46bc"],
"matchedDetectionRules":
[]
},
]
],
"redactedPayload":
[
"491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀 is my credit card number, [REDACTED] ssn"
]
})
result, redactions = nightfall.scan_text(
["4916-6734-7572-5015 is my credit card number, 489-36-8350 ssn"],
policy_uuids=["2388f83f-cd31-4689-971b-4ee94f798281"]
)
assert len(responses.calls) == 1
assert responses.calls[0].request.headers.get("Authorization") == "Bearer NF-NOT_REAL"
assert json.loads(responses.calls[0].request.body) == {
"payload":
[
"4916-6734-7572-5015 is my credit card number, 489-36-8350 ssn"
],
"policyUUIDs": ["2388f83f-cd31-4689-971b-4ee94f798281"]
}
assert len(result) == 1
assert len(result[0]) == 1
assert result[0][0] == Finding(
"4916-6734-7572-5015",
'491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀',
None, " is my cre",
"Credit Card Number",
result[0][0].detector_uuid,
Confidence.VERY_LIKELY,
Range(0, 19), Range(0, 19), "",
["0d8efd7b-b87a-478b-984e-9cf5534a46bc"], [])
assert len(redactions) == 1
assert redactions[0] == "491👀-👀👀👀👀-👀👀👀👀-👀👀👀👀 is my credit card number, [REDACTED] ssn"
def test_scan_text_no_detection_rules_or_policy_uuids():
nightfall = Nightfall("NF-NOT_REAL")
with pytest.raises(NightfallUserError):
nightfall.scan_text(texts=["will", "fail"])
@responses.activate
def test_scan_file(tmpdir):
file = tmpdir.mkdir("test_data").join("file.txt")
file.write("4916-6734-7572-5015 is my credit card number")
nightfall = Nightfall("NF-NOT_REAL")
responses.add(responses.POST, 'https://api.nightfall.ai/v3/upload', status=200, json={"id": 1, "chunkSize": 22})
responses.add(responses.PATCH, 'https://api.nightfall.ai/v3/upload/1', status=204)
responses.add(responses.POST, 'https://api.nightfall.ai/v3/upload/1/finish', status=200)
responses.add(responses.POST, 'https://api.nightfall.ai/v3/upload/1/scan', status=200,
json={"id": 1, "message": "scan_started"})
id, message = nightfall.scan_file(file, "https://my-website.example/callback", detection_rule_uuids=["a_uuid"],
request_metadata="some test data")
assert len(responses.calls) == 5
for call in responses.calls:
assert call.request.headers.get("Authorization") == "Bearer NF-NOT_REAL"
assert responses.calls[0].request.body == b'{"fileSizeBytes": 44}'
assert responses.calls[1].request.body == b"4916-6734-7572-5015 is"
assert responses.calls[1].request.headers.get("X-UPLOAD-OFFSET") == '0'
assert responses.calls[2].request.body == b" my credit card number"
assert responses.calls[2].request.headers.get("X-UPLOAD-OFFSET") == '22'
assert responses.calls[4].request.body == b'{"policy": {"webhookURL": "https://my-website.example/callback", ' \
b'"detectionRuleUUIDs": ["a_uuid"]}, "requestMetadata": "some test data"}'
assert id == 1
assert message == "scan_started"
@responses.activate
def test_file_scan_upload_short(tmpdir):
file = tmpdir.mkdir("test_data").join("file.txt")
file.write("4916-6734-7572-5015 is my credit card number")
nightfall = Nightfall("NF-NOT_REAL")
responses.add(responses.PATCH, 'https://api.nightfall.ai/v3/upload/1', status=204)
assert nightfall._file_scan_upload(1, file, 200)
assert len(responses.calls) == 1
assert responses.calls[0].request.headers.get("Authorization") == "Bearer NF-NOT_REAL"
assert responses.calls[0].request.body == b"4916-6734-7572-5015 is my credit card number"
assert responses.calls[0].request.headers.get("X-UPLOAD-OFFSET") == "0"
@responses.activate
def test_file_scan_upload_long(tmpdir):
file = tmpdir.mkdir("test_data").join("file.txt")
test_str = b"4916-6734-7572-5015 is my credit card number"
file.write_binary(test_str)
responses.add(responses.PATCH, 'https://api.nightfall.ai/v3/upload/1', status=204)
nightfall = Nightfall("NF-NOT_REAL")
assert nightfall._file_scan_upload(1, file, 1)
assert len(responses.calls) == 44
for i, call in enumerate(responses.calls):
assert call.request.headers.get("Authorization") == "Bearer NF-NOT_REAL"
assert call.request.body.decode('utf-8') == test_str.decode('utf-8')[i]
assert call.request.headers.get("X-UPLOAD-OFFSET") == str(i)
@freeze_time("2021-10-04T17:30:50Z")
def test_validate_webhook(nightfall):
nightfall.signing_secret = "super-secret-shhhh"
timestamp = 1633368645
body = "hello world foo bar goodnight moon"
expected = "1bb7619a9504474ffc14086d0423ad15db42606d3ca52afccb4a5b2125d7b703"
assert nightfall.validate_webhook(expected, timestamp, body)
@freeze_time("2021-10-04T19:30:50Z")
def test_validate_webhook_too_old(nightfall):
nightfall.signing_secret = "super-secret-shhhh"
timestamp = 1633368645
body = "hello world foo bar goodnight moon"
expected = "1bb7619a9504474ffc14086d0423ad15db42606d3ca52afccb4a5b2125d7b703"
assert not nightfall.validate_webhook(expected, timestamp, body)
@freeze_time("2021-10-04T17:30:50Z")
def test_validate_webhook_incorrect_sig(nightfall):
nightfall.signing_secret = "super-secret-shhhh"
timestamp = 1633368645
body = "hello world foo bar goodnight moon"
expected = "not matching"
assert not nightfall.validate_webhook(expected, timestamp, body)
| 2.09375 | 2 |
parameters/COVID-19 Mobility Germany.py | buaacjw/Epidemic-Modeling-survey | 0 | 12768402 | import datetime
import os
import wget
from parameters.GraphData import GraphData
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def get_germany_mobility(graph_en: str):
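    """Download (and cache) the 2019 baseline county mobility CSV, group its rows by day,
    and return them wrapped in a GraphData object."""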
out = 'Germany/mobility_counties_2019_baseline.csv'
url = 'https://files.de-1.osf.io/v1/resources/n53cz/providers/osfstorage/5f2d5f5c021ce20041f429f4?action=download&direct&version=1'
if not os.path.exists(out) or not os.path.exists('log/' + datetime.date.today().strftime('%Y-%m-%d') + '.germany'):
if os.path.exists(out):
os.remove(out)
wget.download(url, out)
log = open('log/' + datetime.date.today().strftime('%Y-%m-%d') + '.germany', 'w')
file = open(out)
file.readline()
data = {}
for line in file.readlines(): # day from to activity_number 1, 1, 2, 123
line_data = line[:-1].split(',')
time = line_data[0]
if time not in data:
data[time] = []
dict_temp = {
'properties': {},
'geometry': {
"type": "Point",
"coordinates": [],
}
}
dict_temp['properties']['mobility'] = line_data[3]
dict_temp['geometry']['coordinates'] = [line_data[1], line_data[2]]
data[time].append(dict_temp)
return GraphData(data, graph_en=graph_en)
def clear_germany_log():
files = []
for file in os.walk('log/'):
files = file[2]
for filename in files:
if filename.split('.')[-1] == 'germany':
os.remove('log/' + filename)
get_germany_mobility("germany")
| 2.734375 | 3 |
Day08/part1.py | NORXND/AdventOfCode2021 | 0 | 12768403 | """
Day 8 - Part 1
https://adventofcode.com/2021/day/8
By NORXND @ 08.12.2021
(C) NORXND 2021 - Under The MIT License
"""
input_file = open('Day8/input.txt', 'r')
entries = []
for entry in input_file.readlines():
entry = entry.strip().split(" | ")
patterns = entry[0].split(" ")
output = entry[1].split(" ")
entries.append({
"Patterns": patterns,
"Output": output,
})
segments = {
1: 2,
4: 4,
7: 3,
8: 7,
}
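# Digits 1, 4, 7 and 8 are the only ones drawn with a unique number of segments
# (2, 4, 3 and 7 respectively), so part 1 just counts output words with those lengths.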
matches = []
for entry in entries:
for output in entry["Output"]:
if len(output) in segments.values():
matches.append(output)
print(len(matches)) | 2.96875 | 3 |
etk/knowledge_graph/knowledge_graph.py | donaq/etk | 77 | 12768404 | import json
from typing import Dict, List
from etk.knowledge_graph.schema import KGSchema
from etk.knowledge_graph.graph import Graph
from etk.knowledge_graph.subject import Subject
from etk.knowledge_graph.node import URI, Literal
from etk.utilities import deprecated
class KnowledgeGraph(Graph):
"""
This class is a knowledge graph object, provides API for user to construct their kg.
Add field and value to the kg object, analysis on provenance
"""
def __init__(self, schema: KGSchema, doc):
super().__init__()
self.origin_doc = doc
self.schema = schema
self._fork_namespace_manager()
@deprecated()
def add_value(self, field_name: str, value: object=None) -> None:
"""
Add a value to knowledge graph.
Input can either be a value or a json_path. If the input is json_path, the helper function _add_doc_value is
called.
If the input is a value, then it is handled
Args:
field_name: str, the field name in the knowledge graph
value: the value to be added to the knowledge graph
"""
if not self._ns.store.namespace(''):
self.bind(None, 'http://isi.edu/default-ns/')
obj = self.schema.field_type(field_name, value)
if not obj:
raise Exception() # TODO: replace with a specific Exception
self.add_triple(URI(self.origin_doc.doc_id), URI(field_name), obj)
def _find_types(self, triples):
"""
find type in root level
:param triples:
:return:
"""
types = []
for t in triples:
s, p, o = t
if self._is_rdf_type(p):
if isinstance(o, Subject):
continue
types.append(o)
return types
def add_subject(self, subjects, context=None):
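        """
        Recursively add the triples of a Subject (and of any nested Subjects),
        keeping only those triples that pass schema validation.
        """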
if not context:
context = set([])
s_types = self._find_types(subjects)
for t in subjects:
s, p, o = t
o_types = []
if isinstance(o, Subject) and o not in context:
context.add(o)
self.add_subject(o, context)
o_types = self._find_types(o)
if self.schema.is_valid(s_types, p, o_types):
triple = self._convert_triple_rdflib((s, p, o))
self._g.add(triple)
@property
def value(self) -> Dict:
"""
Get knowledge graph object
"""
g = {}
for p, o in self._g.predicate_objects():
_, property_ = self._ns.split_uri(p)
if property_ not in g:
g[property_] = list()
g[property_].append({
'key': self.create_key_from_value(o, property_),
'value': o.toPython()
})
return g
@deprecated()
def get_values(self, field_name: str) -> List[object]:
"""
Get a list of all the values of a field.
"""
result = list()
p = self.schema.parse_field(field_name)
for o in self._g.objects(None, p):
result.append(o.toPython())
return result
def create_key_from_value(self, value, field_name: str):
key = self.schema.field_type(field_name, value)
if isinstance(key, URI):
return key
if isinstance(key, str) or isinstance(key, Literal):
key = str(key).strip().lower()
return key
def serialize(self, format='legacy', namespace_manager=None):
if format == 'legacy':
# Output DIG format
return json.dumps(self.value)
return super().serialize(format, namespace_manager)
def _fork_namespace_manager(self):
for prefix, ns in self.schema.ontology._ns.namespaces():
self.bind(prefix, ns)
def add_types(self, type_):
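        """Attach one or more rdf:type values to the root subject of the document."""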
s = Subject(URI(self.origin_doc.doc_id))
p = URI('rdf:type')
if not isinstance(type_, list):
type_ = [type_]
for a_type in type_:
s.add_property(p, URI(a_type))
self.add_subject(s)
def validate(self):
conforms, result_graph = self.schema.validate(self)
return conforms, result_graph
| 2.265625 | 2 |
dmr/sdmr.py | taylorshin/dmr | 43 | 12768405 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.special as special
import scipy.optimize as optimize
import scipy.misc as misc
from .dmr import DMR
class SDMR(DMR):
'''
Simple Topic Model with Dirichlet Multinomial Regression
'''
def get_alpha_n_m_z(self, idx=None):
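        """Return the full alpha array, or only the entries selected by ``idx`` when an index is given."""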
if idx is None:
return self.alpha
else:
return self.alpha[idx]
| 2.421875 | 2 |
sevdesk/client/models/get_communication_ways_type.py | HpLightcorner/SevDesk-Python-Client | 0 | 12768406 | from enum import Enum
class GetCommunicationWaysType(str, Enum):
PHONE = "PHONE"
EMAIL = "EMAIL"
WEB = "WEB"
MOBILE = "MOBILE"
# The SevDesk API might use "0" for null-enums
NULL = "0"
def __str__(self) -> str:
return str(self.value)
| 3.171875 | 3 |
src/apps/users/models/client_address.py | leonardon473/my-dinner-backend | 0 | 12768407 | # -----------------------------------------------------------------------------
# Libraries
# -----------------------------------------------------------------------------
# Core libs
from typing import TYPE_CHECKING
# Third party libs
from django.db import models
# Project libs
# If type checking, __all__
if TYPE_CHECKING:
pass
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class ClientAddress(models.Model):
client_address_id = models.AutoField(primary_key=True)
client = models.ForeignKey("users.Client", on_delete=models.CASCADE)
street = models.CharField(max_length=50)
num_ext = models.CharField(max_length=50)
num_int = models.CharField(blank=True, max_length=50)
neighborhood = models.CharField(max_length=50)
zip_code = models.CharField(max_length=5)
class Meta:
verbose_name = "Client Address"
verbose_name_plural = "Client Addresses"
def __str__(self):
return self.get_full_address()
def get_full_address(self):
address_fields = [
# 'address_name',
"street",
"num_ext",
"num_int",
"neighborhood",
"zip_code",
]
address = [
getattr(self, field)
for field in address_fields
            if getattr(self, field) is not None and getattr(self, field) != ""
]
return ", ".join(address)
| 1.9375 | 2 |
collector/basic.py | ninjadq/Windows-Agent | 60 | 12768408 | """
Collect the host's basic metrics.
Thanks to Feng_Qi; a lot of the code in this file was borrowed from him.
"""
import psutil
import time
import json
import copy
import logging
from rpc.transfer import send_data_to_transfer
from utils import g
def collect():
logging.debug('enter basic collect')
push_interval = 60
zh_decode = "gbk"
time_now = int(time.time())
payload = []
data = {"endpoint": g.HOSTNAME, "metric": "", "timestamp": time_now,
"step": push_interval, "value": "", "counterType": "", "tags": ""}
cpu_status = psutil.cpu_times_percent()
mem_status = psutil.virtual_memory()
swap_status = psutil.swap_memory()
disk_io_status = psutil.disk_io_counters(perdisk=True)
net_io_status = psutil.net_io_counters(pernic=True)
# agent alive
data["metric"] = "agent.alive"
data["value"] = 1
data["counterType"] = "GAUGE"
payload.append(copy.copy(data))
logging.debug(cpu_status)
data["metric"] = "cpu.user"
data["value"] = cpu_status.user
data["counterType"] = "GAUGE"
payload.append(copy.copy(data))
data["metric"] = "cpu.system"
data["value"] = cpu_status.system
payload.append(copy.copy(data))
data["metric"] = "cpu.idle"
data["value"] = cpu_status.idle
payload.append(copy.copy(data))
data["metric"] = "mem.memused.percent"
data["value"] = mem_status.percent
payload.append(copy.copy(data))
data["metric"] = "mem.swapused.percent"
data["value"] = swap_status.percent
payload.append(copy.copy(data))
disk_status = psutil.disk_partitions()
for disk in disk_status:
if 'cdrom' in disk.opts or disk.fstype == '':
continue
disk_info = psutil.disk_usage(disk.mountpoint)
data["metric"] = "df.used.percent"
data["value"] = disk_info.percent
data["tags"] = "disk=" + disk.device.split(":")[0]
payload.append(copy.copy(data))
data["metric"] = "df.byte.total"
data["value"] = disk_info.total
payload.append(copy.copy(data))
data["metric"] = "df.byte.used"
data["value"] = disk_info.used
payload.append(copy.copy(data))
data["metric"] = "df.byte.free"
data["value"] = disk_info.free
payload.append(copy.copy(data))
for key in disk_io_status:
data["metric"] = "disk.io.read_count"
data["value"] = disk_io_status[key].read_count
data["tags"] = "device=" + key
data["counterType"] = "COUNTER"
payload.append(copy.copy(data))
data["metric"] = "disk.io.write_count"
data["value"] = disk_io_status[key].write_count
payload.append(copy.copy(data))
data["metric"] = "disk.io.read_bytes"
data["value"] = disk_io_status[key].read_bytes
payload.append(copy.copy(data))
data["metric"] = "disk.io.write_bytes"
data["value"] = disk_io_status[key].write_bytes
payload.append(copy.copy(data))
data["metric"] = "disk.io.read_time"
data["value"] = disk_io_status[key].read_time
payload.append(copy.copy(data))
data["metric"] = "disk.io.write_time"
data["value"] = disk_io_status[key].write_time
payload.append(copy.copy(data))
for key in net_io_status:
if is_interface_ignore(key):
continue
data["metric"] = "net.if.in.mbits"
data["value"] = net_io_status[key].bytes_recv * 8 / 100000
data["tags"] = "interface=" + key.decode(zh_decode)
payload.append(copy.copy(data))
data["metric"] = "net.if.out.mbits"
data["value"] = net_io_status[key].bytes_sent * 8 / 100000
payload.append(copy.copy(data))
data["metric"] = "net.if.in.packets"
data["value"] = net_io_status[key].packets_recv
payload.append(copy.copy(data))
data["metric"] = "net.if.out.packets"
data["value"] = net_io_status[key].packets_sent
payload.append(copy.copy(data))
data["metric"] = "net.if.in.error"
data["value"] = net_io_status[key].errin
payload.append(copy.copy(data))
data["metric"] = "net.if.out.error"
data["value"] = net_io_status[key].errout
payload.append(copy.copy(data))
data["metric"] = "net.if.in.drop"
data["value"] = net_io_status[key].dropin
payload.append(copy.copy(data))
data["metric"] = "net.if.out.drop"
data["value"] = net_io_status[key].dropout
payload.append(copy.copy(data))
logging.debug(payload)
    payload = [x for x in payload if x['metric'] not in g.IGNORE]
try:
result = send_data_to_transfer(payload)
except Exception as e:
logging.error(e)
else:
logging.info(result)
def is_interface_ignore(key):
    """
    Return True if the interface should be ignored (its name contains an ignored prefix).
    """
    for ignore_key in g.COLLECTOR['ifacePrefixIgnore']:
        if ignore_key in key.decode('gbk'):
            return True
    return False
def basic_collect(period):
"""
    Loop forever, collecting data once per period.
    :param period: length of the collection cycle, in seconds
"""
logging.debug('prepare collect basic data')
while True:
try:
collect()
except Exception as e:
logging.error(e, exc_info=True)
time.sleep(period)
| 2.484375 | 2 |
dudoku.py | Bojidarist/dudoku | 0 | 12768409 | import pygame
import random
class Dudoku():
def __init__(self):
self.cell_width = 60
self.cell_height = 60
self.cells = [] # x[y[]]
self.cells_count = 30
pass
def new_game(self):
# create 9x9 field with values of 0
for x in range(9):
self.cells.append([])
for y in range(9):
self.cells[x].append(0)
cells_to_create = self.cells_count
while cells_to_create > 0:
x = random.randint(0, 8)
y = random.randint(0, 8)
if (self.cells[x][y] == 0):
new_value = random.randint(1, 9)
if (not self.do_exist_in_row(x, new_value) and not self.do_exist_in_column(y, new_value)):
self.cells[x][y] = new_value
cells_to_create = cells_to_create - 1
def do_exist_in_row(self, x, a):
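        """Return True if value a already appears somewhere in row x."""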
for cell in self.cells[x]:
if (cell == a):
return True
return False
def do_exist_in_column(self, y, a):
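        """Return True if value a already appears somewhere in column y."""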
for cell in self.cells:
if (cell[y] == a):
return True
return False
def draw_cells(self):
for i in range(1, 10):
for j in range(1, 10):
arcade.draw_rectangle_outline(i * self.cell_width - self.cell_width / 2 ,
j * self.cell_height - self.cell_height / 2, self.cell_width,
self.cell_height, arcade.color.BRITISH_RACING_GREEN, 1)
def draw_bigcells(self):
w = 3 * self.cell_width
h = 3 * self.cell_height
for i in range(1, 4):
for j in range(1, 4):
arcade.draw_rectangle_outline(i * w - w / 2, j * h - h / 2, w, h, arcade.color.BRITISH_RACING_GREEN, 3)
def draw_playground(self):
arcade.draw_rectangle_outline(self.cell_width * 9 - (self.cell_width * 9) / 2,
self.cell_height * 9 - (self.cell_height * 9) / 2,
self.cell_width * 9, self.cell_height * 9,
arcade.color.BRITISH_RACING_GREEN, 10)
self.draw_cells()
self.draw_bigcells() | 3.546875 | 4 |
lafs/matrix_generators.py | james-akl/pylafs | 0 | 12768410 | import lafs
import random
import math
# Returns an Identity Matrix of dimensions (n, n_col)
def I(n, n_col = None):
if type(n) == lafs.matrix.Matrix:
n_col = n.dim(1)
n = n.dim(0)
elif n_col == None:
n_col = n
ret = lafs.matrix.Matrix(n, n_col)
for i in range(min(n, n_col)):
ret[i][i] = 1
return ret
# Returns a Ones Matrix of dimensions (n, n_col)
# If input is Matrix A, returns a Ones Matrix of same size.
def U(n, n_col = None):
if type(n) == lafs.matrix.Matrix:
n_col = n.dim(1)
n = n.dim(0)
elif n_col == None:
n_col = n
ret = lafs.matrix.Matrix(n, n_col)
for i in range(n):
for j in range(n_col):
ret[i][j] = 1
return ret
# Returns an Zeros Matrix of dimensions (n, n_col)
def Z(n, n_col = None):
if type(n) == lafs.matrix.Matrix:
n_col = n.dim(1)
n = n.dim(0)
elif n_col == None:
n_col = n
ret = lafs.matrix.Matrix(n, n_col)
for i in range(n):
for j in range(n_col):
ret[i][j] = 0
return ret
# Returns lower triangular version of input matrix.
def lower(matrix):
if type(matrix) != lafs.matrix.Matrix:
raise ValueError("Input must be a Matrix")
ret = lafs.matrix.Matrix(matrix.dim(0), matrix.dim(1))
for i in range(matrix.dim(0)):
for j in range(i + 1):
ret[i][j] = matrix[i][j]
return ret
# Returns upper triangular version of input matrix.
def upper(matrix):
if type(matrix) != lafs.matrix.Matrix:
raise ValueError("Input must be a Matrix")
ret = lafs.matrix.Matrix(matrix.dim(0), matrix.dim(1))
for i in range(matrix.dim(0)):
for j in range(i, matrix.dim(1)):
ret[i][j] = matrix[i][j]
return ret
# Temporary function for matrix generation.
def randm(n_row, n_col=None):
if n_col == None:
n_col = n_row
rows = []
for i in range(n_row):
row = []
for j in range(n_col):
row.append(random.randint(-10, 10))
rows.append(row)
return lafs.matrix.Matrix(rows)
# Temporary function for vector generation.
def randv(n):
rows = []
for i in range(n):
rows.append(random.randint(-10, 10))
return lafs.vector.Vec(rows)
# Returns rotation matrix about the third axis of angle t.
def Rz(t):
# Assumes default angle unit is in degrees.
if lafs.unit_angle != 'rad':
t *= math.pi/180
return lafs.matrix.Matrix([
[math.cos(t), -math.sin(t), 0],
[math.sin(t), math.cos(t), 0],
[ 0, 0, 1]
])
# Returns rotation matrix about the second axis of angle t.
def Ry(t):
# Assumes default angle unit is in degrees.
if lafs.unit_angle != 'rad':
t *= math.pi/180
return lafs.matrix.Matrix([
[ math.cos(t), 0, math.sin(t)],
[ 0, 1, 0],
[-math.sin(t), 0, math.cos(t)]
])
# Returns rotation matrix about the first axis of angle t.
def Rx(t):
# Assumes default angle unit is in degrees.
if lafs.unit_angle != 'rad':
t *= math.pi/180
return lafs.matrix.Matrix([
[1, 0, 0],
[0, math.cos(t), -math.sin(t)],
[0, math.sin(t), math.cos(t)]
])
if __name__ == "__main__":
pass
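
# Illustrative usage (values are hypothetical):
#   I(3)       # 3x3 identity matrix
#   U(2, 3)    # 2x3 matrix of ones
#   Rz(90)     # rotation about the third axis; angles default to degrees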
| 3 | 3 |
result/pymus_image.py | pgarapon/pymus | 11 | 12768411 | import h5py
import tools.pymus_utils as pymusutil
import numpy as np
import matplotlib.pyplot as plt
import logging
logging.basicConfig(level=logging.DEBUG)
class ImageFormatError(Exception):
pass
class EchoImage(object):
''' Echogeneicity grayscale image
'''
def __init__(self,scan):
self.scan = scan
self.data_array = np.zeros((len(scan.z_axis),len(scan.x_axis)))
self.scan_x_bounds = [self.scan.x_axis.min(), self.scan.x_axis.max()]
self.scan_z_bounds = [self.scan.z_axis.min(), self.scan.z_axis.max()]
self.title = ""
def import_data(self,data):
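        """Reshape the flat input onto the (z, x) scan grid and store its magnitude as the image data."""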
try:
self.data_array = np.abs( np.reshape(data,self.data_array.shape) )
except:
raise ImageFormatError(" format error - cannot reshape %s to %s " % ( str(data.shape),str(self.data_array.shape) ))
return
def set_title(self,title):
self.title = title
def show_image(self,dbScale=True,dynamic_range=60,to_file=None):
z_m, z_M = self.scan_z_bounds
x_m, x_M = self.scan_x_bounds
z_span = z_M - z_m
x_span = x_M - x_m
x_ratio = x_span/z_span
print("X -> %s %s Z -> %s %s / %s %s / %s " % (x_m,x_M,z_m,z_M,z_span,x_span,x_ratio))
base_sz = 6.
im_M = self.data_array.max()
fig, ax = plt.subplots(figsize=(1.0 + x_ratio*base_sz,0.3 + base_sz))
xtent = [x_m,x_M,z_m,z_M]
if dbScale:
plt_im = 20.*np.log10((1./im_M)*self.data_array)
else:
plt_im = self.data_array
cax = ax.imshow(plt_im,interpolation='none',vmin=-1.*dynamic_range,vmax=0.,extent=xtent,cmap='gray')
ax.set_xlabel(" x [mm] ")
ax.set_ylabel(" z [mm] ")
ax.set_title(self.title)
range_ticks = [-1.*k for k in np.arange(int(dynamic_range + 1))[::-10]]
fig.colorbar(cax, ticks = range_ticks)
if to_file is not None:
plt.savefig(to_file)
plt.show()
def write_file(self,filename,prefix=None,overwrite=False):
data_to_write = {'title' : self.title, 'data' : self.data_array}
pymusutil.generic_hdf5_write(filename,prefix,overwrite,data_to_write)
def read_file(self,filename,prefix):
data_from_file = {'title' : None, 'data' : None}
res = pymusutil.generic_hdf5_read(filename,prefix,data_from_file)
logging.debug(data_from_file)
if data_from_file['title'] is None:
logging.error("title not found in %s:%s " % (filename,prefix))
else:
self.title = data_from_file['title'][0]
if data_from_file['data'] is None:
logging.error("image data not found in %s:%s " % (filename,prefix))
else:
self.data_array = data_from_file['data'][:]
| 2.4375 | 2 |
dnn_inference/BBoxTest.py | StatML-dAI/dnn-inference | 0 | 12768412 | """
Significance testing based on black-box models
"""
# Author: <NAME> <<EMAIL>>
from tensorflow.keras.models import load_model
import numpy as np
from scipy.stats import norm, mode
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
import warnings
import tensorflow.keras.backend as K
from tensorflow.keras.initializers import glorot_uniform
import tensorflow as tf
from sklearn.model_selection import KFold
from scipy.stats import hmean, gmean
import scipy.optimize
import matplotlib.pyplot as plt
import os
from scipy.optimize import brentq
from copy import deepcopy
import time
def comb_p_value(P_value, cp):
"""
    Combine a set of cross-validation p-values into a single p-value using the method named by ``cp``.
"""
P_value = np.array(P_value)
cv_num = len(P_value)
# print(P_value)
if cv_num > 1:
P_value = np.array(P_value)
if cp == 'gmean':
P_value_cp = np.e*gmean(P_value, 0)
elif cp == 'median':
P_value_cp = 2*np.median(P_value, 0)
elif cp == 'Q1':
P_value_cp = cv_num/2.*np.partition(P_value, 1)[1]
elif cp == 'min':
P_value_cp = cv_num*np.min(P_value, 0)
elif cp == 'hmean':
P_value_cp = np.e * np.log(cv_num) * hmean(P_value, 0)
elif cp == 'hommel':
const = np.sum(1. / (np.arange(cv_num) + 1.))
order_const = const*(cv_num/(np.arange(cv_num) + 1.))
P_value_cp = np.sort(P_value)*order_const
P_value_cp = np.min(P_value_cp)
elif cp == 'cauchy':
t0 = np.mean(np.tan((.5 - P_value)*np.pi))
P_value_cp = .5 - np.arctan(t0)/np.pi
else:
warnings.warn("cp should be {geometric, min, median, Q1, hmean, hommel, cauchy}")
else:
P_value_cp = np.mean(P_value, 0)
P_value_cp = np.minimum(P_value_cp, 1.)
return P_value_cp
def size_fun(x, N, min_N=2000):
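    # Residual whose root in x gives the estimation-sample size n solving
    # n + min_N * log(n) / (2 * log(min_N / 2)) = N; used with scipy's brentq in testing().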
return x + min_N * np.log(x) / 2 / np.log(min_N/2) - N
class split_test(object):
"""
Class for one-split/two-split test based on deep neural networks.
Parameters
----------
inf_feats: list-like | shape = (num of tests, dim of features)
        List of covariates/features under hypothesis testing; each element corresponds to one test.
model: {keras-defined neural network}
A neural network for original full dataset
model_mask: {keras-defined neural network}
A neural network for masked dataset by masking/changing the features under hypothesis testing
change: {'mask', 'perm'}, default='mask'
        The way to change the testing features: ``'mask'`` masks them out (zeros for image data, column means/modes for tabular data), while ``'perm'`` permutes them across instances.
alpha: float (0,1), default=0.05
The nominal level of the hypothesis testing
verbose: {0, 1}, default=0
        Whether to print the testing results: 1 indicates yes, 0 indicates no.
    eva_metric: {'mse', 'mae', 'zero-one', 'cross-entropy', or custom metric function}
        The evaluation metric: ``'mse'`` is the l2-loss and ``'mae'`` the l1-loss for regression, ``'zero-one'`` is the zero-one loss and ``'cross-entropy'`` the log-loss for classification. It can also be a custom metric function called as ``eva_metric(y_true, y_pred)``.
cp_path: string, default='./splitTest_checkpoints'
The checkpoints path to save the models
Methods
-------
adaRatio
dual_feat
testing
visual
"""
def __init__(self, inf_feats, model, model_mask, change='mask', alpha=.05, verbose=0, eva_metric='mse', cp_path='./splitTest_checkpoints'):
self.inf_feats = inf_feats
self.model = model
self.model_mask = model_mask
self.alpha = alpha
self.change = change
self.eva_metric = eva_metric
self.p_values = []
self.p_values_comb = []
self.cp_path = cp_path
def metric(self, y_true, y_pred):
"""
Return the loss for `self.eva_metric`
Parameters
----------
y_true: the ground truth
y_pred: the predicted label
"""
if self.eva_metric == 'mse':
metric_tmp = ((y_true - y_pred)**2).flatten()
elif self.eva_metric == 'mae':
metric_tmp = abs(y_true - y_pred).flatten()
elif self.eva_metric == 'zero-one':
label_pred = np.argmax(y_pred, 1)
label_true = np.argmax(y_true, 1)
metric_tmp = 1. - 1.*(label_true == label_pred)
elif self.eva_metric == 'cross-entropy':
label_true = np.argmax(y_true, 1)
metric_tmp = -np.log(y_pred[range(len(y_pred)),label_true])
else:
metric_tmp = self.eva_metric(y_true, y_pred)
return metric_tmp
def save_init(self):
"""
        Save the initial weights of the full and mask network models so that they can be restored later.
"""
self.model.built = True
self.model_mask.built = True
# self.model.save_weights(self.cp_path+'/model_init.h5')
# self.model_mask.save_weights(self.cp_path+'/model_mask_init.h5')
self.model.save(self.cp_path+'/model_init')
self.model_mask.save(self.cp_path+'/model_mask_init')
def reset_model(self):
"""
        Restore the full and mask network models from the saved initial weights.
"""
self.model.built = True
self.model_mask.built = True
# self.model.load_weights(self.cp_path+'/model_init.h5')
# self.model_mask.load_weights(self.cp_path+'/model_mask_init.h5')
self.model = load_model(self.cp_path+'/model_init')
self.model_mask = load_model(self.cp_path+'/model_mask_init')
def reload_model(self, path_full, path_mask):
"""
reload the pre-saved model.
"""
# path_tmp = self.cp_path+'/model'+'_inf'+str(k)+'_cv'+str(h)+'.h5'
# mask_path_tmp = self.cp_path+'/model_mask'+'_inf'+str(k)+'_cv'+str(h)+'.h5'
self.model.save_weights(path_full)
self.model_mask.save_weights(path_mask)
self.model.load_weights(path_full)
self.model_mask.load_weights(path_mask)
## can be extent to @abstractmethod
def dual_feat(self, X, cat_feats=[], k=0):
"""
Return instances with masked/perm k-th hypothesized features (dual feats).
Parameters
----------
X: array-like
Target instances.
cat_feats: list-like, default = []
The col-index for categorical features
k: integer, default = 0
k-th hypothesized features in inf_feats
"""
if self.change == 'mask':
Z = self.mask_cov(X, cat_feats=cat_feats, k=k)
elif self.change == 'perm':
Z = self.perm_cov(X, k=k)
return Z
def mask_cov(self, X, cat_feats=[], k=0):
"""
Return instances with masked k-th hypothesized features.
Parameters
----------
X: array-like
Target instances.
cat_feats: list-like, default = []
            The column indices of categorical features; currently this only works for tabular data.
k: integer, default = 0
k-th hypothesized features in inf_feats
"""
Z = X.copy()
n_sample = len(Z)
if len(self.inf_feats[k].shape) > 1:
## for channels_last image data: shape should be (#samples, img_rows, img_cols, channel)
if len(cat_feats):
warnings.warn("cat_feats is ignored. cat_feats only works for tabular data, whereas a image dataset is given.")
Z[:, self.inf_feats[k][0][:,None], self.inf_feats[k][1], :] = 0.
else:
## this for tabular data
cat_inf_feats = set(self.inf_feats[k]).intersection(set(cat_feats))
cont_inf_feats = set(self.inf_feats[k]) - set(cat_inf_feats)
cat_inf_feats, cont_inf_feats = list(cat_inf_feats), list(cont_inf_feats)
Z[:,cont_inf_feats] = np.array([np.mean(Z[:,cont_inf_feats], axis=0)]*n_sample)
if len(cat_inf_feats) > 0:
Z[:,cat_inf_feats] = np.array([mode(Z[:,cat_inf_feats], axis=0)[0][0]]*n_sample)
return Z
def perm_cov(self, X, k=0):
"""
Return instances with permuted k-th hypothesized features.
Parameters
----------
X: array-like
Target instances.
k: integer, default = 0
k-th hypothesized features in inf_feats
"""
Z = X.copy()
if len(self.inf_feats[k].shape) > 1:
## for channels_last image data: shape should be (#samples, img_rows, img_cols, channel)
Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], :]= np.random.permutation(Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], :])
else:
Z[:,self.inf_feats[k]]= np.random.permutation(Z[:,self.inf_feats[k]])
return Z
def noise_cov(self, X, k=0):
"""
Return instances with niosed k-th hypothesized features.
Parameters
----------
X: array-like
Target instances.
k: integer, default = 0
k-th hypothesized features in inf_feats
"""
Z = X.copy()
Z[:,self.inf_feats[k]] = np.random.randn(len(X), len(self.inf_feats[k]))
return Z
def adaRatio(self, X, y, k=0, cat_feats=[], fit_params={}, split_params={}):
"""
Return a data-adaptive splitting ratio and perturbation level.
Parameters
----------
X: array-like | shape=(n_samples, dim1, dim2, ...)
Features.
y: array-like | shape=(n_samples, dim)
Outcomes.
k: integer, default = 0
k-th hypothesized features in inf_feats
fit_params: dict | shape = dict of fitting parameters
See keras ``fit``: (https://keras.rstudio.com/reference/fit.html), including ``batch_size``, ``epoch``, ``callbacks``, ``validation_split``, ``validation_data``.
split_params: {dict of splitting parameters}
split: {'one-split', 'two-split'}, default='one-split'
one-split or two-split tests.
perturb: float, default=None
Perturb level for the one-split test, if ``perturb = None``, then the perturb level is determined by adaptive tunning.
num_perm: int, default=100
Number of permutation for determine the splitting ratio.
            ratio_grid: list of float (0,1), default=[.2, .4, .6, .8]
A list of estimation/inference ratios under searching.
if_reverse: {0,1}, default=0
``if_reverse = 0`` indicates the loop of ``ratio_grid`` starts from smallest one to largest one; ``if_reverse = 1`` indicates the loop of ``ratio_grid`` starts from largest one to smallest one.
perturb_scale: integer, default=5
The scale of perturb, and the perturbation grid is generated based on 2**range(-perturb_scale, perturb_scale)*var(losses by full model)
min_inf: int, default=0
The minimal size for inference sample.
min_est: int, default=0
The minimal size for estimation sample.
ratio_method: {'fuse', 'close'}, default='fuse'
The adaptive splitting method to determine the optimal estimation/inference ratios.
cv_num: int, default=1
The number of cross-validation to shuffle the estimation/inference samples in adaptive ratio splitting. We recommend to set **cv_num** in **adaRatio** as same as **cv_num** in **testing**.
cp: {'gmean', 'min', 'hmean', 'Q1', 'hommel', 'cauchy'}, default ='hommel'
A method to combine p-values obtained from cross-validation. see (https://arxiv.org/pdf/1212.4966.pdf) for more detail.
verbose: {0,1}, default=1
                If 1, print the progress of the adaptive tuning procedure.
cp: {'gmean', 'min', 'hmean', 'Q1', 'hommel', 'cauchy'} | default = 'hommel'
A method to combine p-values obtained from cross-validation. see (https://arxiv.org/pdf/1212.4966.pdf) for more detail.
verbose: {0,1} | default=1
If print the adaptive splitting process.
Returns
-------
n_opt: integer
A reasonable estimation sample size.
m_opt: integer
A reasonable inference sample size.
perturb_opt: float
A reasonable perturbation level.
"""
split_params_default = {'split': 'one-split',
'perturb': None,
'num_perm': 100,
'ratio_grid': [.2, .4, .6, .8],
'if_reverse': 0,
'perturb_scale': 5,
'min_inf': 0,
'min_est': 0,
'ratio_method': 'fuse',
'cv_num': 1,
'cp': 'hommel',
'verbose': 1}
split_params_default.update(split_params)
split_params = split_params_default
perturb=split_params['perturb']
split=split_params['split']
perturb_scale=split_params['perturb_scale']
ratio_grid=split_params['ratio_grid']
if_reverse=split_params['if_reverse']
min_inf=split_params['min_inf']
min_est=split_params['min_est']
ratio_method=split_params['ratio_method']
num_perm=split_params['num_perm']
cv_num= split_params['cv_num']
cp = split_params['cp']
verbose= split_params['verbose']
ratio_grid.sort()
if if_reverse == 1:
ratio_grid = list(reversed(ratio_grid))
candidate, Err1_lst, ratio_lst, P_value_lst = [], [], [], []
found = 0
if split == 'two-split':
for ratio_tmp in ratio_grid:
ratio_tmp = ratio_tmp/2
m_tmp = int(len(X)*ratio_tmp)
n_tmp = len(X) - 2*m_tmp
if (m_tmp < min_inf) or (n_tmp < min_est):
continue
# split data
P_value = []
for h in range(cv_num):
self.reset_model()
P_value_cv = []
## generate permutated samples
X_perm = X.copy()
X_perm = self.perm_cov(X_perm, k)
## split sample
X_train, X_test, y_train, y_test = train_test_split(X_perm, y, train_size=n_tmp, random_state=1)
# training for full model
history = self.model.fit(x=X_train, y=y_train, **fit_params)
# training for mask model
Z_train = self.dual_feat(X_train, cat_feats, k)
history_mask = self.model_mask.fit(x=Z_train, y=y_train, **fit_params)
Z_test = self.dual_feat(X_test, cat_feats, k)
# evaluation for mask model
pred_y_mask = self.model_mask.predict(Z_test)
for j in range(num_perm):
X_test_perm = X_test.copy()
X_test_perm = self.perm_cov(X_test_perm, k)
pred_y = self.model.predict(X_test_perm)
ind_inf, ind_inf_mask = train_test_split(range(len(pred_y)), train_size=m_tmp, random_state=42)
metric_tmp = self.metric(y_test[ind_inf], pred_y[ind_inf])
metric_mask_tmp = self.metric(y_test[ind_inf_mask], pred_y_mask[ind_inf_mask])
p_value_tmp = self.diff_p_value(metric_tmp, metric_mask_tmp)
P_value_cv.append(p_value_tmp)
P_value.append(P_value_cv)
P_value = np.array(P_value)
P_value_cp = np.array([comb_p_value(P_value[:,i], cp=cp) for i in range(num_perm)])
## compute the type 1 error
Err1 = len(P_value_cp[P_value_cp < self.alpha]) / len(P_value_cp)
Err1_lst.append(Err1)
# P_value_lst.append(P_value)
ratio_lst.append(ratio_tmp)
if verbose==1:
print('(AdaRatio) Est. Type 1 error: %.3f; p_value_mean: %.3f, inf sample ratio: %.3f'
%(Err1, P_value_cp.mean(), ratio_tmp))
# print('(AdaRatio) p_value: %.3f, inference sample ratio: %.3f' %(P_value.mean(), ratio_tmp))
if Err1 < self.alpha:
found = 1
if ratio_method == 'fuse':
m_opt = m_tmp
n_opt = len(X) - 2*m_opt
break
if found==0:
warnings.warn("No ratio can control the Type 1 error, pls increase the sample size, and the inference sample ratio is set as the min of ratio_grid.")
Err1_lst, ratio_lst = np.array(Err1_lst), np.array(ratio_lst)
m_opt = int(ratio_lst[np.argmin(Err1_lst)] * len(X))
n_opt = len(X) - 2*m_opt
return n_opt, m_opt
if split == 'one-split':
if perturb != None:
perturb_grid = [perturb]
for perturb_idx_tmp in range(-perturb_scale, perturb_scale):
perturb_level_tmp = 2**(perturb_idx_tmp)
## stop if current perturb is enough to control the type 1 error
if found == 1:
break
Err1_lst, ratio_lst, perturb_lst, P_value_lst = [], [], [], []
for ratio_tmp in ratio_grid:
m_tmp = int(len(X)*ratio_tmp)
n_tmp = len(X) - m_tmp
if (m_tmp < min_inf) or (n_tmp < min_est):
continue
# split data
P_value = []
for h in range(cv_num):
self.reset_model()
P_value_cv = []
## generate permutated samples
X_perm = X.copy()
X_perm = self.perm_cov(X_perm, k)
# split samples
X_train, X_test, y_train, y_test = train_test_split(X_perm, y, train_size=n_tmp, random_state=h)
# training for full model
history = self.model.fit(x=X_train, y=y_train, **fit_params)
# training for mask model
Z_train = self.dual_feat(X_train, cat_feats, k)
history_mask = self.model_mask.fit(x=Z_train, y=y_train, **fit_params)
## evaluation for the mask model
Z_test = self.dual_feat(X_test, cat_feats, k)
pred_y_mask = self.model_mask.predict(Z_test)
metric_mask_tmp = self.metric(y_test, pred_y_mask)
# evaluation
for j in range(num_perm):
X_test_perm = X_test.copy()
X_test_perm = self.perm_cov(X_test_perm, k)
## compute the metric based on full model
pred_y = self.model.predict(X_test_perm)
metric_tmp = self.metric(y_test, pred_y)
## compute the p-value based on the diff metrics
p_value_tmp = self.diff_p_value(metric_tmp, metric_mask_tmp,
perturb_level=perturb_level_tmp)
P_value_cv.append(p_value_tmp)
P_value.append(P_value_cv)
P_value = np.array(P_value)
P_value_cp = np.array([comb_p_value(P_value[:,i], cp=cp) for i in range(num_perm)])
Err1 = len(P_value_cp[P_value_cp<=self.alpha])/len(P_value_cp)
Err1_lst.append(Err1)
if verbose==1:
print('(AdaRatio) Est. Type 1 err: %.3f; p_value_mean: %.3f, inf sample ratio: %.3f, perturb_level: %s'
%(Err1, P_value_cp.mean(), ratio_tmp, perturb_level_tmp))
P_value_lst.append(P_value_cp)
ratio_lst.append(ratio_tmp)
perturb_lst.append(perturb_idx_tmp)
if Err1 < self.alpha:
found = 1
m_opt = m_tmp
n_opt = len(X) - m_opt
perturb_idx_opt = perturb_idx_tmp
break
if found==0:
warnings.warn("No ratio and perturb_level can control the Type 1 error," \
"pls increase the perturb_level and sample size, and inference sample ratio is set as the one minimize the permutation Type 1 Error.")
Err1_lst, ratio_lst = np.array(Err1_lst), np.array(ratio_lst)
m_opt = int(ratio_lst[np.argmin(Err1_lst)] * len(X))
n_opt = len(X) - m_opt
perturb_idx_opt = perturb_lst[np.argmin(Err1_lst)]
return n_opt, m_opt, perturb_idx_opt
def diff_p_value(self, metric_full, metric_mask, perturb_level=0.):
"""
Return p-value for diff btw `metric_full` and `metric_mask` for the one-/two-split test.
Parameters
----------
metric_full: {array-like} of shape (n_samples)
metric for samples based on full model
metric_mask: {array-like} of shape (n_samples)
metric for samples based on mask model
Returns
-------
P_value: array of float [0, 1]
The p_values for target one-/two-split test.
"""
perturb_base = metric_full.std()
diff_tmp = metric_full - metric_mask + perturb_level * perturb_base * np.random.randn(len(metric_full))
Lambda_tmp = np.sqrt(len(diff_tmp)) * ( diff_tmp.std() )**(-1)*( diff_tmp.mean() )
p_value_tmp = norm.cdf(Lambda_tmp)
return p_value_tmp
def testing(self, X, y, fit_params, split_params={}, cat_feats=[], cv_num=5, cp='hommel', inf_ratio=None):
"""
Return p-values for hypothesis testing for inf_feats in class split_test.
Parameters
----------
X: {array-like} of shape (n_samples, dim_features)
Instances matrix/tensor, where n_samples in the number of samples and dim_features is the dimension of the features.
If X is vectorized feature, ``shape`` should be ``(#Samples, dim of feaures)``
If X is image/matrix data, ``shape`` should be ``(#samples, img_rows, img_cols, channel)``, that is, *X must channel_last image data*.
y: {array-like} of shape (n_samples,)
Output vector/matrix relative to X.
cat_feats: list, default = []
The col-index for categorical features; **now it's only work for tabular data**
fit_params: {dict of fitting parameters}
See keras ``fit``: (https://keras.rstudio.com/reference/fit.html), including ``batch_size``, ``epoch``, ``callbacks``, ``validation_split``, ``validation_data``, and so on.
split_params: {dict of splitting parameters}
split: {'one-split', 'two-split'}, default='one-split'
one-split or two-split test statistic.
perturb: float, default=None
Perturb level for the one-split test, if ``perturb = None``, then the perturb level is determined by adaptive tunning.
num_perm: int, default=100
Number of permutation for determine the splitting ratio.
ratio_grid: list of float (0,1), default=[.2, .4, .6, .8]
A list of estimation/inference ratios under searching.
if_reverse: {0,1}, default=0
                ``if_reverse = 0`` loops over ``ratio_grid`` from the smallest ratio to the largest; ``if_reverse = 1`` loops from the largest to the smallest.
perturb_scale: integer, default=5
                The scale of the perturbation; the perturbation grid is generated as 2**range(-perturb_scale, perturb_scale) times the standard deviation of the losses under the full model.
min_inf: int, default=0
The minimal size for inference sample.
min_est: int, default=0
The minimal size for estimation sample.
ratio_method: {'fuse', 'close'}, default='fuse'
The adaptive splitting method to determine the optimal estimation/inference ratios.
cv_num: int, default=*cv_num*
The number of cross-validation to shuffle the estimation/inference samples in adaptive ratio splitting.
cp: {'gmean', 'min', 'hmean', 'Q1', 'hommel', 'cauchy'}, default = *cp*
A method to combine p-values obtained from cross-validation. see (https://arxiv.org/pdf/1212.4966.pdf) for more detail.
verbose: {0,1}, default=1
                Whether to print the results of the adaptive tuning procedure.
cv_num: int, default=5
The number of cross-validation to shuffle the estimation/inference samples in testing.
cp: {'gmean', 'min', 'hmean', 'Q1', 'hommel', 'cauchy'}, default ='hommel'
A method to combine p-values obtained from cross-validation.
        inf_ratio: float, default=None
            A pre-specified inference sample ratio; if ``None``, it is determined by the adaptive splitting method.
Returns
-------
P_value: array of float [0, 1]
The p_values for target hypothesis testings.
"""
split_params_default = {'split': 'one-split',
'perturb': None,
'num_perm': 100,
'perturb_scale': 5,
'ratio_grid': [.2, .4, .6, .8],
'if_reverse': 0,
'min_inf': 0,
'min_est': 0,
'ratio_method': 'fuse',
'cv_num': cv_num,
'cp': cp,
'verbose': 1}
split_params_default.update(split_params)
split_params = split_params_default
## create checkpoints path
if not os.path.exists(self.cp_path):
os.mkdir(self.cp_path)
## save initial weights
self.save_init()
init_lr_full = deepcopy(self.model.optimizer.lr.numpy())
init_lr_mask = deepcopy(self.model.optimizer.lr.numpy())
P_value = []
for k in range(len(self.inf_feats)):
## initialize the models and learning rates
self.reset_model()
self.model.optimizer.lr.assign(init_lr_full)
self.model_mask.optimizer.lr.assign(init_lr_mask)
## (one-split) determine the splitting ratio for est and inf samples
if split_params['split'] == 'one-split':
if ((inf_ratio == None) or (split_params['perturb'] == None)):
if split_params['ratio_method'] == 'fuse':
n, m, perturb_idx = self.adaRatio(X, y, k, cat_feats=cat_feats,
fit_params=fit_params, split_params=split_params)
perturb_level = 2**perturb_idx
print('%d-th inference; Adaptive data splitting: n: %d; m: %d; perturb: %s'
%(k, n, m, perturb_level))
elif split_params['ratio_method'] == 'log-ratio':
root, info = brentq(size_fun, 3., len(X), args=(len(X), 2000.), full_output=True)
inf_ratio = 1 - root / len(X)
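                        # brentq solves size_fun for the estimation-sample size (root); the
                        # remaining fraction 1 - root/len(X) is used as the inference sample.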
if split_params['perturb'] == None:
split_params['ratio_grid'] = [inf_ratio]
n, m, perturb_idx = self.adaRatio(X, y, k, cat_feats=cat_feats,
fit_params=fit_params, split_params=split_params)
perturb_level = 2**perturb_idx
else:
perturb_idx = split_params['perturb']
perturb_level = 2**perturb_idx
m, n = int(inf_ratio * len(X)), len(X) - int(inf_ratio * len(X))
print('%d-th inference; log ratio data splitting: n: %d; m: %d; perturb: %s'
%(k, n, m, perturb_level))
else:
raise Exception("inf ratio method must be 'fuse' or 'log-ratio' if inf_ratio is not given!")
else:
m, n = int(inf_ratio * len(X)), len(X) - int(inf_ratio * len(X))
perturb_idx = split_params['perturb']
perturb_level = 2**perturb_idx
print('%d-th inference; fix data splitting: n: %d; m: %d' %(k, n, m))
## (two-split) determine the splitting ratio for est and inf samples
elif split_params['split'] == 'two-split':
perturb_level = 0.
if inf_ratio == None:
if split_params['ratio_method'] == 'fuse':
n, m = self.adaRatio(X, y, k, cat_feats=cat_feats,
fit_params=fit_params, split_params=split_params)
print('%d-th inference; Adaptive data splitting: n: %d; m: %d' %(k, n, m))
elif split_params['ratio_method'] == 'log-ratio':
root, info = brentq(size_fun, 3., len(X), args=(len(X), 2000.), full_output=True)
inf_ratio = 1 - root / len(X)
m, n = int(inf_ratio * len(X)/2)*2, len(X) - int(inf_ratio * len(X)/2)*2
print('%d-th inference; log-ratio data splitting: n: %d; m: %d' %(k, n, m))
else:
raise Exception("inf ratio method must be 'fuse' or 'log-ratio' if inf_ratio is not given!")
else:
m, n = int(inf_ratio * len(X)/2)*2, len(X) - int(inf_ratio * len(X)/2)*2
else:
raise Exception("split method must be 'one-split' or 'two-split'!")
## testing
P_value_cv = []
for h in range(cv_num):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=n, random_state=h)
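                # two-split: the inference sample is split again so the full and mask models are
                # evaluated on disjoint halves; one-split: both models share the same inference
                # sample, which is why a perturbation is injected in diff_p_value.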
if split_params['split'] == 'two-split':
X_inf, X_inf_mask, y_inf, y_inf_mask = train_test_split(X_test, y_test, train_size=.5, random_state=42)
if split_params['split'] == 'one-split':
X_inf, X_inf_mask, y_inf, y_inf_mask = X_test.copy(), X_test.copy(), y_test.copy(), y_test.copy()
self.reset_model()
self.model.optimizer.lr.assign(init_lr_full)
self.model_mask.optimizer.lr.assign(init_lr_mask)
## fit, predict, and inference in full model
history = self.model.fit(X_train, y_train, **fit_params)
pred_y = self.model.predict(X_inf)
metric_full = self.metric(y_inf, pred_y)
# fit, predict, and inference in mask model
Z_train = self.dual_feat(X_train, cat_feats, k)
history_mask = self.model_mask.fit(Z_train, y_train, **fit_params)
Z_inf = self.dual_feat(X_inf_mask, cat_feats, k)
pred_y_mask = self.model_mask.predict(Z_inf)
metric_mask = self.metric(y_inf_mask, pred_y_mask)
## compute p-value
p_value_tmp = self.diff_p_value(metric_full, metric_mask, perturb_level)
print('cv: %d; p_value: %.5f; metric: %.5f(%.5f); metric_mask: %.5f(%.5f)'
%(h, p_value_tmp,
metric_full.mean(), metric_full.std(),
metric_mask.mean(), metric_mask.std()))
P_value_cv.append(p_value_tmp)
self.p_values_comb.append(P_value_cv)
P_value_cv = np.array(P_value_cv)
p_value_mean = comb_p_value(P_value_cv, cp=cp)
print('#'*50)
if p_value_mean < self.alpha:
print('%d-th inf: reject H0 with p_value: %.3f' %(k, p_value_mean))
else:
print('%d-th inf: accept H0 with p_value: %.3f' %(k, p_value_mean))
print('#'*50)
P_value.append(p_value_mean)
# return P_value, fit_err, P_value_cv
self.p_values = P_value
return P_value
def visual(self, X, y, plt_params={'cmap': 'RdBu', 'alpha':0.6}, plt_mask_params={'cmap': 'RdBu', 'alpha':0.6}):
"""
Visualization for the inference results based on one illustrative example
Parameters
----------
X: array-like
demo instances.
y: array-like
demo labels
        plt_params: dict
            Dictionary of parameters passed to imshow for the original image; see: (https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.imshow.html)
        plt_mask_params: dict
            Dictionary of parameters passed to imshow for the mask overlay; see: (https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.imshow.html)
"""
if len(X.shape) == 2:
            print('Sorry, the visual function only works for image data.')
else:
num_class = y.shape[1]
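            # use the first instance of each class as a demo image for the mask overlay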
demo_ind = np.array([np.where(y[:,k]==1)[0][0] for k in range(num_class)])
X_demo = X[demo_ind]
cols, rows = len(self.inf_feats), num_class
fig = plt.figure(constrained_layout=False)
spec = fig.add_gridspec(ncols=cols, nrows=rows)
for row in range(rows):
for col in range(cols):
X_mask_tmp = np.nan*np.ones(X_demo.shape)
X_mask_tmp = self.mask_cov(X_mask_tmp, k=col)[0]
ax = fig.add_subplot(spec[row, col])
im1 = ax.imshow(X_demo[row], vmin=0, vmax=1, **plt_params)
ax.axis('off')
im2 = ax.imshow(X_mask_tmp, vmin=0, vmax=1, **plt_mask_params)
ax.axis('off')
if row == 0:
ax.set_title('p_values: %.3f' %self.p_values[col])
plt.subplots_adjust(top = 0.95, bottom=0.05, hspace=0.03, wspace=0.03, right=0.95)
plt.show()
class perm_test(object):
"""Class for holdout permutation test (HPT) based on deep neural networks.
Parameters
----------
inf_feats : list-like of shape (num of tests, dim of features)
        List of covariates/features under hypothesis testing; each element corresponds to one hypothesis test.
model : {keras-defined neural network}
A neural network for original full dataset
alpha: float (0,1), default=0.05
The nominal level of the hypothesis testing
verbose: {0, 1}, default=0
        Whether to print the testing results; 1 indicates yes, 0 indicates no.
eva_metric: {'mse', 'zero-one', 'cross-entropy', or custom metric function}
        The evaluation metric: ``'mse'`` is the l2-loss for regression, ``'zero-one'`` is the zero-one loss for classification, and ``'cross-entropy'`` is the log-loss for classification. It can also be a custom metric function of the form ``eva_metric(y_true, y_pred)``.
Methods
-------
testing
"""
def __init__(self, inf_feats, model, model_perm, alpha=.05, num_folds=5, num_perm=100, verbose=0, eva_metric='mse'):
self.inf_feats = inf_feats
self.model = model
self.model_perm = model_perm
self.alpha = alpha
self.num_perm = num_perm
self.num_folds = num_folds
self.eva_metric = eva_metric
def metric(self, y_true, y_pred):
if self.eva_metric == 'mse':
metric_tmp = ((y_true - y_pred)**2).flatten()
elif self.eva_metric == 'mae':
metric_tmp = abs(y_true - y_pred).flatten()
elif self.eva_metric == 'zero-one':
label_pred = np.argmax(y_pred, 1)
label_true = np.argmax(y_true, 1)
metric_tmp = 1. - 1.*(label_true == label_pred)
elif self.eva_metric == 'cross-entropy':
label_true = np.argmax(y_true, 1)
            metric_tmp = -np.log(y_pred[range(len(y_pred)), label_true])  # log-loss is the negative log-likelihood
else:
metric_tmp = self.eva_metric(y_true, y_pred)
return metric_tmp
def reset_model(self):
if int(tf.__version__[0]) == 2:
# for layer in self.model.layers:
# if isinstance(layer, tf.keras.Model):
# reset_weights(layer)
# continue
# for k, initializer in layer.__dict__.items():
# if "initializer" not in k:
# continue
# # find the corresponding variable
# var = getattr(layer, k.replace("_initializer", ""))
# var.assign(initializer(var.shape, var.dtype))
for layer in self.model.layers:
if isinstance(layer, tf.keras.Model): #if you're using a model as a layer
reset_weights(layer) #apply function recursively
continue
#where are the initializers?
if hasattr(layer, 'cell'):
init_container = layer.cell
else:
init_container = layer
for key, initializer in init_container.__dict__.items():
if "initializer" not in key: #is this item an initializer?
continue #if no, skip it
# find the corresponding variable, like the kernel or the bias
if key == 'recurrent_initializer': #special case check
var = getattr(init_container, 'recurrent_kernel')
else:
var = getattr(init_container, key.replace("_initializer", ""))
if var is None:
continue
else:
var.assign(initializer(var.shape, var.dtype))
if int(tf.__version__[0]) == 1:
session = K.get_session()
for layer in self.model.layers:
if hasattr(layer, 'kernel_initializer'):
layer.kernel.initializer.run(session=session)
if hasattr(layer, 'bias_initializer'):
layer.bias.initializer.run(session=session)
for layer in self.model_perm.layers:
if hasattr(layer, 'kernel_initializer'):
layer.kernel.initializer.run(session=session)
if hasattr(layer, 'bias_initializer'):
layer.bias.initializer.run(session=session)
    ## can be extended to @abstractmethod
def mask_cov(self, X, k=0):
Z = X.copy()
if type(self.inf_feats[k]) is list:
Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], 0] = 0.
else:
Z[:,self.inf_feats[k]]= 0.
return Z
def perm_cov(self, X, k=0):
Z = X.copy()
if type(self.inf_feats[k]) is list:
Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], 0]= np.random.permutation(Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], 0])
else:
Z[:,self.inf_feats[k]]= np.random.permutation(Z[:,self.inf_feats[k]])
return Z
def noise_cov(self, X, k=0):
Z = X.copy()
Z[:,self.inf_feats[k]] = np.random.randn(len(X), len(self.inf_feats[k]))
return Z
def testing(self, X, y, fit_params={}):
"""
Return p-values for hypothesis testing for inf_feats in class perm_test.
Parameters
----------
X : array-like | shape = (n_samples, dim_features)
            Instances matrix/tensor, where n_samples is the number of samples and dim_features is the dimension of the features.
            If X is a vectorized feature, ``shape`` should be ``(#samples, dim of features)``.
            If X is image/matrix data, ``shape`` should be ``(#samples, img_rows, img_cols, channel)``, that is, *X must be channel_last image data*.
y: {array-like} of shape (n_samples,)
Output vector/matrix relative to X.
fit_params: {dict of fitting parameters}
See keras ``fit``: (https://keras.rstudio.com/reference/fit.html), including ``batch_size``, ``epoch``, ``callbacks``, ``validation_split``, ``validation_data``, and so on.
Returns
-------
P_value: array of float [0, 1]
The p_values for target hypothesis testings.
"""
P_value = []
for k in range(len(self.inf_feats)):
kfold = KFold(n_splits=self.num_folds, shuffle=True)
self.reset_model()
print('%d-th permutation inference' %k)
## prediction and inference in full model
score_cv = []
for train, test in kfold.split(X, y):
self.reset_model()
history = self.model.fit(X[train], y[train], **fit_params)
pred_y = self.model.predict(X[test])
metric_full = self.metric(y[test], pred_y)
score_cv.append(metric_full.mean())
score = np.mean(score_cv)
            # prediction and inference with permuted features
score_perm = []
for l in range(self.num_perm):
score_perm_cv = []
Z = self.perm_cov(X, k)
for train_perm, test_perm in kfold.split(Z, y):
self.reset_model()
history_perm = self.model_perm.fit(Z[train_perm], y[train_perm], **fit_params)
pred_y_perm = self.model_perm.predict(Z[test_perm])
metric_perm = self.metric(y[test_perm], pred_y_perm)
score_perm_cv.append(metric_perm.mean())
score_perm.append(np.mean(score_perm_cv))
score_perm = np.array(score_perm)
## compute p-value
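            # permutation p-value with the usual +1 correction: the proportion of permuted
            # scores (losses) that are no larger than the observed score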
print("perf score: %.3f, perf permutation score: %.3f(%.3f)" %(score, score_perm.mean(), score_perm.std()))
p_value_tmp = (np.sum(score_perm <= score) + 1.0) / (self.num_perm + 1)
if p_value_tmp < self.alpha:
print('reject H0 with p_value: %.3f' %p_value_tmp)
else:
print('accept H0 with p_value: %.3f' %p_value_tmp)
P_value.append(p_value_tmp)
return P_value, metric_full.mean()
class Hperm_test(object):
"""Class for holdout permutation test (HPT) based on deep neural networks.
Parameters
----------
inf_feats : list-like of shape (num of tests, dim of features)
        List of covariates/features under hypothesis testing; each element corresponds to one hypothesis test.
model : {keras-defined neural network}
A neural network for original full dataset
alpha: float (0,1), default=0.05
The nominal level of the hypothesis testing
verbose: {0, 1}, default=0
        Whether to print the testing results; 1 indicates yes, 0 indicates no.
eva_metric: {'mse', 'zero-one', 'cross-entropy', or custom metric function}
        The evaluation metric: ``'mse'`` is the l2-loss for regression, ``'zero-one'`` is the zero-one loss for classification, and ``'cross-entropy'`` is the log-loss for classification. It can also be a custom metric function of the form ``eva_metric(y_true, y_pred)``.
cp_path: {string}, default='./HPT_checkpoints'
The checkpoints path to save the models
Methods
-------
testing
"""
def __init__(self, inf_feats, model, alpha=.05, num_perm=1000, verbose=0, eva_metric='mse', cp_path = './HPT_checkpoints'):
self.inf_feats = inf_feats
self.model = model
self.alpha = alpha
self.num_perm = num_perm
self.eva_metric = eva_metric
self.cp_path = cp_path
def metric(self, y_true, y_pred):
if self.eva_metric == 'mse':
metric_tmp = ((y_true - y_pred)**2).flatten()
elif self.eva_metric == 'mae':
metric_tmp = abs(y_true - y_pred).flatten()
elif self.eva_metric == 'zero-one':
label_pred = np.argmax(y_pred, 1)
label_true = np.argmax(y_true, 1)
metric_tmp = 1. - 1.*(label_true == label_pred)
elif self.eva_metric == 'cross-entropy':
label_true = np.argmax(y_true, 1)
            metric_tmp = -np.log(y_pred[range(len(y_pred)), label_true])  # log-loss is the negative log-likelihood
else:
metric_tmp = self.eva_metric(y_true, y_pred)
return metric_tmp
def save_init(self):
"""
Save the initialization for the network model under class HPT
"""
self.model.save_weights(self.cp_path+'/model_init.h5')
# self.model_mask.save_weights(self.cp_path+'/model_mask_init.h5')
def reset_model(self):
"""
Reset the full and mask network models under class HPT
"""
self.model.load_weights(self.cp_path+'/model_init.h5')
# self.model_mask.load_weights(self.cp_path+'/model_mask_init.h5')
# def reset_model(self):
# if int(tf.__version__[0]) == 2:
# # for layer in self.model.layers:
# # if isinstance(layer, tf.keras.Model):
# # reset_weights(layer)
# # continue
# # for k, initializer in layer.__dict__.items():
# # if "initializer" not in k:
# # continue
# # # find the corresponding variable
# # var = getattr(layer, k.replace("_initializer", ""))
# # var.assign(initializer(var.shape, var.dtype))
#
# for layer in self.model.layers:
# if isinstance(layer, tf.keras.Model): #if you're using a model as a layer
# reset_weights(layer) #apply function recursively
# continue
#
# #where are the initializers?
# if hasattr(layer, 'cell'):
# init_container = layer.cell
# else:
# init_container = layer
#
# for key, initializer in init_container.__dict__.items():
# if "initializer" not in key: #is this item an initializer?
# continue #if no, skip it
#
# # find the corresponding variable, like the kernel or the bias
# if key == 'recurrent_initializer': #special case check
# var = getattr(init_container, 'recurrent_kernel')
# else:
# var = getattr(init_container, key.replace("_initializer", ""))
#
# if var is None:
# continue
# else:
# var.assign(initializer(var.shape, var.dtype))
#
# if int(tf.__version__[0]) == 1:
# session = K.get_session()
# for layer in self.model.layers:
# if hasattr(layer, 'kernel_initializer'):
# layer.kernel.initializer.run(session=session)
# if hasattr(layer, 'bias_initializer'):
# layer.bias.initializer.run(session=session)
# for layer in self.model_perm.layers:
# if hasattr(layer, 'kernel_initializer'):
# layer.kernel.initializer.run(session=session)
# if hasattr(layer, 'bias_initializer'):
# layer.bias.initializer.run(session=session)
    ## can be extended to @abstractmethod
# def mask_cov(self, X, k=0):
# """
# Return instances with masked k-th hypothesized features.
#
# Parameters
# ----------
# X : array-like
# Target instances.
#
# k : integer, default = 0
# k-th hypothesized features in inf_feats
# """
# Z = X.copy()
# if type(self.inf_feats[k]) is list:
# Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], 0] = 0.
# else:
# Z[:,self.inf_feats[k]]= 0.
# return Z
def perm_cov(self, X, k=0):
"""
Return instances with permuted k-th hypothesized features.
Parameters
----------
X : array-like
Target instances.
k : integer, default = 0
k-th hypothesized features in inf_feats
"""
Z = X.copy()
if type(self.inf_feats[k]) is list:
## for channels_last image data: shape should be (#samples, img_rows, img_cols, channel)
Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], :] = np.random.permutation(Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], :])
else:
Z[:,self.inf_feats[k]]= np.random.permutation(Z[:,self.inf_feats[k]])
return Z
def noise_cov(self, X, k=0):
Z = X.copy()
Z[:,self.inf_feats[k]] = np.random.randn(len(X), len(self.inf_feats[k]))
return Z
def testing(self, X, y, fit_params, cv_num=5, cp='hommel', inf_ratio=.2):
"""
Return p-values for hypothesis testing for inf_feats in class HpermT.
Parameters
----------
X : array-like | shape = (n_samples, dim_features)
            Instances matrix/tensor, where n_samples is the number of samples and dim_features is the dimension of the features.
            If X is a vectorized feature, ``shape`` should be ``(#samples, dim of features)``.
            If X is image/matrix data, ``shape`` should be ``(#samples, img_rows, img_cols, channel)``, that is, *X must be channel_last image data*.
y: {array-like} of shape (n_samples,)
Output vector/matrix relative to X.
fit_params: {dict of fitting parameters}
See keras ``fit``: (https://keras.rstudio.com/reference/fit.html), including ``batch_size``, ``epoch``, ``callbacks``, ``validation_split``, ``validation_data``, and so on.
cv_num: int, default=5
The number of cross-validation to shuffle the estimation/inference samples in testing.
cp: {'gmean', 'min', 'hmean', 'Q1', 'hommel', 'cauchy'}, default ='hommel'
A method to combine p-values obtained from cross-validation.
        inf_ratio: float, default=0.2
            The inference sample ratio; the held-out inference set contains ``int(inf_ratio * len(X))`` samples.
Returns
-------
P_value: array of float [0, 1]
The p_values for target hypothesis testings.
"""
## create checkpoints path
if not os.path.exists(self.cp_path):
os.mkdir(self.cp_path)
## save initial weights
self.save_init()
P_value = []
for k in range(len(self.inf_feats)):
self.reset_model()
m, n = int(inf_ratio * len(X)), len(X) - int(inf_ratio * len(X))
P_value_cv = []
score_cv, score_perm_cv = [], []
for h in range(cv_num):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=n, random_state=h)
## prediction and inference in full model
self.reset_model()
history = self.model.fit(X_train, y_train, **fit_params)
## save and load model
# path_tmp = self.cp_path+'/model'+'_inf'+str(k)+'_cv'+str(h)+'.h5'
# self.model.save_weights(path_tmp)
# self.model.load_weights(path_tmp)
## prediction for original dataset
pred_y = self.model.predict(X_test)
metric_full = self.metric(y_test, pred_y)
score = metric_full.mean()
score_cv.append(score)
score_perm = []
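                # holdout permutation: keep the fitted model fixed and only permute the
                # hypothesized features of the held-out test sample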
for l in range(self.num_perm):
Z_test = self.perm_cov(X_test, k)
pred_y_perm = self.model.predict(Z_test)
# pred_y_perm = self.model(Z_tmp, training=False)
metric_perm = self.metric(y_test, pred_y_perm)
score_perm.append(metric_perm.mean())
score_perm_cv.append(score_perm)
score_cv = np.array(score_cv)
score_perm_cv = np.array(score_perm_cv)
cv_ave_score, cv_ave_score_perm = score_cv.mean(), np.mean(score_perm_cv, axis=0)
## compute p-value
print("%d th inf-feats perf score: %.3f, perf permutation score: %.3f(%.3f); num_perm: %d" %(k, cv_ave_score, cv_ave_score_perm.mean(), cv_ave_score_perm.std(), len(cv_ave_score_perm) ))
p_value_tmp = (np.sum(cv_ave_score_perm <= cv_ave_score) + 1.0) / (self.num_perm + 1.0)
if p_value_tmp < self.alpha:
print('reject %d th H0 with p_value: %.3f' %(k, p_value_tmp))
else:
print('accept %d th H0 with p_value: %.3f' %(k, p_value_tmp))
P_value.append(p_value_tmp)
# return P_value
self.p_values = P_value
return P_value
| 2.265625 | 2 |
tests/mztab_m_io/test_main.py | lifs-tools/pymzTab-m | 4 | 12768413 | '''
Created on 11.12.2018
@author: mirandaa
'''
import unittest
import pytest
from mztab_m_swagger_client.api_client import ApiClient
import json
from collections import namedtuple
from pprint import pprint
from mztab_m_io import mztab_parser
from pathlib import Path, PurePath
class MzTabParseTestCase(unittest.TestCase):
def setUp(self):
self.datapath = PurePath(Path(__file__).parents[1].absolute(), Path('data'))
def testJsonToModelToJson(self):
filePath = PurePath(self.datapath, 'lipidomics-example.mzTab.json')
with open(filePath, 'r') as jsonfile:
txt = jsonfile.read().replace('\n', '')
Response = namedtuple('Response', 'data')
response = Response(txt)
apiclient = ApiClient()
my_mztab = apiclient.deserialize(response, 'MzTab')
self.assertEqual("2.0.0-M", my_mztab.metadata.mz_tab_version)
self.assertEqual("ISAS-2018-1234", my_mztab.metadata.mz_tab_id)
self.assertIsNone(my_mztab.metadata.title)
self.assertEqual("Minimal proposed sample file for identification and quantification of lipids", my_mztab.metadata.description)
my_mztab_json = apiclient.sanitize_for_serialization(my_mztab)
print(my_mztab_json)
self.assertNotEqual('', my_mztab_json)
# TODO: reenable when TSV parsing works
# def testMzTabParsing(self, shared_datadir):
# # print(my_mztab_text)
#
# filePath = shared_datadir + 'lipidomics-example.mzTab'
# with open(filePath,'r') as f:
# text = f.read()
#
# res = mztab_parser.parse(text)
# pprint(res)
# self.assertNotEqual('', res)
| 1.96875 | 2 |
src/settings.py | nathanielcompton/publix-tenders | 0 | 12768414 | <reponame>nathanielcompton/publix-tenders<gh_stars>0
import os
TENDERS_SALE_URL = os.getenv(
"TENDERS_SALE_URL", "http://arepublixchickentendersubsonsale.com/"
)
ONSALE_STRING = os.getenv(
"ONSALE_STRING", "onsale:yes"
)
ONSALE_YES = "Oh yeah they are!"
ONSALE_NO = "Sorry, not today!"
| 1.5 | 2 |
{{cookiecutter.repo_name}}/src/{{cookiecutter.django_project_name}}/apps.py | ionelmc/cookiecutter-django-docker | 1 | 12768415 | <filename>{{cookiecutter.repo_name}}/src/{{cookiecutter.django_project_name}}/apps.py
from django.contrib.admin.apps import AdminConfig
class CustomAdminConfig(AdminConfig):
default_site = '{{cookiecutter.django_project_name}}.admin.CustomAdminSite'
| 1.421875 | 1 |
src/apialchemy/service/__init__.py | homedepot/apialchemy | 1 | 12768416 | <filename>src/apialchemy/service/__init__.py
import importlib
import re
def parse_uri(uri):
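    # e.g. (illustrative) "github+https://user:token@host/path" parses to
    # vendor="github", scheme="https", conn_str="user:token@host/path"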
pattern = re.compile(
r'^(?P<vendor>\w+)(?:\+(?P<scheme>http|https))?://(?P<conn_str>.*)',
re.X,
)
m = pattern.match(uri)
if m is not None:
components = m.groupdict()
return ServiceConnection(components['vendor'], components['scheme'], components['conn_str'])
return None
def create_service(uri):
sc = parse_uri(uri)
if sc is not None:
return Service.create(sc)
return None
class ServiceConnection:
def __init__(self, vendor, scheme, conn_str):
self.vendor = vendor
self.scheme = scheme
self.conn_str = conn_str
class Service:
@staticmethod
def create(sc):
module = importlib.import_module('apialchemy.vendors.' + sc.vendor)
return module.Service(sc.scheme, sc.conn_str)
| 2.546875 | 3 |
36.Notepad.py | sarincr/GUI-With-Tkinter-using-Python | 0 | 12768417 | <gh_stars>0
from tkinter import *
import tkinter as tks
from tkinter import IntVar
tk = Tk()
tk.geometry('500x500')
tk.title('Notepad')
main_menu = tks.Menu()
file = Menu(main_menu, tearoff = False)
edit = Menu(main_menu, tearoff = False)
view = Menu(main_menu, tearoff = False)
theme = Menu(main_menu, tearoff = False)
main_menu.add_cascade(label= "File", menu = file)
main_menu.add_cascade(label= "Edit", menu = edit)
main_menu.add_cascade(label= "View", menu = view)
main_menu.add_cascade(label= "Theme", menu = theme)
view.add_checkbutton(label="Tool Bar", onvalue=True, offvalue=False, compound=tks.LEFT)
view.add_checkbutton(label="Status Bar", onvalue=True, offvalue=False, compound=tks.LEFT)
file.add_command(label ="New", compound = tks.LEFT, accelerator = "Ctrl +N")
file.add_command(label ="Open", compound = tks.LEFT, accelerator = "Ctrl +O")
file.add_command(label ="Save", compound = tks.LEFT, accelerator = "Ctrl +S")
file.add_command(label ="Save as", compound = tks.LEFT, accelerator = "Ctrl +NS")
file.add_command(label ="Exit", compound = tks.LEFT, accelerator = "Ctrl +Esc")
edit.add_command (label = 'Copy' , compound = tks.LEFT, accelerator = 'ctrl+C')
edit.add_command (label = 'Paste' , compound = tks.LEFT, accelerator = 'ctrl+V')
edit.add_command (label = 'Cut' , compound = tks.LEFT, accelerator = 'ctrl+X')
edit.add_command (label = 'Clear' , compound = tks.LEFT, accelerator = 'ctrl+alt+x')
edit.add_command (label = 'Find' , compound = tks.LEFT, accelerator = 'ctrl+F')
color_dict = {
'Light Default ' : ('#000000', '#ffffff'),
'Light Plus' : ('#474747', '#e0e0e0'),
'Dark' : ('#c4c4c4', '#2d2d2d'),
'Red' : ('#2d2d2d', '#ffe8e8'),
'Monokai' : ('#d3b774', '#474747'),
'Night Blue' :('#ededed', '#6b9dc2')
}
count = 0
for i in color_dict:
theme.add_radiobutton(label = i , compound = tks.LEFT)
count +=1
tk.config(menu=main_menu)
tk.mainloop()
| 2.609375 | 3 |
clothing/forms.py | AmitAharoni/iWear2021 | 0 | 12768418 | from django.utils import timezone
from django import forms
from clothing.models import ClothingItem, WornEvent
from users.models import User
class ItemCreationForm(forms.ModelForm):
name = forms.CharField(label='Item Name')
owner = forms.ModelChoiceField(widget=forms.HiddenInput(), queryset=User.objects.all())
class Meta:
model = ClothingItem
fields = ('name', 'owner', 'category', 'tag_id','image_url')
class WornEventCreationForm(forms.ModelForm):
item = forms.ModelChoiceField(ClothingItem.objects.all())
class Meta:
model = WornEvent
fields = ('item',)
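# NOTE: the Form-based class below shadows the ModelForm of the same name defined above.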
class WornEventCreationForm(forms.Form):
tag_id = forms.CharField()
class WeatherRangeForm(forms.Form):
minimum_temperature = forms.IntegerField(min_value=-15)
maximum_temperature = forms.IntegerField(max_value=45)
| 2.390625 | 2 |
third-party-synthetic/active-gate-extensions/extension-third-party-sftp/src/sftp_extension.py | ruxit/ruxit-api | 1 | 12768419 | import logging
from typing import List
from datetime import datetime
from sftp_client import SFTPClient
from ruxit.api.base_plugin import RemoteBasePlugin
from dynatrace import Dynatrace
from dynatrace.environment_v1.synthetic_third_party import SYNTHETIC_EVENT_TYPE_OUTAGE, SyntheticTestStep, SyntheticMonitorStepResult
log = logging.getLogger(__name__)
ENGINE_NAME = "SFTP"
class SFTPExtension(RemoteBasePlugin):
def initialize(self, **kwargs):
self.dt_client = Dynatrace(self.config.get("api_url"), self.config.get("api_token"), log=log, proxies=self.build_proxy_url())
self.executions = 0
self.failures_detected = 0
def build_proxy_url(self):
proxy_address = self.config.get("proxy_address")
proxy_username = self.config.get("proxy_username")
proxy_password = self.config.get("proxy_password")
if proxy_address:
protocol, address = proxy_address.split("://")
proxy_url = f"{protocol}://"
if proxy_username:
proxy_url += proxy_username
if proxy_password:
proxy_url += f":{proxy_password}"
proxy_url += f"@{address}"
return {"https": proxy_url}
return {}
def query(self, **kwargs):
log.setLevel(self.config.get("log_level"))
hostname = self.config.get("hostname").strip()
username = self.config.get("username")
port = int(self.config.get("port", 22))
password = self.config.get("password") if self.config.get("password") else None
key = self.config.get("ssh_key_file") if self.config.get("ssh_key_file") else None
passphrase = self.config.get("ssh_key_passphrase") if self.config.get("ssh_key_passphrase") else None
test_read = self.config.get("test_read", False)
test_put = self.config.get("test_put", False)
local_file = self.config.get("local_file") if self.config.get("local_file") else None
remote_dir = self.config.get("remote_dir") if self.config.get("remote_dir") else None
test_title = self.config.get("test_name") if self.config.get("test_name") else f"{hostname}:{port}"
location = self.config.get("location") if self.config.get("location") else "ActiveGate"
location_id = location.replace(" ", "_").lower()
frequency = int(self.config.get("frequency")) if self.config.get("frequency") else 15
failure_count = self.config.get("failure_count", 1)
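        # query() runs on every scheduler tick; the synthetic test itself only executes
        # once every `frequency` ticks (tracked by the executions counter below)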
if self.executions % frequency == 0:
steps: List[SyntheticTestStep] = []
results: List[SyntheticMonitorStepResult] = []
test_response_time = 0
with SFTPClient(
hostname=hostname,
port=port,
username=username,
password=password,
key=key,
passphrase=<PASSWORD>phrase,
log=log
) as client:
conn_success, reason, conn_time = client.test_connect()
log.info(f"Test: {test_title}, Step: Connect, success: {conn_success}, time: {conn_time}")
success = conn_success
test_response_time += conn_time
steps.append(self.dt_client.third_part_synthetic_tests.create_synthetic_test_step(1, "SFTP Connect"))
results.append(self.dt_client.third_part_synthetic_tests.create_synthetic_test_step_result(1, datetime.now(), conn_time))
if conn_success and test_read:
read_success, reason, read_time = client.test_read(remote_dir)
log.info(f"Test: {test_title}, Step: Read, success: {read_success}, time: {read_time}")
success = success and read_success
test_response_time += read_time
steps.append(self.dt_client.third_part_synthetic_tests.create_synthetic_test_step(2, "SFTP Read"))
results.append(self.dt_client.third_part_synthetic_tests.create_synthetic_test_step_result(2, datetime.now(), read_time))
if conn_success and test_put:
put_success, reason, put_time = client.test_put(local_file, remote_dir)
log.info(f"Test: {test_title}, Step: Put, success: {put_success}, time: {put_time}")
success = success and put_success
test_response_time += put_time
steps.append(self.dt_client.third_part_synthetic_tests.create_synthetic_test_step(3, "SFTP Put"))
results.append(self.dt_client.third_part_synthetic_tests.create_synthetic_test_step_result(3, datetime.now(), put_time))
if not success:
self.failures_detected += 1
if self.failures_detected < failure_count:
log.info(f"Success: {success}. Attempt {self.failures_detected}/{failure_count}. Not reporting yet")
success = True
else:
self.failures_detected = 0
self.dt_client.third_part_synthetic_tests.report_simple_thirdparty_synthetic_test(
engine_name=ENGINE_NAME,
timestamp=datetime.now(),
location_id=location_id,
location_name=location,
test_id=self.activation.entity_id,
test_title=test_title,
schedule_interval=frequency * 60,
success=success,
response_time=test_response_time,
edit_link=f"#settings/customextension;id={self.plugin_info.name}",
icon_url="https://raw.githubusercontent.com/Dynatrace/dynatrace-api/master/third-party-synthetic/active-gate-extensions/extension-third-party-sftp/sftp.png",
detailed_steps=steps,
detailed_step_results=results
)
self.dt_client.third_part_synthetic_tests.report_simple_thirdparty_synthetic_test_event(
test_id=self.activation.entity_id,
name=f"SFTP Test failed for {test_title}",
location_id=location_id,
timestamp=datetime.now(),
state="open" if not success else "resolved",
event_type=SYNTHETIC_EVENT_TYPE_OUTAGE,
reason=reason,
engine_name=ENGINE_NAME
)
self.executions += 1 | 2 | 2 |
pythonforandroid/recipes/pyicu/__init__.py | ht-thomas/python-for-android | 19 | 12768420 | from os.path import join
from pythonforandroid.recipe import CppCompiledComponentsPythonRecipe
class PyICURecipe(CppCompiledComponentsPythonRecipe):
version = '1.9.2'
url = ('https://pypi.python.org/packages/source/P/PyICU/'
'PyICU-{version}.tar.gz')
depends = ["icu"]
patches = ['locale.patch']
def get_recipe_env(self, arch):
env = super().get_recipe_env(arch)
icu_include = join(
self.ctx.get_python_install_dir(), "include", "icu")
icu_recipe = self.get_recipe('icu', self.ctx)
icu_link_libs = icu_recipe.built_libraries.keys()
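        # lib[3:-3] strips an assumed "lib" prefix and ".so" suffix, e.g. "libicuuc.so" -> "icuuc"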
env["PYICU_LIBRARIES"] = ":".join(lib[3:-3] for lib in icu_link_libs)
env["CPPFLAGS"] += " -I" + icu_include
env["LDFLAGS"] += " -L" + join(
icu_recipe.get_build_dir(arch.arch), "icu_build", "lib"
)
return env
recipe = PyICURecipe()
| 2.3125 | 2 |
optixrap/cu/boolean_solid.py | hanswenzel/opticks | 11 | 12768421 | #
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# generated Tue Mar 14 18:57:46 2017
# from /Users/blyth/opticks/optixrap/cu
# base boolean-solid.h stem boolean-solid
# with command : /Users/blyth/opticks/bin/c_enums_to_python.py boolean-solid.h
import sys
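# Each class below mirrors an enum from boolean-solid.h: Act_ holds bitmask actions,
# CTRL_ the simplified control codes, State_ the ray-vs-primitive classification, and the
# Union_/Difference_/Intersection_ tables map (state w.r.t. A, state w.r.t. B) pairs to
# the action/control used when evaluating CSG boolean solids.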
#0
class Act_(object):
ReturnMiss = 0x1 << 0
ReturnAIfCloser = 0x1 << 1
ReturnAIfFarther = 0x1 << 2
ReturnA = 0x1 << 3
ReturnBIfCloser = 0x1 << 4
ReturnBIfFarther = 0x1 << 5
ReturnB = 0x1 << 6
ReturnFlipBIfCloser = 0x1 << 7
AdvanceAAndLoop = 0x1 << 8
AdvanceBAndLoop = 0x1 << 9
AdvanceAAndLoopIfCloser = 0x1 << 10
AdvanceBAndLoopIfCloser = 0x1 << 11
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#1
class CTRL_(object):
RETURN_MISS = 0
RETURN_A = 1
RETURN_B = 2
RETURN_FLIP_B = 3
LOOP_A = 4
LOOP_B = 5
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#2
class State_(object):
Enter = 0
Exit = 1
Miss = 2
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#3
class ERROR_(object):
LHS_POP_EMPTY = 0x1 << 0
RHS_POP_EMPTY = 0x1 << 1
LHS_END_NONEMPTY = 0x1 << 2
RHS_END_EMPTY = 0x1 << 3
BAD_CTRL = 0x1 << 4
LHS_OVERFLOW = 0x1 << 5
RHS_OVERFLOW = 0x1 << 6
LHS_TRANCHE_OVERFLOW = 0x1 << 7
RHS_TRANCHE_OVERFLOW = 0x1 << 8
RESULT_OVERFLOW = 0x1 << 9
OVERFLOW = 0x1 << 10
TRANCHE_OVERFLOW = 0x1 << 11
POP_EMPTY = 0x1 << 12
XOR_SIDE = 0x1 << 13
END_EMPTY = 0x1 << 14
ROOT_STATE = 0x1 << 15
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#4
class Union_(object):
EnterA_EnterB = Act_.ReturnAIfCloser | Act_.ReturnBIfCloser
EnterA_ExitB = Act_.ReturnBIfCloser | Act_.AdvanceAAndLoop
EnterA_MissB = Act_.ReturnA
ExitA_EnterB = Act_.ReturnAIfCloser | Act_.AdvanceBAndLoop
ExitA_ExitB = Act_.ReturnAIfFarther | Act_.ReturnBIfFarther
ExitA_MissB = Act_.ReturnA
MissA_EnterB = Act_.ReturnB
MissA_ExitB = Act_.ReturnB
MissA_MissB = Act_.ReturnMiss
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#5
class ACloser_Union_(object):
EnterA_EnterB = CTRL_.RETURN_A
EnterA_ExitB = CTRL_.LOOP_A
EnterA_MissB = CTRL_.RETURN_A
ExitA_EnterB = CTRL_.RETURN_A
ExitA_ExitB = CTRL_.RETURN_B
ExitA_MissB = CTRL_.RETURN_A
MissA_EnterB = CTRL_.RETURN_B
MissA_ExitB = CTRL_.RETURN_B
MissA_MissB = CTRL_.RETURN_MISS
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#6
class BCloser_Union_(object):
EnterA_EnterB = CTRL_.RETURN_B
EnterA_ExitB = CTRL_.RETURN_B
EnterA_MissB = CTRL_.RETURN_A
ExitA_EnterB = CTRL_.LOOP_B
ExitA_ExitB = CTRL_.RETURN_A
ExitA_MissB = CTRL_.RETURN_A
MissA_EnterB = CTRL_.RETURN_B
MissA_ExitB = CTRL_.RETURN_B
MissA_MissB = CTRL_.RETURN_MISS
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#7
class Difference_(object):
EnterA_EnterB = Act_.ReturnAIfCloser | Act_.AdvanceBAndLoop
EnterA_ExitB = Act_.AdvanceAAndLoopIfCloser | Act_.AdvanceBAndLoopIfCloser
EnterA_MissB = Act_.ReturnA
ExitA_EnterB = Act_.ReturnAIfCloser | Act_.ReturnFlipBIfCloser
ExitA_ExitB = Act_.ReturnFlipBIfCloser | Act_.AdvanceAAndLoop
ExitA_MissB = Act_.ReturnA
MissA_EnterB = Act_.ReturnMiss
MissA_ExitB = Act_.ReturnMiss
MissA_MissB = Act_.ReturnMiss
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#8
class ACloser_Difference_(object):
EnterA_EnterB = CTRL_.RETURN_A
EnterA_ExitB = CTRL_.LOOP_A
EnterA_MissB = CTRL_.RETURN_A
ExitA_EnterB = CTRL_.RETURN_A
ExitA_ExitB = CTRL_.LOOP_A
ExitA_MissB = CTRL_.RETURN_A
MissA_EnterB = CTRL_.RETURN_MISS
MissA_ExitB = CTRL_.RETURN_MISS
MissA_MissB = CTRL_.RETURN_MISS
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#9
class BCloser_Difference_(object):
EnterA_EnterB = CTRL_.LOOP_B
EnterA_ExitB = CTRL_.LOOP_B
EnterA_MissB = CTRL_.RETURN_A
ExitA_EnterB = CTRL_.RETURN_FLIP_B
ExitA_ExitB = CTRL_.RETURN_FLIP_B
ExitA_MissB = CTRL_.RETURN_A
MissA_EnterB = CTRL_.RETURN_MISS
MissA_ExitB = CTRL_.RETURN_MISS
MissA_MissB = CTRL_.RETURN_MISS
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#10
class Intersection_(object):
EnterA_EnterB = Act_.AdvanceAAndLoopIfCloser | Act_.AdvanceBAndLoopIfCloser
EnterA_ExitB = Act_.ReturnAIfCloser | Act_.AdvanceBAndLoop
EnterA_MissB = Act_.ReturnMiss
ExitA_EnterB = Act_.ReturnBIfCloser | Act_.AdvanceAAndLoop
ExitA_ExitB = Act_.ReturnAIfCloser | Act_.ReturnBIfCloser
ExitA_MissB = Act_.ReturnMiss
MissA_EnterB = Act_.ReturnMiss
MissA_ExitB = Act_.ReturnMiss
MissA_MissB = Act_.ReturnMiss
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#11
class ACloser_Intersection_(object):
EnterA_EnterB = CTRL_.LOOP_A
EnterA_ExitB = CTRL_.RETURN_A
EnterA_MissB = CTRL_.RETURN_MISS
ExitA_EnterB = CTRL_.LOOP_A
ExitA_ExitB = CTRL_.RETURN_A
ExitA_MissB = CTRL_.RETURN_MISS
MissA_EnterB = CTRL_.RETURN_MISS
MissA_ExitB = CTRL_.RETURN_MISS
MissA_MissB = CTRL_.RETURN_MISS
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
#12
class BCloser_Intersection_(object):
EnterA_EnterB = CTRL_.LOOP_B
EnterA_ExitB = CTRL_.LOOP_B
EnterA_MissB = CTRL_.RETURN_MISS
ExitA_EnterB = CTRL_.RETURN_B
ExitA_ExitB = CTRL_.RETURN_B
ExitA_MissB = CTRL_.RETURN_MISS
MissA_EnterB = CTRL_.RETURN_MISS
MissA_ExitB = CTRL_.RETURN_MISS
MissA_MissB = CTRL_.RETURN_MISS
@classmethod
def enum(cls):
return filter(lambda kv:type(kv[1]) is int,cls.__dict__.items())
@classmethod
def desc(cls, typ):
kvs = filter(lambda kv:kv[1] == typ, cls.enum())
return kvs[0][0] if len(kvs) == 1 else "UNKNOWN"
@classmethod
def descmask(cls, typ):
kvs = filter(lambda kv:kv[1] & typ, cls.enum())
return ",".join(map(lambda kv:kv[0], kvs))
@classmethod
def fromdesc(cls, label):
kvs = filter(lambda kv:kv[0] == label, cls.enum())
return kvs[0][1] if len(kvs) == 1 else -1
| 1.945313 | 2 |
openslides/agenda/migrations/0002_item_duration.py | swilde/OpenSlides | 3 | 12768422 | <reponame>swilde/OpenSlides<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:08
from __future__ import unicode_literals
from django.db import migrations, models
def convert_duration(apps, schema_editor):
"""
Converts the values of the old duration CharField to new duration
IntegerField. It uses the temporary field for proper renaming the field
in the end.
"""
Item = apps.get_model("agenda", "Item")
for item in Item.objects.all():
duration = item.duration
item.duration_tmp = None
if is_int(duration):
# Assuming that these are minutes.
item.duration_tmp = int(duration)
elif isinstance(duration, str):
# Assuming format (h)h:(m)m. If not, new value is None.
split = duration.split(":")
if len(split) == 2 and is_int(split[0]) and is_int(split[1]):
# Calculate new duration: hours * 60 + minutes.
item.duration_tmp = int(split[0]) * 60 + int(split[1])
item.save(skip_autoupdate=True)
def is_int(s):
"""
Short helper for duration conversion.
"""
try:
int(s)
except (ValueError, TypeError):
return False
else:
return True
class Migration(migrations.Migration):
dependencies = [("agenda", "0001_initial")]
operations = [
migrations.AddField(
model_name="item",
name="duration_tmp",
field=models.IntegerField(blank=True, null=True),
),
migrations.RunPython(convert_duration),
migrations.RemoveField(model_name="item", name="duration"),
migrations.RenameField(
model_name="item", old_name="duration_tmp", new_name="duration"
),
]
| 2.1875 | 2 |
projects/password-generator/test_password_generator.py | duttashi/learnpy | 0 | 12768423 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 24 11:53:24 2021
@author: Ashoo
"""
from password_generator import generate_simple_password
def test_generate_simple_password():
    assert generate_simple_password() == 'abcd'
drivelink/_diskdict.py | cdusold/DriveLink | 0 | 12768424 | from collections import MutableMapping
from os.path import expanduser, join
from glob import glob
from drivelink import Link
from drivelink.hash import hash
class _page(dict):
currentDepth = 0
class Dict(Link, MutableMapping):
"""
A dictionary class that maintains O(1) look up and write while keeping RAM usage O(1) as well.
This is accomplished through a rudimentary (for now) hashing scheme to page the
dictionary into parts.
The object created can be used any way a normal dict would be used, and will
clean itself up on python closing. This means saving all the remaining pages
to disk. If the file_basename and file_location was used before, it will load
the old values back into itself so that the results can be reused.
There are two ways to initialize this object, as a standard object:
>>> diskDict = Dict("sampledict")
>>> for i in range(10):
... diskDict[i] = chr(97+i)
...
>>> diskDict[3]
'd'
>>> 5 in diskDict
True
>>> del diskDict[5]
>>> ", ".join(str(x) for x in diskDict.keys())
'0, 1, 2, 3, 4, 6, 7, 8, 9'
>>> 5 in diskDict
False
Or through context:
>>> with Dict("testdict") as d:
... for i in range(10):
... d[i] = chr(97+i)
... print(d[3])
d
    If there is a way to break dict-like behavior and you can reproduce it, please
report it to `the GitHub issues <https://github.com/cdusold/DriveLink/issues/>`_.
"""
def __init__(self, file_basename, size_limit=1024, max_pages=16, file_location=join(expanduser("~"), ".DriveLink"), compression_ratio=0):
self.pages = _page()
self._total = set()
super(Dict, self).__init__(file_basename, size_limit, max_pages, file_location, compression_ratio)
def copy_from(self, other):
for key in other:
self[key] = other[key]
def load_index(self):
other_values = super(Dict, self).load_index()
if other_values is None:
return
self.pages.currentDepth = other_values[0]
for f in glob(self._file_base + '*'):
try:
self._total.add(int(f[len(self._file_base):]))
except ValueError:
pass
def store_index(self):
super(Dict, self).store_index(self.pages.currentDepth)
def open_page(self, k):
if k in self._total:
self._load_page_from_disk(k)
if k not in self.pages:
self.pages[k] = _page()
self._total.add(k)
self.pages[k].currentDepth = self.pages.currentDepth
self._queue.append(k)
def determine_index(self, key):
"""
Figures out where the key in question should be.
        Most frequently O(1); when a page becomes too large, there is
        an O(ln(n)) search that refactors O(k ln(n)) elements
once every O(k) insertions. A full refactor usually
happens in strides, moving a total of O(n) elements
split up over O(ln(n)) calls. This makes the worst
time refactor O(n) and usual refactor approximately
O(n/ ln(n)). Average case lookup O(n/k).
"""
k = hash(key) & self.pages.currentDepth
i = 0
while (k & (self.pages.currentDepth >> i)) not in self._total | set([0]):
i += 1
self._branchpage(k & (self.pages.currentDepth >> i))
return k, key
def page_indices(self):
for k in list(self._total):
yield k
def __setitem__(self, key, value):
'''
Sets a value that a key maps to.
'''
super(Dict, self).__setitem__(key, value)
i, _ = self.determine_index(key)
if len(self.pages[i]) > self.size_limit:
if self.pages[i].currentDepth == self.pages.currentDepth:
self.pages.currentDepth <<= 1
self.pages.currentDepth |= 1
self._branchpage(i)
def __contains__(self, item):
try:
i, k = self._finditem(item)
except:
return False
return k in self.pages[i]
def page_removed(self, number):
self._total.remove(number)
def __str__(self):
return "Dictionary with values stored to " + self._file_base
def _branchpage(self, pagenumber):
self._guarantee_page(pagenumber)
if self.pages[pagenumber].currentDepth == self.pages.currentDepth:
return
self.pages[pagenumber].currentDepth = self.pages.currentDepth
for key in set(self.pages[pagenumber].keys()):
k = hash(key) & self.pages.currentDepth
if k != pagenumber:
self._guarantee_page(pagenumber)
v = self.pages[pagenumber][key]
del self.pages[pagenumber][key]
self._guarantee_page(k)
self.pages[k][key] = v
| 2.984375 | 3 |
sqlalchemy_ydb/__init__.py | blinkov/sqlalchemy-ydb | 0 | 12768425 | <filename>sqlalchemy_ydb/__init__.py
from __future__ import absolute_import
from __future__ import unicode_literals
from kikimr.public.dbapi.errors import NotSupportedError
try:
from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext
from sqlalchemy.sql.compiler import DDLCompiler, IdentifierPreparer, GenericTypeCompiler
class YdbIdentifierPreparer(IdentifierPreparer):
def __init__(self, dialect):
super(YdbIdentifierPreparer, self).__init__(dialect,
initial_quote='`',
final_quote='`')
class YdbDDLCompiler(DDLCompiler):
raw_create_table = None
def visit_create_table(self, create):
self.raw_create_table = create
return super(YdbDDLCompiler, self).visit_create_table(create)
class YdbTypeCompiler(GenericTypeCompiler):
def _render_string_type(self, type_, name):
text = name
if type_.length:
pass
if type_.collation:
raise NotSupportedError
return text
class YdbExecutionContext(DefaultExecutionContext):
pass
class YdbDialect(DefaultDialect):
name = 'ydb'
supports_alter = False
max_identifier_length = 255
supports_sane_rowcount = False
supports_native_enum = False
supports_native_boolean = True
supports_smallserial = False
supports_sequences = False
sequences_optional = True
preexecute_autoincrement_sequences = True
postfetch_lastrowid = False
supports_default_values = False
supports_empty_insert = False
supports_multivalues_insert = True
default_paramstyle = 'qmark'
isolation_level = None
preparer = YdbIdentifierPreparer
ddl_compiler = YdbDDLCompiler
type_compiler = YdbTypeCompiler
execution_ctx_cls = YdbExecutionContext
@staticmethod
def dbapi():
import kikimr.public.dbapi
return kikimr.public.dbapi
def has_table(self, connection, table_name, schema=None):
if schema is not None:
raise NotSupportedError
return False # TODO
def _check_unicode_returns(self, connection, additional_tests=None):
return True
except ImportError:
class YdbDialect(object):
def __init__(self):
raise RuntimeError('could not import sqlalchemy')
| 2.140625 | 2 |
manage.py | tydonk/blogful | 1 | 12768426 | import os
from flask_script import Manager
from blog import app
from blog.database import session, Entry
manager = Manager(app)
@manager.command
def run():
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port)
@manager.command
def seed():
content = """TEST TEST TEST, these entries are all just tests. Mic check one, two
, one, two, testing, testing."""
for i in range(1,26):
entry = Entry(
title="Test Entry #{}".format(i),
content=content
)
session.add(entry)
session.commit()
from getpass import getpass
from werkzeug.security import generate_password_hash
from blog.database import User
@manager.command
def adduser():
name = input("Name: ")
email = input("Email: ")
if session.query(User).filter_by(email=email).first():
print("User with that email address already exists")
return
password = ""
while len(password) < 8 or password != password_2:
password = getpass("Password: ")
password_2 = getpass("<PASSWORD>: ")
user = User(name=name, email=email,
password=generate_password_hash(password))
session.add(user)
session.commit()
from flask_migrate import Migrate, MigrateCommand
from blog.database import Base
class DB(object):
def __init__(self, metadata):
self.metadata = metadata
migrate = Migrate(app, DB(Base.metadata))
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run() | 2.578125 | 3 |
conpaas-services/src/conpaas/services/taskfarm/agent/agent.py | bopopescu/conpaas-1 | 5 | 12768427 | <reponame>bopopescu/conpaas-1
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from subprocess import Popen
from conpaas.core.expose import expose
from conpaas.core.https.server import HttpJsonResponse
from conpaas.core.agent import BaseAgent
class TaskFarmAgent(BaseAgent):
"""Agent class with the following exposed methods:
check_agent_process() -- GET
create_hub(my_ip) -- POST
create_node(my_ip, hub_ip) -- POST
"""
def __init__(self, config_parser, **kwargs):
"""Initialize TaskFarm Agent.
'config_parser' represents the agent config file.
**kwargs holds anything that can't be sent in config_parser.
"""
BaseAgent.__init__(self, config_parser)
# Path to the TaskFarm JAR file
self.taskfarm_dir = config_parser.get('agent', 'CONPAAS_HOME')
# The following two variables have the same value on the Hub
self.my_ip_address = None
self.hub_ip_address = None
@expose('POST')
def create_hub(self, kwargs):
"""Create a TaskFarm Hub by starting taskfarm server with -role hub"""
self.logger.info('Hub starting up')
self.state = 'PROLOGUE'
self.my_ip_address = self.hub_ip_address = kwargs['my_ip']
# Starting taskfarm hub
start_args = [ "java", "-jar", "taskfarm-server", "-role", "hub" ]
self.logger.debug("Running command: '%s'. cwd='%s'" % (
" ".join(start_args), self.taskfarm_dir))
proc = Popen(start_args, cwd=self.taskfarm_dir, close_fds=True)
self.state = 'RUNNING'
self.logger.info('Hub started up. TaskFarm pid=%d' % proc.pid)
return HttpJsonResponse()
@expose('POST')
def create_node(self, kwargs):
"""Create a TaskFarm Node. As this host will actually fire up browser
sessions, and we want to run the tests in a non-interactive fashion, X
output will be sent to a fake display."""
self.logger.info('Node starting up')
self.state = 'ADAPTING'
self.my_ip_address = kwargs['my_ip']
self.hub_ip_address = kwargs['hub_ip']
# Running the TaskFarm Node via xvfb-run and DISPLAY set to :1. We
# have to specify the PATH because Popen overrides all the environment
# variables if env is specified. Using port 3306 (MySQL) to avoid
# requesting yet another port to be open.
# TODO: as this file was created from a BLUEPRINT file,
# you may want to change ports, paths and/or other start_args
# to meet your specific service/server needs
start_args = [
"xvfb-run", "--auto-servernum",
"java", "-jar", "taskfarm-server",
"-role", "node", "-port", "3306",
"-hub", "http://%s:4444/grid/register" % self.hub_ip_address,
"-host", self.my_ip_address,
"-maxSession", "6",
"-browser", "browserName=firefox,maxInstances=3",
"-browser", "browserName=chrome,maxInstances=3",
]
env = {
'DISPLAY': ':1',
'PATH': '/bin:/usr/bin:/usr/local/bin'
}
self.logger.debug("Running command: '%s'. cwd='%s', env='%s'" % (
" ".join(start_args), self.taskfarm_dir, env))
proc = Popen(start_args, cwd=self.taskfarm_dir, env=env, close_fds=True)
self.state = 'RUNNING'
self.logger.info('Node started up. TaskFarm pid=%d' % proc.pid)
return HttpJsonResponse()
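
# Usage sketch (illustrative only): these exposed methods are normally invoked
# by the ConPaaS manager over its HTTPS/JSON interface, but a direct call would
# look roughly like the following (the IPs are just examples):
#
#   agent = TaskFarmAgent(config_parser)
#   agent.create_hub({'my_ip': '10.0.0.1'})
#   agent.create_node({'my_ip': '10.0.0.2', 'hub_ip': '10.0.0.1'})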
| 2.296875 | 2 |
python3_cron_scripts/libs3/ZoneManager.py | utkarshiam/Marinus | 0 | 12768428 | #!/usr/bin/python3
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This class mostly exists because almost every script needs to do a get_distinct_zones
Having it centralized means that the included and excluded statuses can be managed in one place.
"""
import logging
from datetime import datetime
from pymongo import MongoClient
from tld import get_fld
class ZoneManager(object):
# A status of confirmed typically means it was entered by a human
CONFIRMED = "confirmed"
# A status of unconfirmed means that it was added via automation
# It has not been reviewed by a human
UNCONFIRMED = "unconfirmed"
# A status of false positive means that a human identified that automation made a mistake
FALSE_POSITIVE = "false_positive"
# A status of expired means that the automation believes that the domain is no longer registered
EXPIRED = "expired"
# The MongoConnector
mongo_connector = None
# The zone collection
zone_collection = None
# The logger
_logger = None
def _log(self):
"""
Get the log
"""
return logging.getLogger(__name__)
def __init__(self, mongo_connector):
"""
Initialize the MongoDB Connector
"""
self._logger = self._log()
self.mongo_connector = mongo_connector
self.zone_collection = mongo_connector.get_zone_connection()
def _check_valid_status(self, status):
if (
status != ZoneManager.EXPIRED
and status != ZoneManager.FALSE_POSITIVE
and status != ZoneManager.CONFIRMED
and status != ZoneManager.UNCONFIRMED
):
self._logger.error("ERROR: Bad status value")
return False
return True
@staticmethod
def get_distinct_zones(mongo_connector, includeAll=False):
"""
This is the most common usage of get zones where the caller wants just the list of
active zones.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_distinct(zones_collection, "zone")
else:
zone_results = mongo_connector.perform_distinct(
zones_collection,
"zone",
{"status": {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}},
)
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_reversed_zones(mongo_connector):
"""
Retrieve the list of active zones and then reverse them to match the Common Crawl format
"""
zones_collection = mongo_connector.get_zone_connection()
zone_results = mongo_connector.perform_distinct(
zones_collection,
"zone",
{"status": {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}},
)
zones = []
for zone in zone_results:
if zone.find("."):
zone_parts = zone.split(".")
# The vertices.txt entries from common_crawl are in reverse order (e.g. org.example.www)
# To string match faster, the zones are stored in a reverse format prior to matching.
# This avoids having to reverse each entry in the file which is less efficient.
rev_zone = ""
for part in zone_parts:
rev_zone = part + "." + rev_zone
rev_zone = rev_zone[:-1]
zones.append(rev_zone)
return zones
@staticmethod
def get_zones_by_source(mongo_connector, source, includeAll=False):
"""
Returns a list of zones based on the provided reporting source
"""
zone_collection = mongo_connector.get_zone_connection()
if includeAll:
zones = mongo_connector.perform_distinct(
zone_collection, "zone", {"reporting_sources.source": source}
)
else:
zones = mongo_connector.perform_distinct(
zone_collection,
"zone",
{
"reporting_sources.source": source,
"status": {
"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]
},
},
)
return zones
@staticmethod
def get_zones(mongo_connector, includeAll=False):
"""
This is will return the full zones object for all active zones.
This returns the complete json objects for the matching descriptions
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_find(zones_collection, {})
else:
zone_results = mongo_connector.perform_find(
zones_collection,
{"status": {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}},
)
zones = []
for zone in zone_results:
if zone["zone"].find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_root_domain(value, zone=None):
"""
Get the root domain (FLD) for the provided value
"""
res = get_fld(value, fix_protocol=True, fail_silently=True)
if res is None:
return zone
return res
def get_zone(self, zone):
"""
Fetch the full individual zone record.
This is not a static method since it would probably be called repeatedly.
"""
return self.mongo_connector.perform_find_one(
self.zone_collection, {"zone": zone}
)
def get_zones_by_status(self, status):
"""
This returns the list of zones associated with the provided status.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
if not self._check_valid_status(status):
return
zone_results = self.mongo_connector.perform_distinct(
self.zone_collection, "zone", {"status": status}
)
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
def set_status(self, zone, status, caller):
"""
Set a zone to expired.
"""
count = self.mongo_connector.perform_count(self.zone_collection, {"zone": zone})
if count == 0:
self._logger.error("ERROR: Invalid zone!")
return
if (
status != ZoneManager.EXPIRED
and status != ZoneManager.FALSE_POSITIVE
and status != ZoneManager.CONFIRMED
and status != ZoneManager.UNCONFIRMED
):
self._logger.error("ERROR: Bad status value!")
return
if caller is None or caller == "":
self._logger.error("ERROR: Please provide a caller value!")
return
now = datetime.now()
note = caller + " set to " + status + " on " + str(now)
self.zone_collection.update_one(
{"zone": zone},
{"$set": {"status": status, "updated": now}, "$addToSet": {"notes": note}},
)
def add_note(self, zone, note):
"""
In the future, there should probably be restrictions on note length.
For now, it is not set until more information on usage is available.
"""
self.zone_collection.update_one({"zone": zone}, {"$addToSet": {"notes": note}})
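
# Usage sketch (illustrative only; the import path and zone name below are
# assumptions, not defined by this module):
#
#   from libs3.MongoConnector import MongoConnector
#   mongo_connector = MongoConnector()
#   zone_manager = ZoneManager(mongo_connector)
#   active_zones = ZoneManager.get_distinct_zones(mongo_connector)
#   zone_manager.set_status("example.org", ZoneManager.CONFIRMED, caller="manual_review")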
| 2.3125 | 2 |
environment.py | zhangvwk42/modified-snake-reinforcement-learning | 0 | 12768429 | <filename>environment.py
import random
import math
from initialization import *
# random.seed(40)
class Directions:
UP = 'UP'
DOWN = 'DOWN'
RIGHT = 'RIGHT'
LEFT = 'LEFT'
ALL = [UP, DOWN, LEFT, RIGHT]
REVERSE = {UP: DOWN, DOWN: UP, LEFT: RIGHT, RIGHT: LEFT}
class Setup:
# Initial snake coordinates from head to tail
snakeCoord_X = [140, 140, 140]
snakeCoord_Y = [200, 180, 160]
trainIters = 50000
testIters = 1000
training_timeLimit = 30*60
testing_timeLimit = 15*60
direction = random.choice(Directions.ALL) # Initial direction
# Learning parameters
discount = 0.9
alpha = 0.8
# Exploration
# Epsilon-greedy
epsilon_u = 0
epsilon_l = 0.01 # epsilon lower bound
# Softmax
T = 1.0
score_threshold = 8
class Block:
def __init__(self, numBlocks):
self.numBlocks = numBlocks
# Returns list of block-unoccupied x and y coordinates
def unoccupied(self):
numBlocks = self.numBlocks
blockPos_normalized = [(random.randint(1, int_width), \
random.randint(1, int_height)) \
for i in range(numBlocks)]
blockPos = [(blockPos_normalized[i][0]*pixel, \
blockPos_normalized[i][1]*pixel) \
for i in range(numBlocks)]
blockPos_X = set(blockPos_normalized[i][0] for i in range(numBlocks))
blockPos_Y = set(blockPos_normalized[i][1] for i in range(numBlocks))
# X and Y coordinates that are block-unoccupied
possiblePos_X = list(set(i for i in range(int_width)) - blockPos_X)
possiblePos_Y = list(set(i for i in range(int_height)) - blockPos_Y)
return blockPos, possiblePos_X, possiblePos_Y
class Apple:
def __init__(self, possiblePos_X, possiblePos_Y, snakeCoord_X, snakeCoord_Y):
self.possiblePos_X = possiblePos_X
self.possiblePos_Y = possiblePos_Y
self.snakeCoord_X = snakeCoord_X
self.snakeCoord_Y = snakeCoord_Y
def position(self):
# Apple coordinates that fits
possiblePos_X = self.possiblePos_X
possiblePos_Y = self.possiblePos_Y
snakeCoord_X = self.snakeCoord_X
snakeCoord_Y = self.snakeCoord_Y
while True:
# Apple position taken randomly in block-unoccupied tiles
applePos = (random.choice(possiblePos_X)*pixel, \
random.choice(possiblePos_Y)*pixel)
# Apple cannot appear somewhere on the snake
if applePos not in zip(snakeCoord_X, snakeCoord_Y):
break
return applePos
class GameLogic:
def __init__(self, applePos, obstaclePos, snakeHead_X, snakeHead_Y):
self.applePos = applePos
self.obstaclePos = obstaclePos
self.snakeHead_X = snakeHead_X
self.snakeHead_Y = snakeHead_Y
def collisionWall(self):
snakeHead_X = self.snakeHead_X
snakeHead_Y = self.snakeHead_Y
# print 'hit wall'
return snakeHead_X < 0 or \
snakeHead_X >= windowWidth or \
snakeHead_Y < 0 or \
snakeHead_Y >= windowHeight
def collisionObstacle(self):
obstaclePos = self.obstaclePos
snakeHead_X = self.snakeHead_X
snakeHead_Y = self.snakeHead_Y
return (snakeHead_X, snakeHead_Y) in obstaclePos
def eatsApple(self):
applePos = self.applePos
snakeHead_X = self.snakeHead_X
snakeHead_Y = self.snakeHead_Y
return snakeHead_X == applePos[0] and snakeHead_Y == applePos[1] | 3.15625 | 3 |
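
# Usage sketch (illustrative only; pixel, int_width and int_height are expected
# to come from initialization.py):
#
#   block = Block(numBlocks=5)
#   blockPos, freeX, freeY = block.unoccupied()
#   apple = Apple(freeX, freeY, Setup.snakeCoord_X, Setup.snakeCoord_Y)
#   applePos = apple.position()
#   logic = GameLogic(applePos, blockPos, Setup.snakeCoord_X[0], Setup.snakeCoord_Y[0])
#   crashed = logic.collisionWall() or logic.collisionObstacle()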
resumeapi/main.py | ericrochow/resumeapi | 2 | 12768430 | <gh_stars>1-10
#!/usr/bin/env python3
from datetime import timedelta
import os
import logging
from typing import Dict, List, Optional
from dotenv import load_dotenv
from fastapi import Body, Depends, FastAPI, HTTPException, status
from fastapi.responses import FileResponse, JSONResponse, RedirectResponse
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
import uvicorn
from controller import AuthController, ResumeController
import schema
load_dotenv()
app = FastAPI(title="Resume API", version="0.1.0")
resume = ResumeController()
auth_control = AuthController()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
logger = logging.getLogger(__name__)
async def get_current_user(token: str = Depends(oauth2_scheme)):
"""
    Validate a JWT token and identify the currently-authenticated user.
Args:
token: A string containing a full JWT token.
Returns:
A string containing the username authenticated user.
Raises:
HttpException: Could not validate credentials.
"""
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(
token, os.getenv("SECRET_KEY"), algorithms=[os.getenv("ALGORITHM")]
)
username: str = payload.get("sub")
if not username:
raise credentials_exception
except JWTError:
raise credentials_exception
user = auth_control.get_user(username)
if not user:
raise credentials_exception
return user
async def get_current_active_user(
current_user: schema.User = Depends(get_current_user),
):
"""
Determine whether the currently-authenticated user is disabled or active.
Args:
current_user:
Returns:
The same object passed in.
Raises:
HttpException: The currently-active user is disabled.
"""
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post(
"/token",
summary="Create an API token",
description="Log into the API to generate a token",
response_description="Token info",
response_model=schema.Token,
tags=["Authentication"],
)
async def login_for_access_token(
form_data: OAuth2PasswordRequestForm = Depends(),
) -> Dict[str, str]:
"""
    Authenticate a user with Basic Auth and pass back a Bearer token.
Args:
form_data: An OAuth2PasswordRequest object containing Basic Auth credentials
Returns:
A dict containing the access token and token_type of "bearer".
Raises:
HttpException: Incorrect username or password.
"""
logger.debug("Attempting to log in as user %s", form_data.username)
valid_user = auth_control.authenticate_user(form_data.username, form_data.password)
if not valid_user:
raise HTTPException(
status_code=400,
detail="Incorrect username or password.",
headers={"WWW-Authenticate": "Bearer"},
)
max_token_expiration = int(os.getenv("ACCESS_TOKEN_EXPIRES_MINUTES"))
access_token_expires = timedelta(minutes=max_token_expiration)
access_token = auth_control.create_access_token(
data={"sub": valid_user.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
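
# Client-side sketch (illustrative only; the host, credentials and paths below
# are examples, not values defined by this module):
#
#   import requests
#   resp = requests.post("http://127.0.0.1:8000/token",
#                        data={"username": "me", "password": "secret"})
#   token = resp.json()["access_token"]
#   requests.get("http://127.0.0.1:8000/users/me",
#                headers={"Authorization": f"Bearer {token}"})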
# GET methods for read-only operations
@app.get(
"/users",
summary="List all users",
description="List all users and whether the user is active",
response_description="All users",
response_model=schema.Users,
tags=["Users"],
)
async def get_all_users(current_user: schema.User = Depends(get_current_active_user)):
return resume.get_all_users()
@app.get(
"/users/me",
summary="Current user info",
description="Return info about the currently-authenticated user",
response_description="User info",
response_model=schema.User,
tags=["Users"],
)
async def read_users_me(current_user: schema.User = Depends(get_current_active_user)):
return {"username": current_user.username, "disabled": current_user.disabled}
@app.get(
"/",
summary="",
description="",
response_description="",
response_model=schema.FullResume,
tags=["Full Resume"],
)
async def get_full_resume() -> dict:
    """Return the full resume as a single document."""
return resume.get_full_resume()
@app.get(
"/pdf", summary="", description="", response_description="", tags=["Full Resume"]
)
async def get_resume_pdf() -> FileResponse:
pdf = "ericrochowresume.pdf"
try:
return FileResponse(pdf)
except RuntimeError:
return JSONResponse(
status_code=404, content={"message": "No file at this location"}
)
@app.get(
"/html", summary="", description="", response_description="", tags=["Full Resume"]
)
async def get_resume_html() -> RedirectResponse:
return RedirectResponse("https://resume.ericroc.how")
@app.get(
"/basic_info",
summary="Basic info about me",
description="Gather basic details about me, such as contact info, pronouns, etc",
response_description="About Me",
response_model=schema.BasicInfo,
tags=["Basic Info"],
)
async def get_basic_info() -> Dict[str, List[Dict[str, str]]]:
return resume.get_basic_info()
@app.get(
"/basic_info/{fact}",
summary="Single basic info fact",
description="Find a single basic info fact about me based on the specified path",
response_description="Basic info fact",
tags=["Basic Info"],
)
async def get_basic_info_fact(fact: str) -> Dict[str, str]:
try:
return resume.get_basic_info_item(fact)
except KeyError:
return JSONResponse(
status_code=404, content={"message": f"No basic info item {fact}"}
)
@app.get(
"/education",
summary="Education history",
description="Find my full education history",
response_description="Education history",
response_model=schema.EducationHistory,
tags=["Education"],
)
async def get_education() -> Dict[str, List[Dict[str, str]]]:
return {"history": resume.get_all_education_history()}
@app.get(
"/education/{index}",
summary="Single education history item",
description="Find a single education history item specified in the path",
response_description="Education history item",
response_model=schema.Education,
responses={404: {"model": schema.Education}},
tags=["Education"],
)
async def get_education_item(index: int) -> Dict[str, str]:
try:
return resume.get_education_item(index)
except IndexError:
return JSONResponse(
status_code=404, content={"message": f"No education item {index}"}
)
@app.get(
"/experience",
summary="Full job history",
description="Find my full post-undergrad job history",
response_description="Job history",
# response_model=schema.JobHistory,
response_model=List[schema.Job],
tags=["Experience"],
)
async def get_experience() -> dict:
return resume.get_experience()
@app.get(
"/experience/{index}",
summary="Job history item",
description="Find a single job history item specified in the path",
response_description="Job history item",
response_model=schema.Job,
responses={404: {"model": schema.Job}},
tags=["Experience"],
)
async def get_experience_item(index: int) -> dict:
try:
return resume.get_experience_item(index)
except IndexError:
return JSONResponse(
status_code=404, content={"message": f"No experience item {index}"}
)
@app.get(
"/certifications",
summary="Certification list",
description=(
"Find my full list of current, previous, and in-progress certifications"
),
response_description="Certifications",
response_model=schema.CertificationHistory,
# response_model=List[schema.Certification],
tags=["Certifications"],
)
async def get_certification_history(
valid_only: Optional[bool] = False,
) -> Dict[str, List[Dict[str, str]]]:
certs = resume.get_certifications(valid_only=valid_only)
return {"certification_history": certs}
@app.get(
"/certifications/{certification}",
summary="Single certification",
description=(
"Find information about a single certification specified in the path (case"
" sensitive)"
),
response_description="Certification",
response_model=schema.Certification,
responses={404: {"model": schema.Certification}},
tags=["Certifications"],
)
async def get_certification_item(certification: str) -> dict:
try:
return resume.get_certification_by_name(certification)
except KeyError:
return JSONResponse(
status_code=404,
content={"message": f"No certification item {certification}"},
)
@app.get(
"/side_projects",
summary="Side projects",
description="Find a list of my highlighted side projects",
response_description="Side projects",
response_model=schema.SideProjects,
tags=["Side Projects"],
)
async def get_side_projects() -> dict:
return {"projects": resume.get_side_projects()}
@app.get(
"/side_projects/{project}",
summary="Single side project",
    description="Find a single side project specified in the path",
response_description="Side project",
tags=["Side Projects"],
)
async def get_side_project(project: str) -> Dict[str, str]:
try:
return resume.get_side_project(project)
except KeyError:
return JSONResponse(
status_code=404, content={"message": f"No side project {project}"}
)
@app.get(
"/interests",
summary="Interests",
description="Find all personal and technical/professional interests",
response_description="Interests",
response_model=schema.Interests,
tags=["Interests"],
)
async def get_all_interests() -> schema.Interests:
return resume.get_all_interests()
@app.get(
"/interests/{category}",
summary="Interests for the requested category",
description="Find all interests for the requested categories",
response_description="Interests",
response_model=schema.Interests,
response_model_exclude_none=True,
tags=["Interests"],
)
async def get_interests_by_category(
category: schema.InterestTypes,
) -> schema.Interests:
return resume.get_interests_by_category(category)
@app.get(
"/social_links",
summary="Social links",
description="Find a list of links to me on the web",
response_description="Social links",
response_model=schema.SocialLinks,
tags=["Social"],
)
async def get_social_links() -> Dict[str, str]:
return resume.get_social_links()
@app.get(
"/social_links/{platform}",
summary="Social link",
description="Find the social link specified in the path",
response_description="Social link",
tags=["Social"],
)
async def get_social_link_by_key(platform: schema.SocialLinkEnum) -> Dict[str, str]:
try:
return resume.get_social_link(platform)
except KeyError:
return JSONResponse(
status_code=404, content={"message": f"No link stored for {platform}"}
)
@app.get(
"/skills",
summary="Skills",
description="Find a (non-comprehensive) list of skills and info about them",
response_description="Skills",
response_model=schema.Skills,
tags=["Skills"],
)
async def get_skills() -> Dict[str, List[str]]:
return resume.get_skills()
@app.get(
"/skills/{skill}",
summary="Skill",
description="Find the skill specified in the path",
response_description="Skill",
response_model=schema.Skill,
tags=["Skills"],
)
async def get_skill(skill: str) -> dict:
try:
return resume.get_skill(skill)
except KeyError:
return JSONResponse(
status_code=404,
content={"message": f"The requested skill {skill} does not exist (yet!)"},
)
@app.get(
"/competencies",
summary="Competencies",
description="Find a list of general technical and non-technical skills",
response_description="Competencies",
response_model=schema.Competencies,
tags=["Skills"],
)
async def get_competencies() -> Dict[str, List[str]]:
return resume.get_competencies()
# PUT methods for create and update operations
@app.put(
"/basic_info",
summary="Create or update an existing fact",
description="",
response_description="ID of the new or updated fact",
tags=["Basic Info"],
)
async def add_or_update_fact(
basic_fact: schema.BasicInfoItem = Body(...),
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
return JSONResponse(
status_code=200, content={"id": resume.upsert_basic_info_item(basic_fact)}
)
@app.put(
"/education",
summary="Create or update an education item",
description="",
response_description="ID of the new or updated education item",
tags=["Education"],
)
async def add_or_update_education(
education_item: schema.Education = Body(...),
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
return JSONResponse(
status_code=200, content={"id": resume.upsert_education_item(education_item)}
)
@app.put(
"/experience",
summary="Create or update an experience item",
description="",
response_description="ID of the new or updated experience item",
tags=["Experience"],
)
async def add_or_update_experience(
    experience_item: schema.Job = Body(...),
    current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
    # Assumes the controller exposes an upsert_experience_item method that
    # mirrors the experience GET/DELETE endpoints above.
    return JSONResponse(
        status_code=200, content={"id": resume.upsert_experience_item(experience_item)}
)
@app.put(
"/certifications",
summary="Create or update a certification",
description="",
response_description="ID of the new or updated certification",
tags=["Certifications"],
)
async def add_or_update_certification(
certification: schema.Certification = Body(...),
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
    # Assumes the controller exposes an upsert_certification method that
    # mirrors the certification GET/DELETE endpoints above.
    return JSONResponse(
        status_code=200, content={"id": resume.upsert_certification(certification)}
)
@app.put(
"/side_projects",
summary="Create or update a side project",
description="",
response_description="ID of the new or updated side project",
tags=["Side Projects"],
)
async def add_or_update_side_project(
side_project: schema.SideProject = Body(...),
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
return JSONResponse(
status_code=200, content={"id": resume.upsert_side_project(side_project)}
)
@app.put(
"/interests/{category}",
summary="Create or update an interest",
description="",
response_description="ID of the new or updated interest",
tags=["Interests"],
)
async def add_or_update_interest(
category: schema.InterestTypes,
interest: schema.Interest = Body(...),
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
return JSONResponse(
status_code=200,
content={"id": resume.upsert_interest(category, interest.interest)},
)
@app.put(
"/social_links",
summary="Create or update a social link",
description="",
response_description="",
tags=["Social"],
)
async def add_or_create_social_link(
social_link: schema.SocialLink,
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
return JSONResponse(
status_code=200, content={"id": resume.upsert_social_link(social_link)}
)
@app.put(
"/skills",
summary="Create or update a skill",
description="",
response_description="ID of the new or updated skill",
tags=["Skills"],
)
async def add_or_update_skill(
skill: schema.Skill = Body(...),
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
return JSONResponse(status_code=200, content={"id": resume.upsert_skill(skill)})
@app.put(
    "/competencies",
    summary="Create or update a competency",
description="",
response_description="ID of the new or updated competency",
tags=["Skills"],
)
async def add_or_update_competency(
competency: schema.Competencies = Body(...),
current_user: schema.User = Depends(get_current_active_user),
) -> JSONResponse:
return JSONResponse(
status_code=200, content={"id": resume.upsert_competency(competency)}
)
# DELETE methods for delete operations
@app.delete(
"/basic_info/{fact}",
summary="Delete an existing fact",
tags=["Basic Info"],
status_code=204,
)
async def delete_fact(
fact: str, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_basic_info_item(fact)
except KeyError:
return JSONResponse(
status_code=404, content={"message": f"No such fact '{fact}'"}
)
@app.delete(
"/education/{index}",
summary="Delete an existing education history item",
tags=["Education"],
status_code=204,
)
async def delete_education_item(
index: int, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_education_item(index)
except KeyError:
return JSONResponse(
            status_code=404, content={"message": "No such education item exists"}
)
@app.delete(
"/experience/{index}",
summary="Delete an existing job history item",
tags=["Experience"],
status_code=204,
)
async def delete_experience_item(
index: int, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_experience_item(index)
except KeyError:
return JSONResponse(
status_code=404, content={"message": "No such job history item exists"}
)
@app.delete(
"/certifications/{certification}",
summary="Delete an existing certification",
tags=["Certifications"],
status_code=204,
)
async def delete_certification(
certification: str, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_certification(certification)
except KeyError:
return JSONResponse(
status_code=404, content={"message": "No such certification exists"}
)
@app.delete(
"/side_projects/{project}",
summary="Delete an existing side project",
tags=["Side Projects"],
status_code=204,
)
async def delete_side_project(
project: str, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_side_project(project)
except KeyError:
return JSONResponse(
status_code=404, content={"message": "No such side project exists"}
)
@app.delete(
"/interests/{interest}",
summary="Delete an existing interest",
tags=["Interests"],
status_code=204,
)
async def delete_interest(
interest: str, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_interest(interest)
except KeyError:
return JSONResponse(
status_code=404, content={"message": "No such interest exists"}
)
@app.delete(
"/social_links/{platform}",
summary="Delete an existing social link",
tags=["Social"],
status_code=204,
)
async def delete_social_link(
platform: str, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_social_link(platform)
except KeyError:
return JSONResponse(
status_code=404, content={"message": "No such social link exists"}
)
@app.delete(
"/skills/{skill}",
summary="Delete an existing skill",
tags=["Skills"],
status_code=204,
)
async def delete_skill(
skill: str, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_skill(skill)
except KeyError:
return JSONResponse(
status_code=404, content={"message": "No such skill exists"}
)
@app.delete(
"/competencies/{competency}",
summary="Delete an existing competency",
tags=["Skills"],
status_code=204,
)
async def delete_competency(
competency: str, current_user: schema.User = Depends(get_current_active_user)
):
try:
resume.delete_competency(competency)
except KeyError:
return JSONResponse(
status_code=404, content={"message": "No such competency exists"}
)
if __name__ == "__main__":
host = os.getenv("API_HOST", "127.0.0.1")
port = os.getenv("API_PORT", "8000")
log_level = os.getenv("API_LOG_LEVEL", "error")
reload_on_change = os.getenv("API_RELOAD_ON_CHANGE")
uvicorn.run(
"main:app",
host=host,
port=int(port),
log_level=log_level,
reload=(reload_on_change.title() == "True"),
)
| 2.671875 | 3 |
idcardquery/idcardquery.py | troubleforme/Tools | 1 | 12768431 | '''
Function:
身份证信息查询小工具
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import sys
from PyQt5 import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import QtWidgets, QtGui
from id_validator import validator
'''ID card information lookup tool'''
class IDCardQuery(QWidget):
def __init__(self, parent=None, **kwargs):
super(IDCardQuery, self).__init__(parent)
self.setWindowTitle('身份证信息查询小工具 - 微信公众号: Charles的皮卡丘')
self.setWindowIcon(QIcon('resources/icon.jpg'))
self.setFixedSize(600, 400)
        # Define the widgets
self.birthday_label = QLabel('出生日期: ')
self.birthday_line_edit = QLineEdit('2000-01-01')
self.address_label = QLabel('出生地区: ')
self.address_line_edit = QLineEdit('上海市')
self.sex_label = QLabel('性别: ')
self.sex_combobox = QComboBox()
self.sex_combobox.addItem('男')
self.sex_combobox.addItem('女')
self.generate_button = QPushButton('随机生成')
self.idcard_label = QLabel('身份证号: ')
self.idcard_line_edit = QLineEdit()
self.query_button = QPushButton('验证查询')
self.result_label = QLabel('查询结果: ')
self.result_text_edit = QTextEdit()
        # Lay out the widgets
self.grid = QGridLayout()
self.grid.addWidget(self.birthday_label, 0, 0, 1, 1)
self.grid.addWidget(self.birthday_line_edit, 0, 1, 1, 3)
self.grid.addWidget(self.address_label, 0, 4, 1, 1)
self.grid.addWidget(self.address_line_edit, 0, 5, 1, 3)
self.grid.addWidget(self.sex_label, 0, 8, 1, 1)
self.grid.addWidget(self.sex_combobox, 0, 9, 1, 2)
self.grid.addWidget(self.generate_button, 0, 11, 1, 1)
self.grid.addWidget(self.idcard_label, 1, 0, 1, 1)
self.grid.addWidget(self.idcard_line_edit, 1, 1, 1, 10)
self.grid.addWidget(self.query_button, 1, 11, 1, 1)
self.grid.addWidget(self.result_label, 2, 0, 1, 1)
self.grid.addWidget(self.result_text_edit, 3, 0, 1, 12)
self.setLayout(self.grid)
        # Bind events
self.generate_button.clicked.connect(self.generateID)
self.query_button.clicked.connect(self.CheckAndParseID)
    '''Validate the ID number and parse its information'''
def CheckAndParseID(self):
id_ = self.idcard_line_edit.text()
is_valid = validator.is_valid(id_)
if not is_valid:
self.result_text_edit.setText('身份证号是否合法: 否\n身份证号信息: 无')
return
showinfo = '身份证号是否合法: 是\n'
idinfos = validator.get_info(id_)
key_to_showtext = {
'address_code': '地址码',
'abandoned': '地址码是否废弃(1是废弃, 0是仍在使用)',
'address': '地址',
'birthday_code': '出生日期',
'constellation': '星座',
'chinese_zodiac': '生肖',
'sex': '性别',
}
for key, value in idinfos.items():
if key not in key_to_showtext: continue
showinfo += f'{key_to_showtext[key]}: {value}\n'
self.result_text_edit.setText(showinfo)
    '''Generate a fake ID card number'''
def generateID(self):
birthday = self.birthday_line_edit.text().replace('-', '')
birthday = birthday if birthday else None
address = self.address_line_edit.text()
address = address if address else None
sex = self.sex_combobox.currentText()
sex = 1 if sex == '男' else 0
try:
id_ = validator.fake_id(True, address, birthday, sex)
except:
id_ = validator.fake_id()
self.idcard_line_edit.setText(id_)
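
# Note: is_valid(), get_info() and fake_id() used above come from the
# third-party `id-validator` package; get_info() returns a dict whose keys are
# mapped to display labels by key_to_showtext in CheckAndParseID().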
'''run'''
if __name__ == '__main__':
app = QApplication(sys.argv)
client = IDCardQuery()
client.show()
sys.exit(app.exec_()) | 2.625 | 3 |
friends.py | auscompgeek/perfectgift | 0 | 12768432 | #!/usr/bin/env python3
# perfectgift: a tornado webapp for creating wish lists between friends
# Copyright (C) 2014, NCSS14 Group 4
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# 1. The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import epyc
import sqlite3
from tornado.ncss import Server
from tornado.log import app_log
from db.api import User, Product, UserNotFound
from login import logged_in, get_current_user
@logged_in
def friends_list(response):
current_username = get_current_user(response)
current_user = User.find(current_username)
friends_list = current_user.find_friends()
scope = {'friends': friends_list, 'logged_in': current_username}
response.write(epyc.render("templates/friends.html", scope))
def search(response):
search = response.get_field("q")
logged_in = get_current_user(response)
types = {
"people": 0,
"items": 1
}
tp = types.get(response.get_field("t"), 0)
if search:
if tp == types['people']:
items = User.search(search)
else:
items = Product.search(search)
scope = {
"query": search,
"results": items,
"tp": tp,
"types": types,
"logged_in": get_current_user(response)
}
app_log.info("[%s found for '%s'] %s" % (response.get_field('t'), search, items))
response.write(epyc.render("templates/search.html", scope))
else:
scope = {
"query": "",
"results": [],
"tp": tp,
"types": types,
"logged_in": get_current_user(response)
}
response.write(epyc.render("templates/search.html", scope))
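
# Note: /search expects the query-string fields `q` (search text) and `t`
# ("people" or "items"); any unrecognised `t` value falls back to a people
# search via types.get(..., 0).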
def hello(response, match):
response.write(epyc._render('''
<html>
<header>:)</header>
<body>
<h1>Hellos peoples of the internets</h1>
</body>
</html>
'''))
if __name__ == '__main__':
server=Server()
server.register('/search',search)
server.register('/friends/([a-zA-Z0-9_]+)', friends_list)
server.run()
| 2.140625 | 2 |
python/dgl/model_zoo/chem/mgcn.py | vipermu/dgl | 6 | 12768433 | # -*- coding:utf-8 -*-
# pylint: disable=C0103, C0111, W0621
"""Implementation of MGCN model"""
import torch
import torch.nn as nn
from .layers import AtomEmbedding, RBFLayer, EdgeEmbedding, \
MultiLevelInteraction
from ...nn.pytorch import SumPooling
class MGCNModel(nn.Module):
"""
`Molecular Property Prediction: A Multilevel
Quantum Interactions Modeling Perspective <https://arxiv.org/abs/1906.11081>`__
Parameters
----------
dim : int
Size for embeddings, default to be 128.
width : int
Width in the RBF layer, default to be 1.
cutoff : float
The maximum distance between nodes, default to be 5.0.
edge_dim : int
Size for edge embedding, default to be 128.
    output_dim : int
Number of target properties to predict, default to be 1.
n_conv : int
Number of convolutional layers, default to be 3.
norm : bool
Whether to perform normalization, default to be False.
atom_ref : Atom embeddings or None
If None, random representation initialization will be used. Otherwise,
they will be used to initialize atom representations. Default to be None.
pre_train : Atom embeddings or None
If None, random representation initialization will be used. Otherwise,
they will be used to initialize atom representations. Default to be None.
"""
def __init__(self,
dim=128,
width=1,
cutoff=5.0,
edge_dim=128,
output_dim=1,
n_conv=3,
norm=False,
atom_ref=None,
pre_train=None):
super(MGCNModel, self).__init__()
self._dim = dim
self.output_dim = output_dim
self.edge_dim = edge_dim
self.cutoff = cutoff
self.width = width
self.n_conv = n_conv
self.atom_ref = atom_ref
self.norm = norm
if pre_train is None:
self.embedding_layer = AtomEmbedding(dim)
else:
self.embedding_layer = AtomEmbedding(pre_train=pre_train)
self.rbf_layer = RBFLayer(0, cutoff, width)
self.edge_embedding_layer = EdgeEmbedding(dim=edge_dim)
if atom_ref is not None:
self.e0 = AtomEmbedding(1, pre_train=atom_ref)
self.conv_layers = nn.ModuleList([
MultiLevelInteraction(self.rbf_layer._fan_out, dim)
for i in range(n_conv)
])
self.out_project = nn.Sequential(
nn.Linear(dim * (self.n_conv + 1), 64),
nn.Softplus(beta=1, threshold=20),
nn.Linear(64, output_dim)
)
self.readout = SumPooling()
def set_mean_std(self, mean, std, device="cpu"):
"""Set the mean and std of atom representations for normalization.
Parameters
----------
mean : list or numpy array
The mean of labels
std : list or numpy array
The std of labels
device : str or torch.device
Device for storing the mean and std
"""
self.mean_per_node = torch.tensor(mean, device=device)
self.std_per_node = torch.tensor(std, device=device)
def forward(self, g, atom_types, edge_distances):
"""Predict molecule labels
Parameters
----------
g : DGLGraph
Input DGLGraph for molecule(s)
atom_types : int64 tensor of shape (B1)
Types for atoms in the graph(s), B1 for the number of atoms.
edge_distances : float32 tensor of shape (B2, 1)
Edge distances, B2 for the number of edges.
Returns
-------
prediction : float32 tensor of shape (B, output_dim)
Model prediction for the batch of graphs, B for the number
of graphs, output_dim for the prediction size.
"""
h = self.embedding_layer(atom_types)
e = self.edge_embedding_layer(g, atom_types)
rbf_out = self.rbf_layer(edge_distances)
all_layer_h = [h]
for idx in range(self.n_conv):
h, e = self.conv_layers[idx](g, h, e, rbf_out)
all_layer_h.append(h)
# concat multilevel representations
h = torch.cat(all_layer_h, dim=1)
h = self.out_project(h)
if self.atom_ref is not None:
h_ref = self.e0(atom_types)
h = h + h_ref
if self.norm:
h = h * self.std_per_node + self.mean_per_node
return self.readout(g, h)
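
# Usage sketch (illustrative only; how `g`, `atom_types` and `edge_distances`
# are built depends on the upstream featurizer, so the shapes below simply
# restate what forward() documents):
#
#   model = MGCNModel(dim=128, output_dim=1)
#   model.set_mean_std(mean=[0.0], std=[1.0])
#   # g: batched DGLGraph, atom_types: int64 tensor (num_atoms,),
#   # edge_distances: float32 tensor (num_edges, 1)
#   prediction = model(g, atom_types, edge_distances)  # (num_graphs, output_dim)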
| 2.546875 | 3 |
reversi/strategies/AlphaBetaMethods/GetScore.py | y-tetsu/othello | 10 | 12768434 | <filename>reversi/strategies/AlphaBetaMethods/GetScore.py<gh_stars>1-10
"""Get Score of AlphaBeta strategy
"""
import time
from reversi.strategies.common import Timer, Measure
def get_score(alphabeta, color, board, alpha, beta, depth, pid):
"""get_score
"""
return _get_score(_get_score, alphabeta, color, board, alpha, beta, depth, pid)
def get_score_measure(alphabeta, color, board, alpha, beta, depth, pid):
"""get_score_measure
"""
return _get_score_measure(_get_score_measure, alphabeta, color, board, alpha, beta, depth, pid)
def get_score_timer(alphabeta, color, board, alpha, beta, depth, pid):
"""get_score_timer
"""
return _get_score_timer(_get_score_timer, alphabeta, color, board, alpha, beta, depth, pid)
def get_score_measure_timer(alphabeta, color, board, alpha, beta, depth, pid):
"""get_score_measure_timer
"""
return _get_score_measure_timer(_get_score_measure_timer, alphabeta, color, board, alpha, beta, depth, pid)
def _get_score_measure(func, alphabeta, color, board, alpha, beta, depth, pid):
"""_get_score_measure
"""
measure(pid)
return _get_score(func, alphabeta, color, board, alpha, beta, depth, pid)
def _get_score_timer(func, alphabeta, color, board, alpha, beta, depth, pid):
"""_get_score_timer
"""
timeout = timer(pid)
return timeout if timeout else _get_score(func, alphabeta, color, board, alpha, beta, depth, pid)
def _get_score_measure_timer(func, alphabeta, color, board, alpha, beta, depth, pid):
"""_get_score_measure_timer
"""
measure(pid)
timeout = timer(pid)
return timeout if timeout else _get_score(func, alphabeta, color, board, alpha, beta, depth, pid)
def _get_score(func, alphabeta, color, board, alpha, beta, depth, pid):
"""_get_score
"""
    # Game over, or maximum search depth reached
legal_moves_b_bits = board.get_legal_moves_bits('black')
legal_moves_w_bits = board.get_legal_moves_bits('white')
is_game_end = True if not legal_moves_b_bits and not legal_moves_w_bits else False
if is_game_end or depth <= 0:
sign = 1 if color == 'black' else -1
return alphabeta.evaluator.evaluate(color=color, board=board, possibility_b=board.get_bit_count(legal_moves_b_bits), possibility_w=board.get_bit_count(legal_moves_w_bits)) * sign # noqa: E501
    # Pass: the current color has no legal moves
legal_moves_bits = legal_moves_b_bits if color == 'black' else legal_moves_w_bits
next_color = 'white' if color == 'black' else 'black'
if not legal_moves_bits:
return -func(func, alphabeta, next_color, board, -beta, -alpha, depth, pid)
    # Compute the evaluation value
size = board.size
mask = 1 << ((size**2)-1)
for y in range(size):
skip = False
for x in range(size):
if legal_moves_bits & mask:
board.put_disc(color, x, y)
score = -func(func, alphabeta, next_color, board, -beta, -alpha, depth-1, pid)
board.undo()
if Timer.is_timeout(pid):
return alpha
                alpha = max(alpha, score)  # keep the maximum value
                if alpha >= beta:  # prune
skip = True
break
mask >>= 1
if skip:
break
return alpha
def measure(pid):
"""measure
"""
if pid:
if pid not in Measure.count:
Measure.count[pid] = 0
Measure.count[pid] += 1
def timer(pid):
"""timer
"""
if pid:
if time.time() > Timer.deadline[pid]:
            Timer.timeout_flag[pid] = True  # a timeout occurred
return Timer.timeout_value[pid]
return None
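
# Usage sketch (illustrative only; in practice these helpers are driven by the
# AlphaBeta strategy object, and the Timer/Measure bookkeeping shown here is an
# assumption about how that caller prepares them):
#
#   Timer.deadline[pid] = time.time() + time_limit
#   Timer.timeout_flag[pid] = False
#   Timer.timeout_value[pid] = fallback_score
#   score = get_score_measure_timer(strategy, 'black', board, alpha, beta, depth, pid)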
| 2.328125 | 2 |
course/migrations/0005_coursesmodel_slug.py | dewale005/whitefieldcoursesite | 0 | 12768435 | # Generated by Django 3.2.6 on 2021-08-25 17:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0004_coursesmodel_thumbnail'),
]
operations = [
migrations.AddField(
model_name='coursesmodel',
name='slug',
field=models.SlugField(default=''),
),
]
| 1.570313 | 2 |
iplist.py | fritzdenim/python-codes | 0 | 12768436 | IPlist = ['172.16.31.10','172.16.58.3','192.168.127.12','172.16.17.32','172.16.31.10']
# for address in range(len(IPlist)):
# IPlist[address] = '%3s.%3s.%3s.%3s' % tuple(IPlist[address].split('.'))
# IPlist.sort(reverse=False)
# for address in range(len(IPlist)):
# IPlist[address] = IPlist[address].replace(' ', '')
# IPlist.sort(key=lambda address: list(map(int, address.split('.'))))
IPlist.sort(key=lambda address: list(map(str, address.split('.'))))
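# Note: mapping the octets with str sorts them lexicographically (so '9' > '10');
# the commented-out int variant above sorts them numerically instead.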
print(IPlist) | 3.1875 | 3 |
invoice/apps.py | AhmedElmawary/erp | 0 | 12768437 | <filename>invoice/apps.py
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class InvoiceConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'invoice'
verbose_name = _("Invoice")
def ready(self):
import invoice.signals
| 1.398438 | 1 |
train.py | bubbliiiing/facenet-tf2 | 20 | 12768438 | from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.optimizers import Adam
from nets.facenet import facenet
from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss
from utils.callbacks import (ExponentDecayScheduler, LFW_callback, LossHistory,
ModelCheckpoint)
from utils.utils_fit import fit_one_epoch
#------------------------------------------------#
#   Count how many distinct identities there are in total; used by the auxiliary cross-entropy loss to help convergence
#------------------------------------------------#
def get_num_classes(annotation_path):
with open(annotation_path) as f:
dataset_path = f.readlines()
labels = []
for path in dataset_path:
path_split = path.split(";")
labels.append(int(path_split[0]))
num_classes = np.max(labels) + 1
return num_classes
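
# Illustrative note (assumed format): each line of cls_train.txt is expected to
# look like "<class_id>;<path/to/face_image.jpg>", so the integer before the
# first ';' is the identity label counted above.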
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if __name__ == "__main__":
#----------------------------------------------------#
    #   Whether to train in eager mode
#----------------------------------------------------#
eager = False
#--------------------------------------------------------#
    #   Points to cls_train.txt under the project root; it lists the face image paths and labels
#--------------------------------------------------------#
annotation_path = "cls_train.txt"
#--------------------------------------------------------#
    #   Input image size; a common setting is [112, 112, 3]
#--------------------------------------------------------#
input_shape = [160, 160, 3]
#--------------------------------------------------------#
    #   Choice of backbone feature-extraction network:
    #   mobilenet; inception_resnetv1
#--------------------------------------------------------#
backbone = "mobilenet"
#----------------------------------------------------------------------------------------------------------------------------#
    #   See the README for how to download the weight file; it can also be fetched from the cloud-drive link there.
    #   The most important part of the pretrained weights is the backbone feature-extraction network, used for feature extraction.
    #
    #   If training was interrupted, you can set model_path to a weight file under the logs folder to reload the partially trained weights.
    #   Also adjust the training parameters below so that the epoch numbering stays continuous.
    #
    #   When model_path = '', no weights are loaded for the whole model.
    #
    #   To start training from the backbone's pretrained weights, set model_path to the backbone weights; only the backbone is loaded then.
    #   To train from scratch, set model_path = '' and Freeze_Train = False; training then starts from zero with no frozen-backbone stage.
    #   In general, training from scratch performs poorly because the weights are too random and feature extraction is ineffective.
    #
    #   Networks are normally not trained from scratch; at least the backbone weights are reused. Some papers say pretraining is unnecessary,
    #   mainly because their datasets are large and their tuning is excellent.
    #   If you must train the backbone, look into the ImageNet dataset: first train a classifier whose backbone is shared with this model, then train from that.
#----------------------------------------------------------------------------------------------------------------------------#
model_path = "model_data/facenet_mobilenet.h5"
#-------------------------------------------------------------------#
    #   Whether to use freeze training; by default the backbone is frozen first and unfrozen later.
#-------------------------------------------------------------------#
Freeze_Train = True
#-------------------------------------------------------------------#
    #   Whether to use multiple worker threads for data loading; 1 disables multithreading
    #   Enabling it speeds up data loading but uses more memory
    #   Only enable it when I/O is the bottleneck, i.e. the GPU computes much faster than images can be read
#-------------------------------------------------------------------#
num_workers = 1
#-------------------------------------------------------------------#
    #   Whether to enable LFW evaluation
#-------------------------------------------------------------------#
lfw_eval_flag = True
#-------------------------------------------------------------------#
    #   Paths to the LFW evaluation dataset and its corresponding pairs txt file
#-------------------------------------------------------------------#
lfw_dir_path = "lfw"
lfw_pairs_path = "model_data/lfw_pair.txt"
num_classes = get_num_classes(annotation_path)
model = facenet(input_shape, num_classes, backbone=backbone, mode="train")
model.load_weights(model_path, by_name=True, skip_mismatch=True)
#-------------------------------------------------------------------------------#
    #   Training-parameter setup
    #   logging is the save directory for TensorBoard
    #   checkpoint controls how weights are saved; period sets how many epochs pass between saves
    #   reduce_lr sets how the learning rate is decayed
    #   early_stopping enables early stopping: training ends automatically once val_loss stops improving, i.e. the model has basically converged
#-------------------------------------------------------------------------------#
checkpoint_period = ModelCheckpoint('logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=False, period=1)
reduce_lr = ExponentDecayScheduler(decay_rate = 0.94, verbose = 1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
tensorboard = TensorBoard(log_dir='logs/')
loss_history = LossHistory('logs/')
#----------------------#
    #   LFW evaluation
#----------------------#
test_loader = LFWDataset(dir=lfw_dir_path, pairs_path=lfw_pairs_path, batch_size=32, input_shape=input_shape) if lfw_eval_flag else None
lfw_callback = LFW_callback(test_loader)
#-------------------------------------------------------#
    #   0.05 of the data is used for validation, 0.95 for training
#-------------------------------------------------------#
val_split = 0.05
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
if backbone=="mobilenet":
freeze_layer = 81
elif backbone=="inception_resnetv1":
freeze_layer = 440
else:
raise ValueError('Unsupported backbone - `{}`, Use mobilenet, inception_resnetv1.'.format(backbone))
if Freeze_Train:
for i in range(freeze_layer):
model.layers[i].trainable = False
#---------------------------------------------------------#
    #   Training is split into two stages: a frozen stage and an unfrozen stage.
    #   Running out of GPU memory has nothing to do with dataset size; if you run out of memory, reduce batch_size.
    #   Because of the BatchNorm layers, batch_size must be at least 2, never 1.
#---------------------------------------------------------#
#---------------------------------------------------------#
    #   Init_Epoch is the starting epoch
    #   Freeze_Epoch is the last epoch of freeze training
    #   Epoch is the total number of training epochs
    #   If you get OOM or run out of GPU memory, reduce Batch_size
#---------------------------------------------------------#
if True:
#----------------------------------------------------#
        #   Training parameters for the frozen stage
        #   The model backbone is frozen here, so the feature-extraction network does not change
        #   This uses less GPU memory and only fine-tunes the network
#----------------------------------------------------#
Batch_size = 32
Lr = 1e-3
Init_epoch = 0
Freeze_epoch = 50
epoch_step = num_train // Batch_size
epoch_step_val = num_val // Batch_size
if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('The dataset is too small to train on; please add more data.')
train_dataset = FacenetDataset(input_shape, lines[:num_train], num_train, num_classes, Batch_size)
val_dataset = FacenetDataset(input_shape, lines[num_train:], num_val, num_classes, Batch_size)
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, Batch_size))
if eager:
gen = tf.data.Dataset.from_generator(partial(train_dataset.generate), (tf.float32, tf.float32))
gen_val = tf.data.Dataset.from_generator(partial(val_dataset.generate), (tf.float32, tf.float32))
gen = gen.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
gen_val = gen_val.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate = Lr, decay_steps = epoch_step, decay_rate=0.94, staircase=True)
optimizer = tf.keras.optimizers.Adam(learning_rate = lr_schedule)
for epoch in range(Init_epoch, Freeze_epoch):
fit_one_epoch(model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
Freeze_epoch, triplet_loss(batch_size=Batch_size), test_loader, lfw_eval_flag)
else:
model.compile(
loss={'Embedding' : triplet_loss(batch_size=Batch_size), 'Softmax' : 'categorical_crossentropy'},
optimizer = Adam(lr=Lr), metrics = {'Softmax' : 'categorical_accuracy'}
)
model.fit_generator(
generator = train_dataset,
steps_per_epoch = epoch_step,
validation_data = val_dataset,
validation_steps = epoch_step_val,
epochs = Freeze_epoch,
initial_epoch = Init_epoch,
use_multiprocessing = True if num_workers > 1 else False,
workers = num_workers,
callbacks = [checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history, lfw_callback] if lfw_eval_flag else [checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history]
)
if Freeze_Train:
for i in range(freeze_layer):
model.layers[i].trainable = True
if True:
#----------------------------------------------------#
        #   Training parameters for the unfrozen stage
        #   The backbone is no longer frozen, so the feature-extraction network will change
        #   This uses more GPU memory, and all parameters of the network are updated
#----------------------------------------------------#
Batch_size = 32
Lr = 1e-4
Freeze_epoch = 50
Epoch = 100
epoch_step = num_train // Batch_size
epoch_step_val = num_val // Batch_size
if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('The dataset is too small to train on; please add more data.')
train_dataset = FacenetDataset(input_shape, lines[:num_train], num_train, num_classes, Batch_size)
val_dataset = FacenetDataset(input_shape, lines[num_train:], num_val, num_classes, Batch_size)
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, Batch_size))
if eager:
gen = tf.data.Dataset.from_generator(partial(train_dataset.generate), (tf.float32, tf.float32))
gen_val = tf.data.Dataset.from_generator(partial(val_dataset.generate), (tf.float32, tf.float32))
gen = gen.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
gen_val = gen_val.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate = Lr, decay_steps = epoch_step, decay_rate=0.94, staircase=True)
optimizer = tf.keras.optimizers.Adam(learning_rate = lr_schedule)
for epoch in range(Freeze_epoch, Epoch):
fit_one_epoch(model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
                    Epoch, triplet_loss(batch_size=Batch_size), test_loader, lfw_eval_flag)
else:
model.compile(
loss={'Embedding' : triplet_loss(batch_size=Batch_size), 'Softmax' : 'categorical_crossentropy'},
optimizer = Adam(lr=Lr), metrics = {'Softmax' : 'categorical_accuracy'}
)
model.fit_generator(
generator = train_dataset,
steps_per_epoch = epoch_step,
validation_data = val_dataset,
validation_steps = epoch_step_val,
epochs = Epoch,
initial_epoch = Freeze_epoch,
use_multiprocessing = True if num_workers > 1 else False,
workers = num_workers,
callbacks = [checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history, lfw_callback] if lfw_eval_flag else [checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history]
)
| 2.09375 | 2 |
Python/Crawling/naver/article_connectDB.py | zionhan/TIL | 1 | 12768439 | import urllib.request as req
import pyodbc
import requests
from bs4 import BeautifulSoup as bs
import time
import random
import json
def sqlquote( value ):
"""Naive SQL quoting
All values except NULL are returned as SQL strings in single quotes,
with any embedded quotes doubled.
"""
if value is None or value=="":
return 'NULL'
return "'{}'".format(str(value).replace( "'", "''" ))
# for page in range(1900000001,9999999999):
for page in range(2000700783, 2000728783):
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
url = 'secretUrl'.format(page)
html = requests.get(url, headers = headers).text
time.sleep(random.uniform(1,4))
json_data = json.loads( html )
print( 'articleNo : ' + str(page) )
if 'error' not in json_data:
print( 'table : ' + str(len(json_data)) )
print()
for table in json_data:
if table == "landPrice":
                print( 'yep, landPrice is there' )
for column in json_data[table]:
if column == 'landPriceTax':
                        print("woooooooow!!!!!!")
print('=========================')
# print( table + " : " + str( len(json_data[table]) ) )
# for column in table:
# print( "column : " + column )
# print( type( column ) )
# if len(column) > 0:
# print( type(column[0]) )
print()
print('==========================================')
print()
else:
print()
        print( 'No complex information found.' )
print( '=========================================' ) | 2.984375 | 3 |
test/core/diagnosticstore.py | SabineEmbacher/xcube | 0 | 12768440 | <reponame>SabineEmbacher/xcube<filename>test/core/diagnosticstore.py
import time
from collections import MutableMapping
from types import MethodType
from typing import TypeVar, Iterator, List, Callable, Any, Tuple
_KT = TypeVar('_KT')
_VT = TypeVar('_VT')
_T_co = TypeVar('_T_co')
_VT_co = TypeVar('_VT_co')
class DiagnosticStore(MutableMapping):
def __init__(self,
delegate: MutableMapping,
observer: Callable[[int, float, str, List[Tuple[str, Any]]], None] = None):
self._delegate = delegate
self._observer = observer or logging_observer()
self._counter = 0
self._add_optional_method('listdir', ['path'])
self._add_optional_method('rmdir', ['path'])
self._add_optional_method('rename', ['from_path', 'to_path'])
def _add_optional_method(self, method_name: str, arg_names: List[str]):
if hasattr(self._delegate, method_name):
def method(_self, *args) -> List[str]:
return _self.call_and_notify(method_name, *[(arg_names[i], args[i]) for i in range(len(args))])
setattr(self, method_name, MethodType(method, self))
def call_and_notify(self, method_name: str, *args):
method = getattr(self._delegate, method_name)
t0 = time.perf_counter()
result = method(*(arg[1] for arg in args))
t1 = time.perf_counter()
self._counter += 1
self._observer(self._counter, t1 - t0, method_name, *args)
return result
def __contains__(self, k: _KT) -> bool:
return self.call_and_notify('__contains__', ('k', k))
def __setitem__(self, k: _KT, v: _VT) -> None:
return self.call_and_notify('__setitem__', ('k', k), ('v', v))
def __delitem__(self, k: _KT) -> None:
return self.call_and_notify('__delitem__', ('k', k))
def __getitem__(self, k: _KT) -> _VT_co:
return self.call_and_notify('__getitem__', ('k', k))
def __len__(self) -> int:
return self.call_and_notify('__len__')
def __iter__(self) -> Iterator[_T_co]:
return self.call_and_notify('__iter__')
def logging_observer(logger_name=None, log_path=None, log_all=False):
import logging
observer_logger = logging.getLogger(logger_name or 'diagnosticstore')
if log_all:
logger = logging.getLogger()
else:
logger = observer_logger
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(log_path or 'diagnosticstore.log')
handler.setFormatter(logging.Formatter('%(asctime)s: %(levelname)s: %(name)s: %(message)s'))
logger.addHandler(handler)
def observer(counter, time_needed, method_name, *args):
msg = f'call #{counter}: {method_name}('
msg += ', '.join(map(lambda x: f'{x[0]}={repr(x[1])}', args))
msg += f'), took {int(1000 * time_needed)} ms'
observer_logger.info(msg)
return observer
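

# Hedged usage sketch (added for illustration; not part of the original test
# helper). It wraps a plain dict and logs every mapping access through the
# default logging_observer. In real use the delegate would typically be a
# chunk store (e.g. a zarr store), but any MutableMapping works.
if __name__ == '__main__':
    store = DiagnosticStore({}, observer=logging_observer(log_path='demo.log'))
    store['chunk-0.0'] = b'\x00' * 16   # recorded as __setitem__
    _ = store['chunk-0.0']              # recorded as __getitem__
    print(len(store), list(store))      # recorded as __len__ and __iter__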
| 2.0625 | 2 |
lino_book/projects/avanti1/fixtures/demo.py | lino-framework/lino_book | 3 | 12768441 | # -*- coding: UTF-8 -*-
# Copyright 2017-2020 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""General demo data for Lino Avanti.
- Course providers and courses
"""
# from django.conf import settings
# from lino.utils import mti
from lino.utils import Cycler # join_words
from lino.utils.mldbc import babel_named as named
from lino.api import rt, dd, _
from lino.modlib.users.choicelists import UserTypes
from lino_xl.lib.cal.choicelists import Recurrencies
from lino_xl.lib.courses.choicelists import EnrolmentStates
course_stages = [
_("Dispens"),
_("Eingeschrieben"),
_("Abgeschlossen"),
_("Abgebrochen"),
_("Ausgeschlossen")]
trends_config = []
trends_config.append((
_("Info Integration"),
[ "!Erstgespräch",
"Sprachtest",
"Einschreibung in Sprachkurs",
"Einschreibung in Integrationskurs",
"!Bilanzgespräch"]))
trends_config.append((_("Alphabetisation"), course_stages))
trends_config.append((_("A1"), course_stages))
trends_config.append((_("A2"), course_stages))
trends_config.append((_("Citizen course"), course_stages))
trends_config.append((_("Professional integration"), [
"Begleitet vom DSBE",
"Begleitet vom ADG",
"Erwerbstätigkeit",
]))
def objects():
Line = rt.models.courses.Line
Teacher = dd.plugins.courses.teacher_model
Course = rt.models.courses.Course
Topic = rt.models.courses.Topic
Enrolment = rt.models.courses.Enrolment
CourseStates = rt.models.courses.CourseStates
User = rt.models.users.User
EventType = rt.models.cal.EventType
Guest = rt.models.cal.Guest
GuestRole = rt.models.cal.GuestRole
GuestStates = rt.models.cal.GuestStates
EntryStates = rt.models.cal.EntryStates
Event = rt.models.cal.Event
Person = rt.models.contacts.Person
CommentType = rt.models.comments.CommentType
TrendStage = rt.models.trends.TrendStage
TrendArea = rt.models.trends.TrendArea
for area, stages in trends_config:
ta = named(TrendArea, area)
yield ta
for stage in stages:
kw = dict(trend_area=ta)
if stage[0] == "!":
stage = stage[1:]
kw.update(subject_column=True)
yield named(TrendStage, stage, **kw)
yield EventType(**dd.str2kw('name', _("First contact")))
kw = dd.str2kw('name', _("Lesson"))
kw.update(dd.str2kw('event_label', _("Lesson")))
event_type = EventType(**kw)
yield event_type
pupil = named(GuestRole, _("Pupil"))
yield pupil
yield named(GuestRole, _("Assistant"))
topic_citizen = named(Topic, _("Citizen course"))
yield topic_citizen
topic_lang = named(Topic, _("Language courses"))
yield topic_lang
    kw = dict(event_type=event_type, guest_role=pupil)
    kw.update(topic=topic_citizen)
yield named(Line, _("Citizen course"), **kw)
kw.update(topic=topic_lang)
alpha = named(Line, _("Alphabetisation"), **kw)
yield alpha
yield named(Line, _("German for beginners"), **kw)
yield named(Line, _("German A1+"), **kw)
yield named(Line, _("German A2"), **kw)
yield named(Line, _("German A2 (women)"), **kw)
yield named(CommentType, _("Phone call"))
yield named(CommentType, _("Visit"))
yield named(CommentType, _("Individual consultation"))
yield named(CommentType, _("Internal meeting"))
yield named(CommentType, _("Meeting with partners"))
laura = Teacher(first_name="Laura", last_name="Lieblig")
yield laura
yield User(username="laura", user_type=UserTypes.teacher,
partner=laura)
yield User(username="nathalie", user_type=UserTypes.user)
yield User(username="nelly", user_type=UserTypes.user)
yield User(username="audrey", user_type=UserTypes.auditor)
yield User(username="martina", user_type=UserTypes.coordinator)
yield User(username="sandra", user_type=UserTypes.secretary)
USERS = Cycler(User.objects.exclude(
user_type__in=(UserTypes.auditor, UserTypes.admin)))
kw = dict(monday=True, tuesday=True, thursday=True, friday=True)
kw.update(
line=alpha,
start_date=dd.demo_date(-30),
start_time="9:00", end_time="12:00",
max_date=dd.demo_date(10),
state=CourseStates.active,
every_unit=Recurrencies.daily,
user=USERS.pop(),
teacher=laura,
max_places=5)
yield Course(**kw)
kw.update(start_time="14:00", end_time="17:00", user=USERS.pop(),
max_places=15)
yield Course(**kw)
kw.update(start_time="18:00", end_time="20:00", user=USERS.pop(),
max_places=15)
yield Course(**kw)
PUPILS = Cycler(dd.plugins.courses.pupil_model.objects.all())
# print(20170302, dd.plugins.courses.pupil_model.objects.all())
COURSES = Cycler(Course.objects.all())
STATES = Cycler(EnrolmentStates.objects())
def fits(course, pupil):
if course.max_places and course.get_free_places() == 0:
return False
if Enrolment.objects.filter(course=course, pupil=pupil).count():
return False
return True
def enrol(pupil):
course = COURSES.pop()
if fits(course, pupil):
kw = dict(user=USERS.pop(), course=course, pupil=pupil)
kw.update(request_date=dd.demo_date(-i))
kw.update(state=STATES.pop())
return Enrolment(**kw)
for i, p in enumerate(
dd.plugins.courses.pupil_model.objects.order_by('id')):
yield enrol(p)
if i % 2 == 0:
yield enrol(p)
if i % 3 == 0:
yield enrol(p)
ar = rt.login('robin')
for obj in Course.objects.all():
obj.update_auto_events(ar)
# Suggested calendar entries older than 7 days should be marked as
# either took_place or cancelled.
qs = Event.objects.filter(
start_date__lte=dd.demo_date(-7),
state=EntryStates.suggested)
for i, obj in enumerate(qs):
if i % 9:
obj.state = EntryStates.took_place
else:
obj.state = EntryStates.cancelled
obj.full_clean()
obj.save()
# participants of events which took place should be marked as
# either absent or present or excused:
qs = Guest.objects.filter(
event__start_date__lte=dd.demo_date(-7),
event__state=EntryStates.took_place).order_by('id')
STATES = Cycler(GuestStates.get_list_items())
for i, obj in enumerate(qs):
obj.state = STATES.pop()
# if i % 8:
# obj.state = GuestStates.present
# elif i % 3:
# obj.state = GuestStates.missing
# else:
# obj.state = GuestStates.excused
obj.full_clean()
obj.save()
| 1.75 | 2 |
billtitles/crud.py | aih/billtitles-py | 2 | 12768442 | <reponame>aih/billtitles-py<filename>billtitles/crud.py
from datetime import datetime
from typing import List, Optional, Union
from sqlalchemy.orm import aliased, Session
from sqlalchemy import func, desc
from sqlalchemy.sql.elements import literal_column
from . import models
from typing import TypedDict
bill_to = aliased(models.Bill)
def deep_get(d, keys, default=None):
"""
Example:
d = {'meta': {'status': 'OK', 'status_code': 200}}
deep_get(d, ['meta', 'status_code']) # => 200
deep_get(d, ['garbage', 'status_code']) # => None
deep_get(d, ['meta', 'garbage'], default='-') # => '-'
"""
assert type(keys) is list
if d is None:
return default
if not keys:
return d
return deep_get(d.get(keys[0]), keys[1:], default)
class BillsResponse(TypedDict):
bills: List[models.BillTitlePlus]
bills_title_whole: List[models.BillTitlePlus]
def get_bill_titles_by_billnumber(db: Session, billnumber: str = None):
if not billnumber:
return None
billnumber=billnumber.strip("\"'").lower()
# In postgres it may be possible to use array_agg like this:
# titles_main_resp = db.query(models.BillTitle.billnumber, func.array_agg(models.BillTitle.title).label('titles_main') ).filter(models.BillTitle.billnumber == billnumber).filter(models.BillTitle.is_for_whole_bill == True).group_by(models.BillTitle.billnumber).all()
titles_whole_resp = db.query(models.Bill, func.string_agg(models.Title.title, literal_column("'; '")).label('titles')).filter(models.Bill.billnumber==billnumber).join(models.BillTitle, models.BillTitle.bill_id == models.Bill.id).join(models.Title, models.Title.id == models.BillTitle.title_id).filter(models.BillTitle.is_for_whole_bill == True).group_by(models.Bill.id).all()
titles_all_resp = db.query(models.Bill, func.string_agg(models.Title.title, literal_column("'; '")).label('titles')).filter(models.Bill.billnumber==billnumber).join(models.BillTitle, models.BillTitle.bill_id == models.Bill.id).join(models.Title, models.Title.id == models.BillTitle.title_id).group_by(models.Bill.id).all()
if len(titles_whole_resp) > 0:
titles_whole = titles_whole_resp[0].titles.split('; ')
else:
titles_whole = []
if len(titles_all_resp) > 0:
titles_all = titles_all_resp[0].titles.split('; ')
else:
titles_all = []
return models.BillTitleResponse(billnumber= billnumber, titles= models.TitlesItem(whole=titles_whole, all= titles_all))
def get_related_bills(db: Session, billnumber: str = None, version: str = None, withTitle: bool = True, flat: Optional[bool] = True, billsonly: Optional[bool] = False) -> List[Union[models.BillToBillModel, models.BillToBillModelDeep]]:
if not billnumber:
return []
billnumber=billnumber.strip("\"'").lower()
if version:
version=version.strip("\"'").lower()
if not version:
subquery = db.query(models.Bill.billnumber, models.Bill.version, models.Bill.length,
models.BillToBill.score, models.BillToBill.score_to, models.BillToBill.reasonsstring,
models.BillToBill.sections_num, models.BillToBill.sections_match, models.BillToBill.score_es,
models.BillToBill.bill_id, models.BillToBill.bill_to_id
).filter(models.Bill.billnumber == billnumber).join(models.BillToBill, models.BillToBill.bill_id == models.Bill.id).subquery();
bills = db.query(bill_to.billnumber.label("billnumber_to"), bill_to.version.label("version_to"), bill_to.length.label("length_to"), subquery).filter(subquery.c.bill_to_id == bill_to.id).order_by(desc(subquery.c.score)).all()
else:
subquery = db.query(models.Bill.billnumber, models.Bill.version, models.Bill.length,
models.BillToBill.score, models.BillToBill.score_to, models.BillToBill.reasonsstring,
models.BillToBill.sections_num, models.BillToBill.sections_match, models.BillToBill.score_es,
models.BillToBill.bill_id, models.BillToBill.bill_to_id
).filter(models.Bill.billnumber == billnumber, models.Bill.version == version).join(models.BillToBill, models.BillToBill.bill_id == models.Bill.id).subquery();
bills = db.query(bill_to.billnumber.label("billnumber_to"), bill_to.version.label("version_to"), bill_to.length.label("length_to"), subquery).filter(subquery.c.bill_to_id == bill_to.id).order_by(desc(subquery.c.score)).all()
billdicts = []
titles = get_bill_titles_by_billnumber(db, billnumber)
title = None
if titles:
if titles.titles.whole:
title = titles.titles.whole[0]
else:
if titles.titles.all:
title = titles.titles.all[0]
for bill in bills:
bill = bill._asdict()
bill['billnumber_version'] = bill.get('billnumber', '') + bill.get('version', '')
bill['length_to'] = bill.get('length_to', 0)
bill['billnumber_version_to'] = bill.get('billnumber_to', '') + bill.get('version_to', '')
bill['reasons'] = bill.get('reasonsstring', '').split(', ')
if withTitle:
titles_to = get_bill_titles_by_billnumber(db, bill.get('billnumber_to'))
#titles = get_titles_by_bill_id(db, bill.get('bill_id'))
bill['titles_to'] = titles_to
title_to = None
if titles_to:
if titles_to.titles.whole:
title_to = titles_to.titles.whole[0]
else:
if titles_to.titles.all:
title_to = titles_to.titles.all[0]
bill['title_to'] = title_to
bill['titles_to'] = titles_to
bill['titles'] = titles
bill['title'] = title
billdicts.append(bill)
if billsonly:
billsList = [bill['billnumber_version_to'] for bill in billdicts]
return billsList
if flat == False:
extrafields = ['reasons', 'score', 'score_to', 'identified_by', 'sections_num', 'sections_match', 'sections']
billdicts_to = []
billfrom = None
for billdict in billdicts:
billdict_deep_extra = { 'reasons': billdict.get('reasons', []),
'score': billdict.get('score', 0),
'score_to': billdict.get('score_to', 0),
'identified_by': billdict.get('identified_by', None),
'sections_num': billdict.get('sections_num', None),
'sections_match': billdict.get('sections_match', None),
'sections': billdict.get('sections', None)}
billdict_deep_to = models.BillModelDeep(**{keyitem.replace("_to", ""): billdict[keyitem] for keyitem in billdict.keys() if keyitem.find('_to') > -1 and keyitem not in extrafields}, **billdict_deep_extra)
billdicts_to.append(billdict_deep_to)
# TODO add the 'from' bill at the beginning of the list
#billfrom = {'bill': models.BillModelDeep(**{keyitem: billdicts[0][keyitem] for keyitem in billdicts.keys() if not keyitem.find('_to') > -1 and keyitem not in extrafields})}
return billdicts_to
return billdicts
def create_billtobill(db: Session, billtobill: models.BillToBill):
db.add(billtobill)
db.commit()
db.refresh(billtobill)
return billtobill
def get_bills(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.BillTitle.billnumber, models.BillTitle.title).offset(skip).limit(limit).all()
# TODO: document how to use this and whether it's working
# In particular, look at adding the titles
#def create_bill(db: Session, bill: models.Bill):
# db_bill = models.Bill(billnumber=bill.billnumber, billnumberversion=bill.billnumberversion, created_at=datetime.now(), updated_at=datetime.now())
# db.add(db_bill)
# db.commit()
# db.refresh(db_bill)
# return db_bill
def get_title_by_id(db: Session, title_id: int):
return db.query(models.BillTitle).filter(models.BillTitle.title_id == title_id).first()
def get_title(db: Session, title: str) -> models.TitleBillsResponse:
title=title.strip("\"'")
titles = [models.TitleBillsResponseItem(id=item.id, title=item.title, billnumbers=item.billnumbers.split('; ')) for item in db.query(models.Bill.billnumber, models.Title.title, models.BillTitle.title_id, func.group_concat(models.Bill.billnumber, "; ").label('billnumbers') ).filter(models.Title.title == title).group_by(models.Title.title).all()]
titles_whole = [models.TitleBillsResponseItem(id=item.id, title=item.title, billnumbers=item.billnumbers.split('; ')) for item in db.query(models.Bill.billnumber, models.Title.title, models.BillTitle.title_id, func.group_concat(models.Bill.billnumber, "; ").label('billnumbers') ).filter(models.Title.title == title).filter(models.BillTitle.is_for_whole_bill == True).group_by(models.Title.title).all()]
return models.TitleBillsResponse(titles=titles, titles_whole= titles_whole)
def get_titles(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Bill.billnumber, models.Title.title).offset(skip).limit(limit).all()
def get_titles_by_bill_id(db: Session, bill_id: int):
return db.query(models.BillTitle, models.Title.title).filter(models.BillTitle.bill_id == bill_id).join(models.Title,
models.Title.id == models.BillTitle.title_id).all()
def get_title_by_billnumber(db: Session, billnumber: str = None):
if not billnumber:
return {"titles": [], "titles_whole": []}
billnumber=billnumber.strip("\"'").lower()
titles = db.query(models.Bill.billnumber, func.group_concat(models.Title.title, "; ").label('titles') ).filter(models.Bill.billnumber == billnumber).group_by(models.Bill.billnumber).all()
titles_whole = db.query(models.Bill.billnumber, func.group_concat(models.Title.title, "; ").label('titles') ).filter(models.Bill.billnumber == billnumber).filter(models.BillTitle.is_for_whole_bill == True).group_by(models.Bill.billnumber).all()
return {"titles": titles, "titles_whole": titles_whole}
def get_billtitle(db: Session, title: str, billnumber: str):
return db.query(models.Bill.billnumber, models.Title.title).filter_by(title=title, billnumber=billnumber).first()
def add_title(db: Session, title: str, billnumbers: List[str], is_for_whole_bill: bool = False):
if title:
title=title.strip("\"'")
created_at = datetime.now()
updated_at = datetime.now()
newTitle = models.Title(title=title)
db.add(newTitle)
db.commit()
msg = ""
for billnumber in billnumbers:
billtitle = get_billtitle(db, title, billnumber)
if billtitle:
newmsg = "Title already exists: '" + title + "' for bill: " + billnumber
msg = newmsg + "; " + msg
continue
created_at = datetime.now()
updated_at = datetime.now()
newBillTitle = models.BillTitle(title=title, created_at=created_at, updated_at=updated_at, billnumber=billnumber, is_for_whole_bill=is_for_whole_bill)
db.add(newBillTitle)
db.commit()
msg = "Title added: '" + title + "' for bill: " + billnumber + "; " + msg
return {"billtitle": newTitle, "message": msg}
# Removes the title entry, with all bills associated with it
def remove_title(db: Session, title: str):
rows = db.query(models.Title).filter_by(title=title).delete()
# TODO: check if this deletes the join table entries too
db.commit()
if rows >0:
return {"title": title, "message": "Title removed"}
else:
return {"title": title, "message": "Title not found"} | 2.6875 | 3 |
2019/## Python/TADs e Classes/TAD_ponto_heranca.py | ed1rac/AulasEstruturasDados | 8 | 12768443 |
class Ponto(object):
    def __init__(self, x, y):  # constructor method (creates a point)
self.x = x
self.y = y
def exibe_ponto(self):
        print('Coordinates -> x: ', self.x, ', y: ', self.y)
def set_x(self, x):
self.x = x
def set_y(self, y):
self.y = y
def distancia_entre(self, q):
dx = q.x - self.x
dy = q.y - self.y
return (dx*dx+dy*dy)**0.5
class ponto_n(Ponto):
def __init__(self, x, y, nome):
super().__init__(x, y)
self.nome = nome
def exibe_ponto(self):
        print('Point: ', self.nome)
super().exibe_ponto()
p = Ponto(2.0,1.0)
q = Ponto(3.4, 2.1)
p1 = ponto_n(2, 1, 'P')
q1 = ponto_n(3.4, 2.1, 'Q')
p.exibe_ponto()
q.exibe_ponto()
p1.exibe_ponto()
q1.exibe_ponto()
print('The distance between points p and q is: ', p.distancia_entre(q)) | 3.984375 | 4 |
ya3_report/daily.py | ecoreuse/ya3-report | 0 | 12768444 | <reponame>ecoreuse/ya3-report
# Copyright 2022 <NAME>. All rights reserved.
from pathlib import Path
from datetime import date, datetime
from typing import Optional
import pandas as pd
import numpy as np
from ya3_report import environ
def get_data(dt: Optional[date] = None) -> pd.DataFrame: # pragma: no cover
_dt: date = dt if dt else date.today()
filepath: Path = (environ.DATA_DIR / _dt.isoformat()).with_suffix(environ.DATAFILE_SUFFIX)
return pd.read_csv(filepath)
def index_hour(df: pd.DataFrame) -> pd.DataFrame:
return df. \
groupby(["aID", "title"]). \
apply(
# indexing by hour
lambda df: df. \
set_index("datetime"). \
sort_index(). \
diff(). \
apply(lambda x: np.where(x < 0, 0, x)). \
groupby(lambda x: datetime.fromisoformat(x).hour). \
sum()
). \
groupby(lambda index: index[2]). \
sum(). \
assign(hour=lambda df: df.index). \
set_index("hour")
def index_aID(df: pd.DataFrame) -> pd.DataFrame:
return df. \
groupby(["aID", "title"]). \
apply(lambda df: index_hour(df).sum()). \
reset_index("title")
if __name__ == "__main__": # pragma: no cover
from datetime import date
df = get_data(date(2022, 1, 3))
print(index_hour(df))
| 2.71875 | 3 |
Chapter04-Inheritance/Multiple.py | elephantscale/python-oop-labs-solutions | 0 | 12768445 | class Base1:
def FuncA(self):
print("Base1::FuncA")
class Base2:
def FuncA(self):
print("Base2::FuncA")
class Child(Base1, Base2):
pass
def main():
    obj = Child()
    # Python's method resolution order (Child -> Base1 -> Base2) resolves
    # FuncA to Base1.FuncA, so this prints "Base1::FuncA".
    obj.FuncA()
if __name__ == "__main__":
main()
| 3.078125 | 3 |
bot_python_sdk/activation_service.py | ShanavazJs/BoT-Python-SDK | 0 | 12768446 | <reponame>ShanavazJs/BoT-Python-SDK
import time
from bot_python_sdk.bot_service import BoTService
from bot_python_sdk.configuration_store import ConfigurationStore
from bot_python_sdk.logger import Logger
LOCATION = 'Activation Service'
RESOURCE = 'status'
DEVICE_ID = 'deviceID'
POLLING_INTERVAL_IN_SECONDS = 5
MAXIMUM_TRIES = 3
class ActivationService:
def __init__(self):
configuration = ConfigurationStore().get()
self.device_id = configuration.get_device_id()
self.bot_service = BoTService()
def run(self):
Logger.info(LOCATION, 'Starting to activate device...')
for tries in range(1, MAXIMUM_TRIES + 1):
Logger.info(LOCATION, 'Activating device, attempt: ' + str(tries))
if self.activate():
return True
time.sleep(POLLING_INTERVAL_IN_SECONDS)
return False
def activate(self):
try:
self.bot_service.post(RESOURCE, {DEVICE_ID: self.device_id})
Logger.success(LOCATION, 'Device successfully activated. Triggering actions enabled.')
return True
        except Exception:
Logger.error(LOCATION, 'Failed to activate device.')
return False
| 2.515625 | 3 |
data_analysis/subsidy_distribution.py | Bermuhz/DataMiningCompetitionFirstPrize | 128 | 12768447 | root_loc = "/Users/mac/Documents/contest/data/original_data/"
file_name = "subsidy_train.txt"
count_0 = 0
count_1000 = 0
count_1500 = 0
count_2000 = 0
lines = open(root_loc + file_name).readlines()
for line in lines:
temps = line.strip("\n").split(",")
subsidy = int(temps[1])
if subsidy == 0:
count_0 += 1
if subsidy == 1000:
count_1000 += 1
if subsidy == 1500:
count_1500 += 1
if subsidy == 2000:
count_2000 += 1
print (str(count_0)+"\n"+str(count_1000)+"\n"+str(count_1500)+"\n"+str(count_2000))
print(count_0 + count_1000 + count_1500 + count_2000) | 2.828125 | 3 |
data_construction/neg_mask_regen/generated_json_to_text.py | launchnlp/cliff_summ | 14 | 12768448 | import argparse
import json
from tqdm import tqdm
def align_ws(old_token, new_token):
# Align trailing whitespaces between tokens
if old_token[-1] == new_token[-1] == " ":
return new_token
elif old_token[-1] == " ":
return new_token + " "
elif new_token[-1] == " ":
return new_token[:-1]
else:
return new_token
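

# Hedged usage sketch (added for illustration; align_ws is otherwise unused in
# this script). It shows how the trailing whitespace of the original token is
# carried over to the replacement token.
assert align_ws("word ", "token") == "token "   # trailing space restored
assert align_ws("word", "token ") == "token"    # trailing space dropped
assert align_ws("word ", "token ") == "token "  # both already end with a space
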
def process_one(i, json_dict, generated_texts):
ori_summ = json_dict['summary']
text_outs = []
for ent_i, (ori_entity, new_entity_summaries) in enumerate(zip(json_dict['entitys'], generated_texts['new_ent_summary'])):
valid_entity_summaries = []
for new_entity_summary in set(new_entity_summaries[:3]):
if new_entity_summary == ori_summ:
continue
valid_entity_summaries.append(new_entity_summary)
break
text_outs.append((valid_entity_summaries, (i, 0, f'entity_{ent_i}', len(valid_entity_summaries))))
if json_dict['entity_relations']:
j = 0
for rel_i, x in enumerate(json_dict['entity_relations']):
num_heads = len(x['heads'])
num_children = len(x['children'])
j += 1 # entity chunk
for head_i in range(num_heads):
j += 1 # only head
valid_summaries = []
for new_rel_summary in set(generated_texts['new_rel_summary'][j][:3]):
if new_rel_summary == ori_summ:
continue
valid_summaries.append(new_rel_summary)
break
j += 1 # both chunk and head
text_outs.append((valid_summaries, (i, 0, f'relation_{rel_i}_head{head_i}', len(valid_summaries))))
for head_i in range(num_children):
j += 1 # only child
valid_summaries = []
for new_rel_summary in set(generated_texts['new_rel_summary'][j][:3]):
if new_rel_summary == ori_summ:
continue
valid_summaries.append(new_rel_summary)
break
j += 1 # both chunk and child
text_outs.append((valid_summaries, (i, 0, f'relation_{rel_i}_child{head_i}', len(valid_summaries))))
return text_outs
def main():
parser = argparse.ArgumentParser()
parser.add_argument('jsonl_file')
parser.add_argument('generated_file')
parser.add_argument('out_file')
parser.add_argument('new_other_file')
args = parser.parse_args()
generated_texts = []
with open(args.generated_file) as f:
for line in f:
generated_texts.append(json.loads(line))
with open(args.jsonl_file) as f:
lines = f.readlines()
results = []
for i, line in tqdm(enumerate(lines)):
# for i, line in enumerate(f):
x = json.loads(line)
if len(x['entitys']) + len(x['entity_relations']) > 0:
results.append(process_one(i, x, generated_texts[i]))
with open(args.out_file, 'w') as f, open(args.new_other_file, 'w') as fother:
for result in results:
for beams, other in result:
for beam in beams:
f.write(beam + '\n')
fother.write(' '.join([str(xx) for xx in other]) + '\n')
if __name__ == '__main__':
main()
| 2.625 | 3 |
custom_model_runner/datarobot_drum/drum/memory_monitor.py | amperie/user-models | 0 | 12768449 | <filename>custom_model_runner/datarobot_drum/drum/memory_monitor.py
import os
import psutil
from mlpiper.common.byte_conv import ByteConv
from datarobot_drum.drum.common import ArgumentsOptions
import collections
MemoryInfo = collections.namedtuple(
"MemoryInfo",
"total avail free drum_rss drum_info nginx_rss container_limit container_max_used container_used",
)
class MemoryMonitor:
TOTAL_MEMORY_LABEL = "Total"
AVAILABLE_MEMORY_LABEL = "Available"
FREE_MEMORY_LABEL = "Free"
MLAPP_RSS_LABEL = "Pipeline RSS"
def __init__(self, pid=None, include_childs=True, monitor_current_process=False):
""""""
self._pid = pid if pid is not None else os.getpid()
self._include_childs = include_childs
self._monitor_current_process = monitor_current_process
@staticmethod
def _run_inside_docker():
"""
Returns True if running inside a docker container
"""
if os.path.exists("/.dockerenv"):
return True
else:
return False
def _collect_memory_info_in_docker(self):
"""
In the case we are running inside a docker container then memory collection is
simpler. We look directly on the files inside the /sys/fs/cgroup/memory directory
and collect the usage/total for all the processes inside the container.
Returns
-------
A dictionary with: {total_mb, usage_mb, max_usage_mb}
"""
mem_sysfs_path = "/sys/fs/cgroup/memory/"
total_bytes = int(open(os.path.join(mem_sysfs_path, "memory.limit_in_bytes")).read())
total_mb = ByteConv.from_bytes(total_bytes).mbytes
usage_bytes = int(open(os.path.join(mem_sysfs_path, "memory.usage_in_bytes")).read())
usage_mb = ByteConv.from_bytes(usage_bytes).mbytes
max_usage_bytes = int(
open(os.path.join(mem_sysfs_path, "memory.max_usage_in_bytes")).read()
)
max_usage_mb = ByteConv.from_bytes(max_usage_bytes).mbytes
return {"total_mb": total_mb, "usage_mb": usage_mb, "max_usage_mb": max_usage_mb}
def collect_memory_info(self):
def get_proc_data(p):
return {
"pid": p.pid,
"cmdline": p.cmdline(),
"mem": ByteConv.from_bytes(p.memory_info().rss).mbytes,
}
drum_process = None
nginx_rss_mb = 0
drum_rss_mb = 0
current_proc = psutil.Process()
# case with Flask server, there is only one process - drum
if self._monitor_current_process:
drum_process = current_proc
# case with uwsgi, current proc is uwsgi worker, so looking for parent drum process
else:
parents = current_proc.parents()
for p in parents:
if p.name() == ArgumentsOptions.MAIN_COMMAND:
drum_process = p
break
if drum_process:
drum_rss_mb = ByteConv.from_bytes(drum_process.memory_info().rss).mbytes
drum_info = []
drum_info.append(get_proc_data(drum_process))
for child in drum_process.children(recursive=True):
drum_rss_mb += ByteConv.from_bytes(child.memory_info().rss).mbytes
drum_info.append(get_proc_data(child))
for proc in psutil.process_iter():
if "nginx" in proc.name().lower():
nginx_rss_mb += ByteConv.from_bytes(proc.memory_info().rss).mbytes
virtual_mem = psutil.virtual_memory()
total_physical_mem_mb = ByteConv.from_bytes(virtual_mem.total).mbytes
if self._run_inside_docker():
container_mem_info = self._collect_memory_info_in_docker()
container_limit_mb = container_mem_info["total_mb"]
container_max_usage_mb = container_mem_info["max_usage_mb"]
container_usage_mb = container_mem_info["usage_mb"]
available_mem_mb = container_limit_mb - container_mem_info["usage_mb"]
free_mem_mb = available_mem_mb
else:
available_mem_mb = ByteConv.from_bytes(virtual_mem.available).mbytes
free_mem_mb = ByteConv.from_bytes(virtual_mem.free).mbytes
container_limit_mb = None
container_max_usage_mb = None
container_usage_mb = None
mem_info = MemoryInfo(
total=total_physical_mem_mb,
avail=available_mem_mb,
free=free_mem_mb,
drum_rss=drum_rss_mb if drum_process else None,
drum_info=drum_info if drum_process else None,
nginx_rss=nginx_rss_mb,
container_limit=container_limit_mb,
container_max_used=container_max_usage_mb,
container_used=container_usage_mb,
)
return mem_info
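

# Hedged usage sketch (added for illustration): report the memory footprint of
# the current process tree without going through the DRUM server machinery.
if __name__ == "__main__":
    info = MemoryMonitor(monitor_current_process=True).collect_memory_info()
    print(info)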
| 2.109375 | 2 |
posts/OppositeDay/anti_amtrak.py | capecchi/capecchi.github.io | 0 | 12768450 | <filename>posts/OppositeDay/anti_amtrak.py
def main():
import json
import glob
import coords2geojson
local = 'C:/Python34/America_By_Train/finished_routes/'
webdir = 'C:/Users/Owner/Documents/GitHub/capecchi.github.io/posts/OppositeDay/'
rts = glob.glob(local+'*.geojson')
rts = [rts[3],rts[1],rts[4],rts[2],rts[0]]
#print(rts)
#stop
allcoords = []
for r in rts:
with open(r) as f:
data = json.load(f)
coords = data['geometry']['coordinates']
#print(len(coords))
i = 0
for c in coords: #long,lat
c[0] = c[0]+180.
c[1] = -c[1]
if i % 3 == 0: allcoords.append(c)
i += 1
coords2geojson.main(allcoords,webdir+'anti_amtrak')
| 2.734375 | 3 |