| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67679f71ef919aae1d798cf464014c5cc22d19a4
| 1,997
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/MAXIMUM_TRIPLET_SUM_ARRAY_2.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/MAXIMUM_TRIPLET_SUM_ARRAY_2.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/MAXIMUM_TRIPLET_SUM_ARRAY_2.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
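# f_gold tracks the three largest values seen so far (maxA >= maxB >= maxC) in a single pass and returns their sum.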
def f_gold ( arr , n ) :
maxA = - 100000000
maxB = - 100000000
maxC = - 100000000
for i in range ( 0 , n ) :
if ( arr [ i ] > maxA ) :
maxC = maxB
maxB = maxA
maxA = arr [ i ]
elif ( arr [ i ] > maxB ) :
maxC = maxB
maxB = arr [ i ]
elif ( arr [ i ] > maxC ) :
maxC = arr [ i ]
return ( maxA + maxB + maxC )
#TOFILL
if __name__ == '__main__':
param = [
([4, 7, 12, 21, 22, 25, 27, 28, 28, 31, 32, 32, 41, 45, 47, 51, 53, 60, 61, 61, 63, 71, 74, 82, 83, 85, 88, 92, 96, 96],28,),
([-52, 26, 74, -62, -76],2,),
([0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],11,),
([63, 71, 15, 28, 31, 84, 8, 17, 24, 42, 66, 95, 30],6,),
([-94, -92, -92, -90, -88, -88, -86, -82, -80, -78, -66, -54, -52, -52, -46, -46, -42, -36, -32, -24, -24, -14, -14, -14, -12, -10, 0, 6, 8, 20, 24, 24, 28, 38, 38, 52, 54, 56, 64, 74, 74, 76, 82, 94, 94],31,),
([0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0],30,),
([15, 19, 80],2,),
([4, 80, 18, 74, 36, -30, -72, -28, -32, -16, -8, 38, 78, -48, 98, -64, 86, -60, -44, 84, -98, 40, 14, 30, 44, 90, -30, -42, 24, -28, 24, 40, -96, 98, 90, -68, -54, -52, 62, 34, -98, 68, -56, -94, -78, -12, 28],41,),
([0, 1, 1, 1, 1, 1],3,),
([2, 18, 96, 7, 99, 83, 3, 88, 23, 77, 6, 28, 55, 49, 69, 55, 48, 76, 43, 11, 43, 44, 17, 74, 27, 64, 76, 77, 53, 26, 73, 12, 19, 62, 18, 34, 13, 31, 97, 96, 85, 27, 30, 97, 89, 25],41,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 46.44186
| 220
| 0.457687
|
1b5fc09b49458bd98582a37e02eea4b4155dbf0e
| 1,333
|
py
|
Python
|
f5_agent_auditor/options.py
|
f5devcentral/f5-agent-auditor
|
dce6358346bc6832c050164f27babaf5d54228cd
|
[
"Apache-2.0"
] | null | null | null |
f5_agent_auditor/options.py
|
f5devcentral/f5-agent-auditor
|
dce6358346bc6832c050164f27babaf5d54228cd
|
[
"Apache-2.0"
] | null | null | null |
f5_agent_auditor/options.py
|
f5devcentral/f5-agent-auditor
|
dce6358346bc6832c050164f27babaf5d54228cd
|
[
"Apache-2.0"
] | 1
|
2021-07-14T02:22:10.000Z
|
2021-07-14T02:22:10.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_config import cfg
import sys
# require f5 agent is installed
import f5_openstack_agent.lbaasv2.drivers.bigip.agent_manager as manager
from f5_openstack_agent.lbaasv2.drivers.bigip import icontrol_driver
from oslo_db import options
tool_opts = [
cfg.StrOpt("f5-agent",
short="ag",
default=None,
help=("Provide an ID of an agent"))
]
cfg.CONF.register_cli_opts(tool_opts)
def load_options(conf=cfg.CONF):
conf.register_opts(manager.OPTS)
conf.register_opts(icontrol_driver.OPTS)
def load_db_options(conf=cfg.CONF):
options.set_defaults(conf)
def parse_options(args=sys.argv[1:],
conf=cfg.CONF,
project="f5-agent-auditor"):
conf(args, project)
| 28.361702
| 74
| 0.714179
|
38d6b36c682bf03998ebacee2099a12e3edf6512
| 10,058
|
py
|
Python
|
3-Crossed_Wires.py
|
minhoryang/advent-of-code-2019
|
f5b468ce583a14548346f8e415d6b05589ec564f
|
[
"MIT"
] | null | null | null |
3-Crossed_Wires.py
|
minhoryang/advent-of-code-2019
|
f5b468ce583a14548346f8e415d6b05589ec564f
|
[
"MIT"
] | null | null | null |
3-Crossed_Wires.py
|
minhoryang/advent-of-code-2019
|
f5b468ce583a14548346f8e415d6b05589ec564f
|
[
"MIT"
] | null | null | null |
line1 = ['R999', 'D586', 'L462', 'D725', 'L236', 'U938', 'R366', 'D306', 'R263', 'D355', 'R354', 'D332', 'L599', 'U48', 'R829', 'U210', 'R697', 'D534', 'L19', 'U991', 'L110', 'U981', 'L954', 'U323', 'R851', 'U290', 'R76', 'D513', 'R844', 'D780', 'L257', 'D24', 'L586', 'U865', 'L341', 'U572', 'L122', 'D304', 'R398', 'D641', 'L221', 'U726', 'R270', 'D321', 'R503', 'D112', 'L151', 'D179', 'R439', 'U594', 'R242', 'U1', 'L484', 'D259', 'L604', 'U760', 'R362', 'D93', 'R29', 'D647', 'R482', 'U814', 'L214', 'D510', 'R281', 'U327', 'L170', 'D993', 'R191', 'D33', 'L305', 'D657', 'L897', 'U609', 'R512', 'D866', 'R654', 'U980', 'L899', 'D602', 'L141', 'D365', 'L13', 'D584', 'L706', 'U404', 'L238', 'U720', 'L732', 'U716', 'R672', 'U979', 'L49', 'D352', 'R712', 'U396', 'L843', 'D816', 'L276', 'U906', 'L375', 'D410', 'R275', 'U664', 'R487', 'D158', 'L713', 'D451', 'L859', 'U194', 'L736', 'D51', 'R659', 'U632', 'R586', 'U342', 'L222', 'U184', 'R741', 'U989', 'L175', 'D521', 'R820', 'U183', 'L950', 'D888', 'R54', 'D149', 'R776', 'D200', 'R939', 'U529', 'L377', 'D226', 'R769', 'U395', 'R392', 'U570', 'L398', 'D358', 'L644', 'D975', 'R578', 'D687', 'L133', 'D884', 'R822', 'D226', 'L527', 'U439', 'R175', 'D388', 'L539', 'D450', 'L391', 'U392', 'L131', 'U134', 'R873', 'U741', 'R761', 'U620', 'R667', 'D31', 'R481', 'D945', 'L373', 'D463', 'R57', 'D402', 'R181', 'U340', 'L835', 'U81', 'R908', 'U257', 'R592', 'U702', 'R713', 'D352', 'R418', 'D486', 'L904', 'U866', 'R828', 'D545', 'R578', 'U469', 'L845', 'D437', 'R371', 'D246', 'L996', 'D920', 'L171', 'U83', 'R471', 'D152', 'R550', 'U344', 'L390', 'U287', 'L126', 'D883', 'L576', 'U303', 'L68', 'U854', 'L463', 'D915', 'R184', 'D282', 'L513', 'U909', 'R770', 'U638', 'L751', 'U168', 'R354', 'D480', 'R19', 'U144', 'R381', 'D554', 'R594', 'D526', 'L957', 'D464', 'R267', 'D802', 'L709', 'U306', 'L907', 'D266', 'L871', 'U286', 'R975', 'D549', 'L732', 'U721', 'R825', 'U753', 'R443', 'U465', 'L966', 'U982', 'L833', 'D62', 'L5', 'U299', 'R500', 'D168', 'R155', 'D102', 'R455', 'D855', 'L376', 'D479', 'L469', 'D6', 'R588', 'U301', 'R329', 'U19', 'L63', 'D488', 'L936', 'D238', 'L798', 'D452', 'L231', 'D652', 'R935', 'D522', 'L401', 'U234', 'L20', 'U285', 'L949', 'D88', 'L120', 'D159', 'R641', 'D960', 'L946', 'U516', 'L530', 'D447', 'R23', 'U962', 'R860', 'D352', 'R904', 'D241', 'R702', 'U108', 'L155', 'U99', 'L43', 'D401', 'R19']
line2 = ['L1008', 'U23', 'L793', 'D944', 'L109', 'U830', 'L103', 'U255', 'L391', 'D574', 'R433', 'U468', 'R800', 'D831', 'L39', 'U8', 'L410', 'D467', 'R655', 'D287', 'R550', 'U467', 'L627', 'D529', 'R361', 'D865', 'L755', 'D895', 'L148', 'U110', 'R593', 'U567', 'L646', 'D89', 'L133', 'D552', 'R576', 'U228', 'L119', 'U734', 'R591', 'U680', 'L163', 'D498', 'L394', 'U884', 'R217', 'U46', 'R684', 'D499', 'L522', 'U373', 'L322', 'U347', 'R48', 'D459', 'L692', 'U569', 'R267', 'U296', 'L949', 'U915', 'R599', 'D113', 'R770', 'U322', 'R304', 'U920', 'L880', 'D257', 'R915', 'D672', 'L950', 'U209', 'R601', 'U663', 'R461', 'D514', 'R415', 'U82', 'L396', 'U233', 'R606', 'U500', 'R70', 'D696', 'R945', 'D686', 'L405', 'U176', 'R728', 'U562', 'L710', 'D35', 'R707', 'D931', 'L857', 'U792', 'R337', 'D490', 'L963', 'U731', 'R909', 'U532', 'R375', 'D990', 'L154', 'U660', 'L17', 'U32', 'R593', 'U529', 'R136', 'U835', 'R717', 'U255', 'L93', 'D295', 'L473', 'U608', 'L109', 'D858', 'R719', 'U207', 'R60', 'D36', 'R790', 'D382', 'L684', 'D233', 'R988', 'U625', 'R410', 'U804', 'R552', 'D578', 'L440', 'D749', 'R653', 'U362', 'L900', 'U549', 'R790', 'D870', 'R672', 'U503', 'R343', 'D343', 'R738', 'D270', 'R494', 'D527', 'L182', 'U654', 'R933', 'D594', 'R447', 'U933', 'R4', 'U364', 'L309', 'U967', 'R648', 'U537', 'R990', 'U203', 'R584', 'D474', 'L852', 'U736', 'R305', 'D781', 'R774', 'D92', 'L398', 'U207', 'R472', 'D664', 'R369', 'U807', 'L474', 'U588', 'R339', 'D536', 'R305', 'D506', 'R516', 'U772', 'R177', 'U450', 'L211', 'U850', 'R777', 'U483', 'L595', 'U104', 'L916', 'U548', 'R256', 'U173', 'L27', 'D167', 'L574', 'D288', 'R569', 'U192', 'R771', 'D98', 'R432', 'U165', 'L651', 'D524', 'L582', 'D698', 'L393', 'D152', 'L280', 'U461', 'R573', 'D771', 'R833', 'D409', 'R991', 'U996', 'R780', 'U617', 'R63', 'U563', 'L844', 'D63', 'R15', 'U634', 'R643', 'D124', 'L147', 'D583', 'R716', 'D28', 'L799', 'D59', 'R819', 'D723', 'L43', 'D975', 'L755', 'D635', 'R118', 'U325', 'L969', 'D445', 'R374', 'D797', 'L821', 'U118', 'R962', 'D643', 'R127', 'U267', 'R768', 'D50', 'L343', 'U80', 'R281', 'U575', 'R618', 'D718', 'L74', 'U146', 'R242', 'D547', 'L492', 'U71', 'R826', 'D483', 'L402', 'U953', 'R184', 'U707', 'L973', 'D550', 'L593', 'U281', 'L652', 'D247', 'L254', 'D60', 'R908', 'U581', 'L731', 'D634', 'R286', 'D186', 'R9', 'D983', 'L181', 'U262', 'R241', 'D674', 'R463', 'U238', 'R600']
# matrix = (-5000, 5000)
def run(line1, line2):
matrix = []
collision = []
start = (0, 0)
def update(pos, first=True):
if not first and pos in matrix:
collision.append(pos)
else:
matrix.append(pos)
for idx, line in enumerate((line1, line2)):
first = idx == 0
start = (0, 0)
max = len(line)
for pos_idx, direction_pos in enumerate(line):
print(start, direction_pos, pos_idx, max)
direction, pos = direction_pos[0], int(direction_pos[1:])
if direction == 'R':
for i in range(pos):
update((start[0]+i+1, start[1]), first)
start = (start[0]+pos, start[1])
elif direction == 'L':
for i in range(pos):
update((start[0]-i-1, start[1]), first)
start = (start[0]-pos, start[1])
elif direction == 'U':
for i in range(pos):
update((start[0], start[1]+i+1), first)
start = (start[0], start[1]+pos)
elif direction == 'D':
for i in range(pos):
update((start[0], start[1]-i-1), first)
start = (start[0], start[1]-pos)
else:
raise Exception()
return collision
result = run(line1, line2)
# >>> c = [(-1971, -91), (-1171, 23), (-1042, -1232), (-1436, -3231), (-1436, -2664), (-1584, -3305), (-1436, -3305), (-1194, -3231), (-1313, -2664), (-1313, -2446), (-1062, -1232), (-1042, -1231), (-415, -1231), (-681, -1730), (-722, -1730), (-885, -1730), (-900, -1581), (-900, -1519), (-965, -1357), (-1042, -1357), (-1222, -1277), (-1222, -1232), (-1174, -1232), (-1174, -1277), (-1174, -1357), (-1279, -1469), (-1866, -921), (-1801, -900), (-1620, -900), (-1599, -765), (-1801, -604), (-1910, -604), (-875, 1183), (-1179, 511), (-1189, 1183), (-875, 1383), (-788, 1383), (-728, 1306), (-103, 1306), (-33, 1306), (397, 988), (673, 988), (853, 988), (912, 478), (1128, 478), (1235, 653), (853, 1040), (673, 1040), (673, 1005), (853, 1005), (1232, 653), (1232, 478), (1232, 330), (375, 548), (397, 866), (673, 866), (712, 548), (712, 478), (507, 376), (375, 376), (181, 376), (-33, 1107), (397, 1107), (1033, 1571), (1033, 1175), (1033, 1040), (1033, 1005), (912, 649), (879, 988), (879, 1005), (879, 1040), (879, 1175), (1033, 1341), (1455, 1571), (1591, 1956), (1591, 2021), (1591, 2324), (1591, 2550), (2096, 2705), (2308, 2719), (2215, 2719), (2215, 2705), (2096, 2665), (1742, 2705), (1742, 2931), (1633, 2931), (1633, 2705), (1633, 2550), (1704, 2415), (2096, 2415), (4028, 2435), (3781, 2596), (3916, 2984), (4468, 2984), (4571, 2435), (4681, 2114), (6636, 2004), (9081, 4338), (9386, 4541), (9386, 4067), (9762, 4022), (9762, 4067), (9933, 4137), (10521, 6751), (11297, 6943), (10715, 6485), (10615, 5788), (11255, 5326), (11448, 5048), (11466, 4917), (12453, 7093), (12949, 7093), (13282, 6957), (13282, 6929), (13282, 6870), (12887, 4862), (12410, 3738), (13103, 3362), (14002, 3395), (14170, 2919), (14102, 2848), (14102, 2919), (14170, 3389), (12410, 3827), (12068, 3738), (12068, 3620), (12551, 3520), (12722, 3546), (12410, 4101), (11991, 3738), (11991, 3620), (11991, 3580), (11991, 3520), (12286, 2560)]
best = 10000
for i in result:
a, b = i
now = abs(a) + abs(b)
if best > now:
best = now
print(best)
################################################
def run2(line1, line2):
matrix = []
counters = {}
collision = []
start = (0, 0)
def update(pos, counts=0, first=True):
if first:
if pos not in counters:
counters[pos] = counts
else:
counters[pos] = min(counts, counters[pos])
matrix.append(pos)
if not first and pos in matrix:
collision.append((pos, counts + counters[pos]))
for idx, line in enumerate((line1, line2)):
first = idx == 0
start = (0, 0)
max = len(line)
counts = 0
for pos_idx, direction_pos in enumerate(line):
print(start, direction_pos, pos_idx, max)
direction, pos = direction_pos[0], int(direction_pos[1:])
if direction == 'R':
for i in range(pos):
counts += 1
update((start[0]+i+1, start[1]), counts, first)
start = (start[0]+pos, start[1])
elif direction == 'L':
for i in range(pos):
counts += 1
update((start[0]-i-1, start[1]), counts, first)
start = (start[0]-pos, start[1])
elif direction == 'U':
for i in range(pos):
counts += 1
update((start[0], start[1]+i+1), counts, first)
start = (start[0], start[1]+pos)
elif direction == 'D':
for i in range(pos):
counts += 1
update((start[0], start[1]-i-1), counts, first)
start = (start[0], start[1]-pos)
else:
raise Exception()
return collision
result2 = run2(line1, line2)
result2.sort(key=lambda _: _[1])
print(result2[0]) # ((1128, 478), 56410)
| 94.886792
| 2,384
| 0.491151
|
a58046960ca54e2a6b0218d8d088e432bb54b136
| 3,408
|
py
|
Python
|
test/resttest/comments.py
|
informatics-isi-edu/ermrest
|
1a4002c94c46b43089f704a65a6d2be8730396fd
|
[
"Apache-2.0"
] | 4
|
2015-04-27T21:25:54.000Z
|
2022-01-15T18:56:37.000Z
|
test/resttest/comments.py
|
informatics-isi-edu/ermrest
|
1a4002c94c46b43089f704a65a6d2be8730396fd
|
[
"Apache-2.0"
] | 215
|
2015-05-06T23:59:19.000Z
|
2022-02-07T23:37:56.000Z
|
test/resttest/comments.py
|
informatics-isi-edu/ermrest
|
1a4002c94c46b43089f704a65a6d2be8730396fd
|
[
"Apache-2.0"
] | 8
|
2015-08-26T19:23:39.000Z
|
2018-06-13T00:18:52.000Z
|
import unittest
import common
import basics
_S = 'comments'
_T2b = basics._T2b
_defs = basics.defs(_S)
_table_defs = _defs['schemas'][_S]['tables']
def setUpModule():
r = common.primary_session.get('schema/%s' % _S)
if r.status_code == 404:
# idempotent because unittest can re-enter module several times...
common.primary_session.post('schema', json=_defs).raise_for_status()
def add_comment_tests(klass):
# generate comment API tests over many resources in table
resources = basics.expand_table_resources(_S, _table_defs, klass.table)
for i in range(len(resources)):
def make_test_absent(i):
def test_absent(self):
r = self.session.get(resources[i])
self.assertHttp(r, 200, 'application/json')
d = r.json()
if isinstance(d, list):
for x in d:
# foreign key resource returns a list of objects
self.assertEqual(x['comment'], None)
else:
self.assertEqual(d['comment'], None)
self.assertHttp(self.session.get('%s/comment' % resources[i]), 404)
return test_absent
setattr(klass, 'test_%02d_1_absent' % i, make_test_absent(i))
def make_test_apply(i):
newval = 'Comment on %s.' % resources[i]
def test_apply(self):
self.assertHttp(self.session.put('%s/comment' % resources[i], data=newval, headers={"Content-Type": "text/plain"}), 204)
return test_apply
setattr(klass, 'test_%02d_2_apply' % i, make_test_apply(i))
def make_test_confirm(i):
newval = 'Comment on %s.' % resources[i]
def test_confirm(self):
r = self.session.get(resources[i])
self.assertHttp(r, 200, 'application/json')
d = r.json()
if isinstance(d, list):
for x in d:
# foreign key resource returns a list of objects
self.assertEqual(x['comment'], newval)
else:
self.assertEqual(d['comment'], newval)
r = self.session.get('%s/comment' % resources[i])
self.assertHttp(r, 200, 'text/plain')
# TODO: is this trailing newline a bug?
self.assertEqual(r.text[0:-1], newval)
return test_confirm
setattr(klass, 'test_%02d_3_confirm' % i, make_test_confirm(i))
def make_test_delete(i):
def test_delete(self):
self.assertHttp(self.session.delete('%s/comment' % resources[i]), 200)
self.assertHttp(self.session.get('%s/comment' % resources[i]), 404)
return test_delete
setattr(klass, 'test_%02d_4_delete' % i, make_test_delete(i))
def make_test_bad_apply(i):
newval = [ 'Comment on %s.' % resources[i], ]
def test_bad_apply(self):
self.assertHttp(self.session.put('%s' % resources[i], json={"comment": newval}, headers={"Content-Type": "text/plain"}), 400)
return test_bad_apply
setattr(klass, 'test_%02d_5_bad_apply' % i, make_test_bad_apply(i))
return klass
@add_comment_tests
class Comments (common.ErmrestTest):
table = _T2b
if __name__ == '__main__':
unittest.main(verbosity=2)
| 40.094118
| 141
| 0.575704
|
fc78e95dc653c3abc49425c5a966c38605f1c080
| 2,373
|
py
|
Python
|
tests/integration/compare_test.py
|
numenta/cortipy
|
908fc461c8116b0dfb4d66bbd91fa68b1d05d642
|
[
"MIT"
] | 8
|
2015-05-13T22:04:23.000Z
|
2018-01-24T19:38:06.000Z
|
tests/integration/compare_test.py
|
numenta/cortipy
|
908fc461c8116b0dfb4d66bbd91fa68b1d05d642
|
[
"MIT"
] | 25
|
2015-04-30T19:02:16.000Z
|
2016-02-25T22:50:03.000Z
|
tests/integration/compare_test.py
|
numenta/cortipy
|
908fc461c8116b0dfb4d66bbd91fa68b1d05d642
|
[
"MIT"
] | 16
|
2015-04-30T15:51:33.000Z
|
2018-08-25T05:10:53.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Numenta, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This test verifies that compare correctly does the call to Cortical.io's
API and gets a dictionary of distances
"""
import cortipy
import unittest
class CompareTest(unittest.TestCase):
"""Requires CORTICAL_API_KEY to be set"""
def testCompare(self):
"""
Tests client.compare(). Asserts the returned object is a dictionary
containing the expected distance metrics.
"""
client = cortipy.CorticalClient(useCache=False)
bitmap1 = client.getBitmap("one")["fingerprint"]["positions"]
bitmap2 = client.getBitmap("two")["fingerprint"]["positions"]
distances = client.compare(bitmap1, bitmap2)
types = ["cosineSimilarity", "euclideanDistance", "jaccardDistance",
"overlappingAll", "overlappingLeftRight", "overlappingRightLeft",
"sizeLeft", "sizeRight", "weightedScoring"]
self.assertIsInstance(distances, dict,
"The returned object is not a dictionary")
for t in types:
self.assertIn(t, distances,
"No \'{}\' field in the distances".format(t))
for t in types:
self.assertIsInstance(distances[t], (float, int),
"No \'{}\' field in the distances".format(t))
if __name__ == '__main__':
unittest.main()
| 35.954545
| 79
| 0.726507
|
1b452296748877444304f1af1613984754254bf8
| 2,135
|
py
|
Python
|
ptsites/sites/hd-space.py
|
czahoi/flexget_qbittorrent_mod
|
c001d9ece050136bbff5876697b12079a841af3e
|
[
"MIT"
] | null | null | null |
ptsites/sites/hd-space.py
|
czahoi/flexget_qbittorrent_mod
|
c001d9ece050136bbff5876697b12079a841af3e
|
[
"MIT"
] | null | null | null |
ptsites/sites/hd-space.py
|
czahoi/flexget_qbittorrent_mod
|
c001d9ece050136bbff5876697b12079a841af3e
|
[
"MIT"
] | null | null | null |
from ..schema.xbtit import XBTIT
from ..utils import net_utils
from ..utils.value_hanlder import handle_infinite
class MainClass(XBTIT):
URL = 'https://hd-space.org/'
SUCCEED_REGEX = 'Welcome back .*?</span> '
USER_CLASSES = {
'uploaded': [2199023255552],
'share_ratio': [4.25]
}
@property
def details_selector(self) -> dict:
selector = super().details_selector
net_utils.dict_merge(selector, {
'user_id': 'index.php\\?page=usercp&uid=(\\d+)',
'detail_sources': {
'default': {
'link': '/index.php?page=usercp&uid={}',
'elements': {
'bar': 'table.lista table.lista',
'table': 'body > div:nth-child(2) > table > tbody > tr > td > table > tbody > tr > td > table > tbody > tr > td > table > tbody > tr > td > table > tbody > tr > td > table:nth-child(9) > tbody > tr:nth-child(2) > td > table:nth-child(2) > tbody > tr > td:nth-child(4) > table'
}
}
},
'details': {
'uploaded': {
'regex': 'UP: ([\\d.]+ [ZEPTGMK]B)'
},
'downloaded': {
'regex': 'DL: ([\\d.]+ [ZEPTGMK]B)'
},
'share_ratio': {
'regex': 'Ratio: (---|[\\d.]+)',
'handle': handle_infinite
},
'points': {
'regex': 'Bonus: (---|[\\d,.]+)',
'handle': handle_infinite
},
'join_date': {
'regex': 'Joined on.{5}(.*?\\d{4})',
'handle': self.handle_join_date
},
'seeding': None,
'leeching': None,
'hr': None
}
})
return selector
def get_messages(self, entry, config):
self.get_XBTIT_message(entry, config,
MESSAGES_URL_REGEX='index.php\\?page=usercp&uid=\\d+&do=pm&action=list')
| 36.186441
| 300
| 0.43185
|
358f8bf1afe781cfae54e0bb5eb051ba80dc16cb
| 6,890
|
py
|
Python
|
core/migrations/0053_remove_wagtail_personalisation.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 10
|
2020-04-30T12:04:35.000Z
|
2021-07-21T12:48:55.000Z
|
core/migrations/0053_remove_wagtail_personalisation.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 1,461
|
2020-01-23T18:20:26.000Z
|
2022-03-31T08:05:56.000Z
|
core/migrations/0053_remove_wagtail_personalisation.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 3
|
2020-04-07T20:11:36.000Z
|
2020-10-16T16:22:59.000Z
|
# Generated by Django 2.2.18 on 2021-02-02 12:36
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
from django.db import migrations
import core.blocks
class Migration(migrations.Migration):
dependencies = [
('core', '0052_update_meta_options_for_snippets'),
]
operations = [
# DELIBERATELY DISABLED AS PART OF CONTROLLED REMOVAL - NO LONGER CREATED (SEE SQUASHED MIGRATION)
# migrations.RemoveField(
# model_name='matchfirstcountryofinterestrule',
# name='country',
# ),
# migrations.RemoveField(
# model_name='matchfirstcountryofinterestrule',
# name='segment',
# ),
# migrations.RemoveField(
# model_name='matchfirstindustryofinterestrule',
# name='segment',
# ),
# migrations.RemoveField(
# model_name='matchproductexpertise',
# name='product',
# ),
# migrations.RemoveField(
# model_name='matchproductexpertise',
# name='segment',
# ),
migrations.AlterField(
model_name='detailpage',
name='body',
field=wagtail.core.fields.StreamField(
[
(
'paragraph',
wagtail.core.blocks.StructBlock(
[('paragraph', wagtail.core.blocks.RichTextBlock())],
icon='fa-font',
template='core/struct_paragraph_block.html',
),
),
(
'video',
wagtail.core.blocks.StructBlock(
[('video', core.blocks.MediaChooserBlock())],
help_text='Video displayed within a full-page-width block',
template='core/includes/_video_full_width.html',
),
),
('case_study', core.blocks.CaseStudyStaticBlock(icon='fa-book')),
(
'Step',
wagtail.core.blocks.StructBlock(
[
('title', wagtail.core.blocks.CharBlock(max_length=255)),
('body', wagtail.core.blocks.RichTextBlock()),
('image', wagtail.images.blocks.ImageChooserBlock(required=False)),
],
icon='cog',
),
),
(
'fictional_example',
wagtail.core.blocks.StructBlock(
[('fiction_body', wagtail.core.blocks.RichTextBlock(icon='openquote'))],
icon='fa-commenting-o',
template='learn/fictional_company_example.html',
),
),
(
'ITA_Quote',
wagtail.core.blocks.StructBlock(
[
('quote', wagtail.core.blocks.RichTextBlock()),
('author', wagtail.core.blocks.CharBlock(max_length=255)),
],
icon='fa-quote-left',
),
),
(
'pros_cons',
wagtail.core.blocks.StructBlock(
[
(
'pros',
wagtail.core.blocks.StreamBlock(
[
(
'item',
wagtail.core.blocks.StructBlock(
[('item', wagtail.core.blocks.CharBlock(max_length=255))],
icon='fa-arrow-right',
),
)
]
),
),
(
'cons',
wagtail.core.blocks.StreamBlock(
[
(
'item',
wagtail.core.blocks.StructBlock(
[('item', wagtail.core.blocks.CharBlock(max_length=255))],
icon='fa-arrow-right',
),
)
]
),
),
],
icon='fa-arrow-right',
template='learn/pros_and_cons.html',
),
),
(
'choose_do_not_choose',
wagtail.core.blocks.StructBlock(
[
('choose_title', wagtail.core.blocks.CharBlock(max_length=255)),
('choose_body', wagtail.core.blocks.RichTextBlock(features=())),
('do_not_choose_title', wagtail.core.blocks.CharBlock(max_length=255)),
('do_not_choose_body', wagtail.core.blocks.RichTextBlock(features=())),
]
),
),
(
'image',
core.blocks.ImageBlock(
help_text='Image displayed within a full-page-width block',
template='core/includes/_image_full_width.html',
),
),
]
),
),
# DELIBERATELY DISABLED AS PART OF CONTROLLED REMOVAL - NO LONGER CREATED (SEE SQUASHED MIGRATION)
# migrations.DeleteModel(
# name='MatchCountryQuerystring',
# ),
# migrations.DeleteModel(
# name='MatchFirstCountryOfInterestRule',
# ),
# migrations.DeleteModel(
# name='MatchFirstIndustryOfInterestRule',
# ),
# migrations.DeleteModel(
# name='MatchProductExpertise',
# ),
]
| 42.530864
| 110
| 0.367925
|
7cc6452ca6df8872233f200da00a75234ebc4e53
| 1,653
|
py
|
Python
|
marshpy/fields/path_field.py
|
an-otter-world/marshpy
|
42aed8e5f316358792356c7e550f844a08bf206e
|
[
"WTFPL"
] | null | null | null |
marshpy/fields/path_field.py
|
an-otter-world/marshpy
|
42aed8e5f316358792356c7e550f844a08bf206e
|
[
"WTFPL"
] | 16
|
2021-03-26T08:32:29.000Z
|
2021-03-27T10:37:24.000Z
|
marshpy/fields/path_field.py
|
an-otter-world/marshpy
|
42aed8e5f316358792356c7e550f844a08bf206e
|
[
"WTFPL"
] | null | null | null |
"""Path field class & utilities."""
from gettext import gettext as _
from pathlib import Path
from typing import Any
from typing import Optional
from marshpy.core.constants import UNDEFINED
from marshpy.core.errors import ErrorCode
from marshpy.core.interfaces import ILoadingContext
from marshpy.core.validation import ValidateCallback
from marshpy.fields.scalar_field import ScalarField
class PathField(ScalarField):
"""Path YAML object field."""
def __init__(
self,
required: bool = False,
validate: Optional[ValidateCallback] = None,
must_exist: bool = True
):
"""Initialize the Path field.
Args:
required: See BaseField constructor.
validate: See BaseField constructor.
must_exist: If true, a VALIDATION_ERROR will be emitted if the file
doesn't exist when the field is deserialized.
"""
super().__init__(required=required, validate=validate)
self._must_exist = must_exist
def _convert(self, context: ILoadingContext, value: str) -> Any:
path = Path(value)
if not path.is_absolute() and not path.exists():
location_str = context.current_location()
if location_str is not None:
location = Path(location_str)
parent = location.parent
path = parent / path
if self._must_exist and not path.exists():
context.error(
ErrorCode.VALIDATION_ERROR,
_('Cannot find path {}.'),
path
)
return UNDEFINED
return path
| 30.611111
| 79
| 0.626134
|
6923d1dece74e68b3979bdd00b798b38f6412719
| 813
|
py
|
Python
|
lists_and_dicts.py
|
acroooo/intermediate-python
|
e2cf1d5c397cc94fd5ce38085802d099b3633c6c
|
[
"MIT"
] | null | null | null |
lists_and_dicts.py
|
acroooo/intermediate-python
|
e2cf1d5c397cc94fd5ce38085802d099b3633c6c
|
[
"MIT"
] | null | null | null |
lists_and_dicts.py
|
acroooo/intermediate-python
|
e2cf1d5c397cc94fd5ce38085802d099b3633c6c
|
[
"MIT"
] | null | null | null |
def run():
my_list = [1, 'Hi', True, 4.5]
my_dict = {
"first_name": "Hernan",
"last_name": "Chamorro",
}
super_list = [
{ "first_name": "Hernan", "last_name": "Chamorro",},
{ "first_name": "Gustavo", "last_name": "Ramon",},
{ "first_name": "Bruno", "last_name": "Facundo",},
{ "first_name": "Geronimo", "last_name": "Atahualpa",},
]
super_dict = {
"natural_nums": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"integer_nums": [-2, -1, 0, 1, 2, 3, 4, 5,],
"floating_nums": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
}
for key, value in super_dict.items():
print(key, "-", value)
for dict in super_list:
print(dict['first_name'], "-", dict['last_name'])
if __name__ == '__main__':
run()
| 29.035714
| 76
| 0.494465
|
416dca22e0655e26460b2735ef811a814b83981e
| 703
|
py
|
Python
|
src/ggrc/contributions.py
|
sbilly/ggrc-core
|
59a6825c6a8e15e42ebdb9e08d079cefd1800120
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/contributions.py
|
sbilly/ggrc-core
|
59a6825c6a8e15e42ebdb9e08d079cefd1800120
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/contributions.py
|
sbilly/ggrc-core
|
59a6825c6a8e15e42ebdb9e08d079cefd1800120
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Lists of ggrc contributions."""
from ggrc.notifications import common
from ggrc.notifications import notification_handlers
from ggrc.notifications import data_handlers
CONTRIBUTED_CRON_JOBS = [
common.send_todays_digest_notifications
]
NOTIFICATION_LISTENERS = [
notification_handlers.register_handlers
]
def contributed_notifications():
"""Get handler functions for ggrc notification file types."""
return {
"Request": data_handlers.get_assignable_data,
"Assessment": data_handlers.get_assignable_data,
"Comment": data_handlers.get_comment_data,
}
| 26.037037
| 78
| 0.775249
|
b0ce6bfbdd5c904a44a206094523a4f8298cdb36
| 698
|
py
|
Python
|
api_restful/__init__.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 1
|
2020-06-21T04:08:26.000Z
|
2020-06-21T04:08:26.000Z
|
api_restful/__init__.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 13
|
2019-10-18T17:19:32.000Z
|
2022-01-13T00:44:43.000Z
|
api_restful/__init__.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 5
|
2019-02-07T03:15:16.000Z
|
2021-09-04T14:06:28.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: __init__.py
@time: 2020-02-28 21:01
"""
from flask import Flask
from logging.config import dictConfig
from api_restful.apis import api_bearing
from api_restful.blueprints import bp_bearing
from config import current_config
# from api_restful.middlewares.logger_middleware import LoggerMiddleware
app = Flask(__name__)
# app.wsgi_app = LoggerMiddleware(app.wsgi_app)
# Load Config
app.config.from_object(current_config)
# Register Blueprint
app.register_blueprint(bp_bearing)
# Configure logging
dictConfig(app.config['LOG_CONFIG'])
# Add Resource Urls
from api_restful import urls
from api_restful.user import url
| 19.388889
| 72
| 0.797994
|
9caceeffa47b7892783b2074d1b678aa7ec6202a
| 359
|
py
|
Python
|
pybook/ch13/WirteDemo.py
|
YanhaoXu/python-learning
|
856687a71635a2ca67dab49d396c238f128e5ec0
|
[
"MIT"
] | 2
|
2021-12-06T13:29:48.000Z
|
2022-01-20T11:39:45.000Z
|
pybook/ch13/WirteDemo.py
|
YanhaoXu/python-learning
|
856687a71635a2ca67dab49d396c238f128e5ec0
|
[
"MIT"
] | null | null | null |
pybook/ch13/WirteDemo.py
|
YanhaoXu/python-learning
|
856687a71635a2ca67dab49d396c238f128e5ec0
|
[
"MIT"
] | null | null | null |
from pybook.ch13.FIleTestConst import TEST_PATH
def main():
# Open the output file
outfile = open(TEST_PATH + "Presidents.txt", "w")
# Write data to the file
outfile.write("Bill Clinton\n")
outfile.write("George Bush\n")
outfile.write("Barack Obama")
outfile.close() # Close the output file
main() # Call the main function
| 21.117647
| 53
| 0.671309
|
f2935611bc2ce80e397bdcbced78980c0ba606cc
| 3,343
|
py
|
Python
|
tests/test_initializers.py
|
OliverZijia/tensorlayer2
|
01113b53e84a3bbb298b9c35ebd53254e487350f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_initializers.py
|
OliverZijia/tensorlayer2
|
01113b53e84a3bbb298b9c35ebd53254e487350f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_initializers.py
|
OliverZijia/tensorlayer2
|
01113b53e84a3bbb298b9c35ebd53254e487350f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
import numpy as np
from tests.utils import CustomTestCase
class Test_Initializers(CustomTestCase):
@classmethod
def setUpClass(cls):
cls.ni = tl.layers.Input(shape=[16, 10])
cls.w_shape = (10, 5)
cls.eps = 0.0
@classmethod
def tearDownClass(cls):
pass
def init_dense(self, w_init):
return tl.layers.Dense(n_units=self.w_shape[1], in_channels=self.w_shape[0], W_init=w_init)
def test_zeros(self):
dense = self.init_dense(tl.initializers.zeros())
self.assertEqual(np.sum(dense.weights[0].numpy() - np.zeros(shape=self.w_shape)), self.eps)
nn = dense(self.ni)
def test_ones(self):
dense = self.init_dense(tl.initializers.ones())
self.assertEqual(np.sum(dense.weights[0].numpy() - np.ones(shape=self.w_shape)), self.eps)
nn = dense(self.ni)
def test_constant(self):
dense = self.init_dense(tl.initializers.constant(value=5.0))
self.assertEqual(np.sum(dense.weights[0].numpy() - np.ones(shape=self.w_shape) * 5.0), self.eps)
nn = dense(self.ni)
# test with numpy arr
arr = np.random.uniform(size=self.w_shape).astype(np.float32)
dense = self.init_dense(tl.initializers.constant(value=arr))
self.assertEqual(np.sum(dense.weights[0].numpy() - arr), self.eps)
nn = dense(self.ni)
def test_RandomUniform(self):
dense = self.init_dense(tl.initializers.random_uniform(minval=-0.1, maxval=0.1, seed=1234))
print(dense.weights[0].numpy())
nn = dense(self.ni)
def test_RandomNormal(self):
dense = self.init_dense(tl.initializers.random_normal(mean=0.0, stddev=0.1))
print(dense.weights[0].numpy())
nn = dense(self.ni)
def test_TruncatedNormal(self):
dense = self.init_dense(tl.initializers.truncated_normal(mean=0.0, stddev=0.1))
print(dense.weights[0].numpy())
nn = dense(self.ni)
def test_deconv2d_bilinear_upsampling_initializer(self):
rescale_factor = 2
imsize = 128
num_channels = 3
num_in_channels = 3
num_out_channels = 3
filter_shape = (5, 5, num_out_channels, num_in_channels)
ni = tl.layers.Input(shape=(1, imsize, imsize, num_channels))
bilinear_init = tl.initializers.deconv2d_bilinear_upsampling_initializer(shape=filter_shape)
deconv_layer = tl.layers.DeConv2dLayer(shape=filter_shape,
outputs_shape=(1, imsize * rescale_factor, imsize * rescale_factor,
num_out_channels),
strides=(1, rescale_factor, rescale_factor, 1),
W_init=bilinear_init,
padding='SAME',
act=None, name='g/h1/decon2d')
nn = deconv_layer(ni)
def test_config(self):
init = tl.initializers.constant(value=5.0)
new_init = tl.initializers.Constant.from_config(init.get_config())
if __name__ == '__main__':
unittest.main()
| 35.946237
| 114
| 0.608436
|
2865698e096f995fee16fe0884ef4253ec40b3e3
| 522
|
py
|
Python
|
Notebook_surface/spyder_test.py
|
Jaknil/Anaconda-python
|
de80d7360c36c2abeb5ac922211e815a0e9e57ca
|
[
"MIT"
] | null | null | null |
Notebook_surface/spyder_test.py
|
Jaknil/Anaconda-python
|
de80d7360c36c2abeb5ac922211e815a0e9e57ca
|
[
"MIT"
] | null | null | null |
Notebook_surface/spyder_test.py
|
Jaknil/Anaconda-python
|
de80d7360c36c2abeb5ac922211e815a0e9e57ca
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
mpl.rcParams['legend.fontsize'] = 10
res = 100 # number of sample points along the curve
#%%
fig = plt.figure()
ax = fig.gca(projection='3d')
theta = np.linspace(-4 * np.pi, 4 * np.pi,res)
z = np.linspace(-2, 2, res)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
ax.plot(x, y, z, label='parametric curve')
#ax.legend()
plt.show()
| 18.642857
| 46
| 0.662835
|
0e6d8448dc4a5d25b58266a9a1e2d6f7fc20c35f
| 703
|
py
|
Python
|
clmm/__init__.py
|
nicolahunfeld/CLMM
|
a431649713e56b907a7366bdf21693c30851dee7
|
[
"BSD-3-Clause"
] | null | null | null |
clmm/__init__.py
|
nicolahunfeld/CLMM
|
a431649713e56b907a7366bdf21693c30851dee7
|
[
"BSD-3-Clause"
] | null | null | null |
clmm/__init__.py
|
nicolahunfeld/CLMM
|
a431649713e56b907a7366bdf21693c30851dee7
|
[
"BSD-3-Clause"
] | null | null | null |
""" CLMM is a cluster mass modeling code. """
from .gcdata import GCData
from .galaxycluster import GalaxyCluster
from .dataops import compute_tangential_and_cross_components, make_radial_profile
from .utils import compute_radial_averages, make_bins, convert_units
from .theory import (
compute_reduced_shear_from_convergence, compute_magnification_bias_from_magnification,
compute_3d_density, compute_surface_density, compute_excess_surface_density,
compute_critical_surface_density,compute_tangential_shear, compute_convergence,
compute_reduced_tangential_shear, compute_magnification, compute_magnification_bias,
Modeling, Cosmology
)
from . import support
__version__ = '1.1.7'
| 43.9375
| 90
| 0.846373
|
67193ce3222280f4a7e817879df26bac3cadb4d5
| 166
|
py
|
Python
|
backend/venv/lib/python3.6/site-packages/tatsu/g2e/__main__.py
|
HalmonLui/square-hackathon
|
62d5be7a229f9e39e27a546c164facd779d28aa4
|
[
"MIT"
] | 3
|
2020-06-13T02:47:29.000Z
|
2020-06-20T17:34:15.000Z
|
backend/venv/lib/python3.6/site-packages/tatsu/g2e/__main__.py
|
HalmonLui/square-hackathon
|
62d5be7a229f9e39e27a546c164facd779d28aa4
|
[
"MIT"
] | 2
|
2020-06-14T20:29:26.000Z
|
2020-06-14T20:29:34.000Z
|
backend/venv/lib/python3.6/site-packages/tatsu/g2e/__main__.py
|
HalmonLui/square-hackathon
|
62d5be7a229f9e39e27a546c164facd779d28aa4
|
[
"MIT"
] | 1
|
2020-09-04T01:45:39.000Z
|
2020-09-04T01:45:39.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from . import main
if __name__ == '__main__':
main()
| 20.75
| 82
| 0.716867
|
028ec1ff77df315fc950da5dfc6867640b62c16d
| 3,891
|
py
|
Python
|
pymatgen/io/abinitio/tests/test_abiobjects.py
|
NadezhdaBzhilyanskaya/pymatgen
|
fae11a8142d457a649fa84ff9781eb2b39334bdc
|
[
"MIT"
] | 1
|
2022-02-28T04:24:46.000Z
|
2022-02-28T04:24:46.000Z
|
pymatgen/io/abinitio/tests/test_abiobjects.py
|
NadezhdaBzhilyanskaya/pymatgen
|
fae11a8142d457a649fa84ff9781eb2b39334bdc
|
[
"MIT"
] | null | null | null |
pymatgen/io/abinitio/tests/test_abiobjects.py
|
NadezhdaBzhilyanskaya/pymatgen
|
fae11a8142d457a649fa84ff9781eb2b39334bdc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division, print_function
import os
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV
from pymatgen.io.abinitio.abiobjects import *
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
def cif_paths():
cifpaths = []
print(test_dir)
for fname in os.listdir(test_dir):
fname = os.path.join(test_dir, fname)
if os.path.isfile(fname) and fname.endswith(".cif"):
cifpaths.append(fname)
assert cifpaths
return cifpaths
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.asspinmode("polarized")
other_polarized = SpinMode.asspinmode("polarized")
unpolarized = SpinMode.asspinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.assmearing("fermi_dirac:1 eV")
print(fd1ev)
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.assmearing("fermi_dirac:"+ str(1.0/Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
new_fd1ev = Smearing.from_dict(fd1ev.to_dict)
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
print(algo.to_abivars())
# Test pickle
self.serialize_with_pickle(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol==2)
self.assertTrue(default_electrons.nspinor==1)
self.assertTrue(default_electrons.nspden==2)
print(default_electrons.to_abivars())
#new = Electron.from_dict(default_electrons.to_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
class AbiStructureTest(PymatgenTest):
def setUp(self):
self.cif_paths = cif_paths()
def test_asabistructure(self):
for cif_path in self.cif_paths:
print("about to init abistructure from %s " % cif_path)
st = asabistructure(cif_path)
self.assertTrue(st is asabistructure(st))
self.assertTrue(isinstance(st, Structure))
# TODO
if not st.is_ordered:
print("Unordered structures are not supported")
continue
print(st.to_abivars())
# Test pickle
# FIXME: protocol 2 does not work due to __new__
self.serialize_with_pickle(st, protocols=[0, 1], test_eq=True)
#class KSamplingTest(PymatgenTest):
#class RelaxationTest(PymatgenTest):
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.asppmodel("godby:12 eV")
print(godby)
print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.asppmodel("godby:"+ str(12.0/Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.to_dict)
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
if __name__ == '__main__':
import unittest
unittest.main()
| 26.469388
| 74
| 0.653559
|
b445c7a240a183904d3d03ac359dae79963d2bde
| 6,483
|
py
|
Python
|
TheNetwork/whisper_detector.py
|
llmaayanll/TheImageWhisperer
|
6525663aaeab5b9dfc454b69d1b17041f4003ec7
|
[
"MIT"
] | null | null | null |
TheNetwork/whisper_detector.py
|
llmaayanll/TheImageWhisperer
|
6525663aaeab5b9dfc454b69d1b17041f4003ec7
|
[
"MIT"
] | null | null | null |
TheNetwork/whisper_detector.py
|
llmaayanll/TheImageWhisperer
|
6525663aaeab5b9dfc454b69d1b17041f4003ec7
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
import numpy as np
import os
import json
from TheNetwork.veggie import VeGGieModel
class WhisperDetector(object):
""""""
def __init__(self, max_num_pics_per_category=None, epochs=250, batch_size=24):
self.max_num_pics_per_category = max_num_pics_per_category or float('inf')
self.epochs = epochs
self.batch_size = batch_size
self.model = None
def build(self):
"""Build the VeGGie architecture."""
veggie_model = VeGGieModel()
self.model = veggie_model.build_veggie_model()
def load_weights(self, h5_filename):
"""For fail-safe reasons, sometimes we train in separate epochs, and save weights between epochs."""
print("Loading weights of previously trained model.")
self.model.load_weights(h5_filename)
def json_filename_to_array(self, json_filename):
"""Load .json filename into a numpy array that fits into VeGGie network."""
a = json.load(open(json_filename))
a = np.array([[[pix for pix in row] for row in color] for color in a])
a = a.transpose(1, 2, 0)
return a
def folder_to_array(self, folder_path):
"""Load all images from a folder and put in a numpy array of one batch."""
array_list = []
for i, filename in enumerate(os.listdir(folder_path)):
arr = self.json_filename_to_array(folder_path + "/" + filename)
array_list.append(arr)
if i > self.max_num_pics_per_category:
break
res = np.asarray(array_list)
return res
def unison_shuffled_copies(self, a, b):
"""Shuffle order of input photos in the batch."""
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def load_data(self):
"""
Loads the data, split between train and test sets and shuffles it
"""
train_path_stegged = 'C:/Users/Rey/Projects/TheImageWhisperer/Data/train/stegged'
train_path_not_stegged = 'C:/Users/Rey/Projects/TheImageWhisperer/Data/train/not_stegged'
test_path_stegged = 'C:/Users/Rey/Projects/TheImageWhisperer/Data/validate/stegged'
test_path_not_stegged = 'C:/Users/Rey/Projects/TheImageWhisperer/Data/validate/not_stegged'
x_train_stegged = self.folder_to_array(train_path_stegged)
x_train_not_stegged = self.folder_to_array(train_path_not_stegged)
x_test_stegged = self.folder_to_array(test_path_stegged)
x_test_not_stegged = self.folder_to_array(test_path_not_stegged)
x_train = np.concatenate((x_train_stegged, x_train_not_stegged), axis=0)
x_test = np.concatenate((x_test_stegged, x_test_not_stegged), axis=0)
y_train = np.zeros(len(x_train_stegged) + len(x_train_not_stegged))
y_test = np.zeros(len(x_test_stegged) + len(x_test_not_stegged))
y_train[:len(x_train_stegged)] = 1
y_test[:len(x_test_stegged)] = 1
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = self.normalize(x_train, x_test)
x_train, y_train = self.unison_shuffled_copies(x_train, y_train)
x_test, y_test = self.unison_shuffled_copies(x_test, y_test)
return (x_train, y_train), (x_test, y_test)
def normalize(self, X_train, X_test):
"""
This function normalizes inputs to zero mean and unit variance;
it is used when training a model.
Input: training set and test set.
Output: training set and test set normalized according to the training set statistics.
"""
mean = np.mean(X_train, axis=(0, 1, 2, 3))
std = np.std(X_train, axis=(0, 1, 2, 3))
X_train = (X_train - mean) / (std + 1e-7)
X_test = (X_test - mean) / (std + 1e-7)
return X_train, X_test
def train(self):
"""
Train the model with new data.
This is where the transfer learning happens: the VGG part of the network is already trained,
and now we expose the model to a new data set of CIFAR10 images,
a random half of which were manipulated using various steganography algorithms.
"""
# training parameters
batch_size = self.batch_size
maxepoches = self.epochs
learning_rate = 0.1
lr_decay = 1e-6
lr_drop = 20
(x_train, y_train), (x_test, y_test) = self.load_data()
# data augmentation - only flip as we don't want to harm the stegged data
datagen = ImageDataGenerator(
horizontal_flip=True, # randomly flip images
vertical_flip=True) # randomly flip images
datagen.fit(x_train)
# optimization details
sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
self.model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
# training process with a learning rate drop every lr_drop (20) epochs.
reduce_lr = self.reduce_lr(learning_rate, lr_drop)
self.model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=maxepoches,
validation_data=(x_test, y_test), callbacks=[reduce_lr], verbose=2)
# self.model.fit_generator(datagen.flow(x_train, y_train,
# batch_size=batch_size),
# steps_per_epoch=x_train.shape[0] // batch_size,
# epochs=maxepoches,
# validation_data=(x_test, y_test), verbose=2)
self.save_trained_model('veggie.h5')
def reduce_lr(self, learning_rate, lr_drop):
"""Keras callback to reduce learning rate as the learning progresses."""
return keras.callbacks.LearningRateScheduler(
lambda epoch: learning_rate * (0.5 ** (epoch // lr_drop)))
def predict(self, json_file):
arr = self.json_filename_to_array(json_file)
arr = np.expand_dims(np.array(arr), axis=0)  # add the batch dimension Keras expects
return self.model.predict(arr)
def save_trained_model(self, h5_filename='veggie.h5'):
self.model.save_weights(h5_filename)
| 42.372549
| 109
| 0.641678
|
36f9e614e5f5cfa7041a7da8060d6e22ec1c943a
| 1,468
|
py
|
Python
|
src/rdbms/setup.py
|
southworkscom/azure-cli-extensions
|
543252eb78107a98e22dcf9fdb64ab1e5887bf9f
|
[
"MIT"
] | null | null | null |
src/rdbms/setup.py
|
southworkscom/azure-cli-extensions
|
543252eb78107a98e22dcf9fdb64ab1e5887bf9f
|
[
"MIT"
] | null | null | null |
src/rdbms/setup.py
|
southworkscom/azure-cli-extensions
|
543252eb78107a98e22dcf9fdb64ab1e5887bf9f
|
[
"MIT"
] | 1
|
2018-03-20T23:36:57.000Z
|
2018-03-20T23:36:57.000Z
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
VERSION = "0.0.3"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
setup(
name='rdbms',
version=VERSION,
description='An Azure CLI Extension to manage Azure MySQL and Azure PostgreSQL resources',
long_description='An Azure CLI Extension to manage Azure MySQL and Azure PostgreSQL resources',
license='MIT',
author='Rohit Joy',
author_email='rohitjoy@microsoft.com',
url='https://github.com/Azure/azure-cli-extensions',
classifiers=CLASSIFIERS,
packages=find_packages(exclude=["tests"]),
install_requires=DEPENDENCIES
)
| 34.952381
| 99
| 0.604905
|
0665626d6f957b29cb715cf3994ce2ec2ad3ceb0
| 63
|
py
|
Python
|
braingraphgeo/__init__.py
|
scott-trinkle/braingraphgeo
|
990c4956acf8fe56f9bdb8871c265c4ea28da9a9
|
[
"MIT"
] | null | null | null |
braingraphgeo/__init__.py
|
scott-trinkle/braingraphgeo
|
990c4956acf8fe56f9bdb8871c265c4ea28da9a9
|
[
"MIT"
] | null | null | null |
braingraphgeo/__init__.py
|
scott-trinkle/braingraphgeo
|
990c4956acf8fe56f9bdb8871c265c4ea28da9a9
|
[
"MIT"
] | null | null | null |
from . import utils
from . import vis
from . import surrogates
| 15.75
| 24
| 0.761905
|
aa86ddc2d5e1404a2366a590ef65ee6a4a1f8b93
| 3,423
|
py
|
Python
|
_fpl_process.py
|
leoleolam/fpl_analytics
|
ef06e9dd929d2eed17e5481b61f1921e3092371d
|
[
"MIT"
] | 2
|
2019-02-16T18:38:03.000Z
|
2021-09-24T16:30:10.000Z
|
_fpl_process.py
|
leoleolam/fpl_analytics
|
ef06e9dd929d2eed17e5481b61f1921e3092371d
|
[
"MIT"
] | null | null | null |
_fpl_process.py
|
leoleolam/fpl_analytics
|
ef06e9dd929d2eed17e5481b61f1921e3092371d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
fpl_analytics._fpl_process
This contains all utilities for FPL data processing.
"""
import pandas as pd
from collections import defaultdict
def get_player_id(fpl_data, name):
"""
get player id from the first, second or web name
"""
return {i:fpl_data["elements"][i]["first_name"]+
fpl_data["elements"][i]["second_name"]
for i in range(len(fpl_data["elements"]))
if (fpl_data["elements"][i]["first_name"].upper()== name.upper() or
fpl_data["elements"][i]["second_name"].upper()== name.upper() or
fpl_data["elements"][i]["web_name"].upper()== name.upper())}
def team_map(fpl_data):
"""
team mapping
"""
return {team["id"]: team["name"] for team in fpl_data["teams"]}
def pos_map(fpl_data):
"""
positon mapping
"""
return {team["id"]: team["singular_name"]
for team in fpl_data["element_types"]}
def generate_pos_data(fpl_data):
""" map of data per position """
res = defaultdict(list)
m = pos_map(fpl_data)
for data in fpl_data["elements"]:
pos_key = m[data["element_type"]]
res[pos_key].append(data)
return res
footballer_key = lambda x, m: (
f"{x['first_name']} {x['second_name']} {m[x['team']]} {x['id']}")
def get_performance(fpl_data, pid):
"""
get player performance (by player id) from fpl_data
"""
return fpl_data["elements"][pid]["history"]["history_summary"]
def opp_next_map(fpl_data):
"""
get next opponent map
"""
res = {}
m = team_map(fpl_data)
i = 0
seen = set()
while len(seen)<20:
fixt = fpl_data["elements"][i]["history"]["fixtures"][0]
team_h = fixt["team_h"]
team_a = fixt["team_a"]
seen.add(team_h)
seen.add(team_a)
res[m[team_a]] = m[team_h]
res[m[team_h]] = m[team_a]
i += 1
return res
def achived_from(fpl_data, pid, minutes=False):
"""
achieved points from fpl_data,
fpl_data - dict
pid - int
minutes - True/False,
whether to include minutes in the output series index
"""
p = fpl_data["elements"][pid]["history"]["history"]
m=team_map(fpl_data)
if minutes:
return pd.Series({(m[pp["opponent_team"]],
pp["minutes"]):pp["total_points"]
for pp in p}).sort_index()
else:
return pd.Series({m[pp["opponent_team"]]:pp["total_points"]
for pp in p}).sort_index()
def score_detail(fpl_data):
"""
convert fpl_data into Series
Index- multi-index of team, pos, player, opp, minutes
"""
l =[]
basic_index = ["player", "opp", "minutes"]
for i in range(len(fpl_data["elements"])):
ts=achived_from(fpl_data, i, True)
name = (fpl_data["elements"][i]["first_name"]+
fpl_data["elements"][i]["second_name"])
if len(ts)==0:
continue
ts=pd.concat([ts,], keys=[name], names=basic_index)
ele = pos_map(fpl_data)[fpl_data["elements"][i]['element_type']]
ts=pd.concat([ts,], keys=[ele], names=["pos"]+basic_index)
team = team_map(fpl_data)[fpl_data["elements"][i]['team']]
ts=pd.concat([ts,], keys=[team], names=["team", "pos"]+basic_index)
l.append(ts)
return pd.concat(l)
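# Illustrative flow (hypothetical; assumes `fpl_data` has already been fetched from the FPL API):
#   ids = get_player_id(fpl_data, "Salah")         # {player_id: full name}
#   pts = achived_from(fpl_data, next(iter(ids)))  # points achieved per opponent
#   detail = score_detail(fpl_data)                # Series indexed by team/pos/player/opp/minutes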
| 30.026316
| 76
| 0.566754
|
860a3d13b69b3111148a6a5b637df62dbe91b5e8
| 1,733
|
py
|
Python
|
scprojects/migrations/0016_auto_20200123_1721.py
|
shescoding/projects-platform-backend
|
b5ebce71e2377970283da0f8f3ddd7dae201c80e
|
[
"MIT"
] | 2
|
2020-10-11T07:51:49.000Z
|
2021-05-12T15:04:38.000Z
|
scprojects/migrations/0016_auto_20200123_1721.py
|
shescoding/projects-platform-backend
|
b5ebce71e2377970283da0f8f3ddd7dae201c80e
|
[
"MIT"
] | 20
|
2019-08-25T22:18:25.000Z
|
2022-02-10T09:04:47.000Z
|
scprojects/migrations/0016_auto_20200123_1721.py
|
shescoding/projects-platform-backend
|
b5ebce71e2377970283da0f8f3ddd7dae201c80e
|
[
"MIT"
] | 2
|
2020-09-26T22:27:58.000Z
|
2020-10-01T17:33:43.000Z
|
# Generated by Django 2.2.6 on 2020-01-23 17:21
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('scprojects', '0015_project_lead'),
]
operations = [
migrations.AddField(
model_name='project',
name='contributors',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='userprofile',
name='avatar_url',
field=models.URLField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='experience_lvl',
field=models.PositiveSmallIntegerField(blank=True),
),
migrations.AlterField(
model_name='userprofile',
name='github_id',
field=models.PositiveIntegerField(blank=True),
),
migrations.AlterField(
model_name='userprofile',
name='github_url',
field=models.URLField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='github_username',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='gravatar_url',
field=models.URLField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='position',
field=models.CharField(blank=True, max_length=255),
),
]
| 30.946429
| 70
| 0.587421
|
4cb9b9575c3b5a173e3d8f6278db1e96d0ac5a27
| 498
|
py
|
Python
|
rpi/helper.py
|
GeorgeShao/HomeNode
|
259295ff8715ebe4348d5098d32fb6bbc60a8a7a
|
[
"MIT"
] | null | null | null |
rpi/helper.py
|
GeorgeShao/HomeNode
|
259295ff8715ebe4348d5098d32fb6bbc60a8a7a
|
[
"MIT"
] | null | null | null |
rpi/helper.py
|
GeorgeShao/HomeNode
|
259295ff8715ebe4348d5098d32fb6bbc60a8a7a
|
[
"MIT"
] | null | null | null |
"""
Helper file for random functions
"""
def format_serial_data(data_string):
data_list = data_string.strip().replace('\n','').replace('\r','/').replace('\\','').split('/')
data_dict = {}
for index, value in enumerate(data_list):
if index % 2 == 0 and index + 1 < len(data_list):
if data_list[index + 1] != "":
data_dict[data_list[index]] = float(data_list[index+1].replace('\\','').strip())
return data_dict
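# --- Hedged usage sketch (added for illustration; the exact wire format is an assumption) ---
# A reading is assumed to alternate key/value tokens separated by '/' within a line and
# '\r' between pairs, e.g. "temp/23.5\rhumidity/40.1\r\n".
if __name__ == "__main__":
    sample = "temp/23.5\rhumidity/40.1\r\n"
    print(format_serial_data(sample))  # -> {'temp': 23.5, 'humidity': 40.1}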
| 38.307692
| 102
| 0.546185
|
16d1cab34aebbde17303856576113eb5e3a47f3f
| 258
|
py
|
Python
|
Networking/Packets/Incoming/BuyResultPacket.py
|
henriquelino/pyrelay
|
b448cca3accc9a566616b756a03958ba096a5ebf
|
[
"MIT"
] | 26
|
2020-07-24T05:47:02.000Z
|
2022-03-31T16:03:13.000Z
|
Networking/Packets/Incoming/BuyResultPacket.py
|
henriquelino/pyrelay
|
b448cca3accc9a566616b756a03958ba096a5ebf
|
[
"MIT"
] | 17
|
2020-07-27T08:11:19.000Z
|
2022-03-29T05:26:16.000Z
|
Networking/Packets/Incoming/BuyResultPacket.py
|
henriquelino/pyrelay
|
b448cca3accc9a566616b756a03958ba096a5ebf
|
[
"MIT"
] | 16
|
2021-01-20T14:30:37.000Z
|
2022-03-18T05:31:51.000Z
|
class BuyResultPacket:
def __init__(self):
self.type = "BUYRESULT"
self.result = 0
self.resultString = ""
def read(self, reader):
self.result = reader.readInt32()
self.resultString = reader.readStr()
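# --- Hedged usage sketch (added for illustration; _FakeReader is a hypothetical stand-in for
# pyrelay's packet reader, and the wire layout shown is an assumption) ---
if __name__ == "__main__":
    import io
    import struct

    class _FakeReader:
        def __init__(self, buf):
            self.buf = io.BytesIO(buf)

        def readInt32(self):
            return struct.unpack(">i", self.buf.read(4))[0]

        def readStr(self):
            length = struct.unpack(">h", self.buf.read(2))[0]
            return self.buf.read(length).decode("utf-8")

    payload = struct.pack(">i", 0) + struct.pack(">h", 7) + b"Success"
    packet = BuyResultPacket()
    packet.read(_FakeReader(payload))
    print(packet.result, packet.resultString)  # -> 0 Success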
| 25.8
| 45
| 0.581395
|
f4ecda7c082bcfe8f3fabaee2dfad28342f7c446
| 896
|
py
|
Python
|
Python/climbingStairs.py
|
dianeyeo/LeetCode
|
b814831e7a4296a4e95785b75ea5c540a3fca63d
|
[
"MIT"
] | null | null | null |
Python/climbingStairs.py
|
dianeyeo/LeetCode
|
b814831e7a4296a4e95785b75ea5c540a3fca63d
|
[
"MIT"
] | null | null | null |
Python/climbingStairs.py
|
dianeyeo/LeetCode
|
b814831e7a4296a4e95785b75ea5c540a3fca63d
|
[
"MIT"
] | null | null | null |
"""
https://leetcode.com/problems/climbing-stairs/
Difficulty: Easy
You are climbing a staircase. It takes n steps to reach the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
Example 1:
Input: n = 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps
Example 2:
Input: n = 3
Output: 3
Explanation: There are three ways to climb to the top.
1. 1 step + 1 step + 1 step
2. 1 step + 2 steps
3. 2 steps + 1 step
Constraints:
1 <= n <= 45
"""
class Solution:
def climbStairs(self, n: int) -> int:
# using iteration
steps = [1,2] # can either take 1 or 2 steps
i = 2 # steps index
while i < n:
steps.append(steps[i-1] + steps[i-2]) # accessing the steps[i]
i += 1
return steps[n-1] # the nth number in steps
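# --- Hedged usage check (added for illustration) ---
if __name__ == "__main__":
    s = Solution()
    assert s.climbStairs(2) == 2
    assert s.climbStairs(3) == 3
    assert s.climbStairs(5) == 8  # Fibonacci-style growth: 1, 2, 3, 5, 8, ...
    print("all examples pass")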
| 24.216216
| 96
| 0.618304
|
752107d49f9d1fb5b3b701b760135b110516d36e
| 2,296
|
py
|
Python
|
py/examples/header.py
|
orlandoojr1/wave
|
e86d0c87c6c67e510fb4e1fa571982ca0a09f33c
|
[
"Apache-2.0"
] | 1
|
2022-03-02T21:54:36.000Z
|
2022-03-02T21:54:36.000Z
|
py/examples/header.py
|
orlandoojr1/wave
|
e86d0c87c6c67e510fb4e1fa571982ca0a09f33c
|
[
"Apache-2.0"
] | null | null | null |
py/examples/header.py
|
orlandoojr1/wave
|
e86d0c87c6c67e510fb4e1fa571982ca0a09f33c
|
[
"Apache-2.0"
] | null | null | null |
# Header
# Use a header card to display a page #header.
# ---
from h2o_wave import site, ui
image = 'https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&h=750&w=1260'
commands = [
ui.command(name='profile', label='Profile', icon='Contact'),
ui.command(name='preferences', label='Preferences', icon='Settings'),
ui.command(name='logout', label='Logout', icon='SignOut'),
]
page = site['/demo']
page['header1'] = ui.header_card(
box='1 1 9 1',
title='Transparent header',
subtitle='And now for something completely different!',
image='https://wave.h2o.ai/img/h2o-logo.svg',
items=[
ui.button(name='btn1', label='Button 1'),
ui.button(name='btn2', label='Button 2'),
ui.button(name='btn3', label='Button 3'),
],
secondary_items=[ui.textbox(name='search', icon='Search', width='300px', placeholder='Search...')],
color='transparent'
)
page['header2'] = ui.header_card(
box='1 2 9 1',
title='Card color header',
subtitle='And now for something completely different!',
items=[ui.menu(image=image, items=commands)],
secondary_items=[
ui.button(name='btn1', label='Link 1', link=True),
ui.button(name='btn2', label='Link 2', link=True),
ui.button(name='btn3', label='Link 3', link=True),
],
nav=[
ui.nav_group('Menu', items=[
ui.nav_item(name='#menu/spam', label='Spam'),
ui.nav_item(name='#menu/ham', label='Ham'),
ui.nav_item(name='#menu/eggs', label='Eggs'),
]),
ui.nav_group('Help', items=[
ui.nav_item(name='#about', label='About'),
ui.nav_item(name='#support', label='Support'),
])
],
color='card',
)
page['header3'] = ui.header_card(
box='1 3 9 1',
title='Primary color header',
subtitle='And now for something completely different!',
icon='Cycling',
icon_color='$violet',
items=[ui.menu(icon='Add', items=commands)],
secondary_items=[
ui.tabs(name='menu', value='email', link=True, items=[
ui.tab(name='email', label='Mail', icon='Mail'),
ui.tab(name='events', label='Events', icon='Calendar'),
ui.tab(name='spam', label='Spam', icon='Heart'),
]),
]
)
page.save()
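# --- Hedged note (added for illustration) ---
# This is a synchronous `site`-based script: with a local Wave server running, it can be
# executed directly (e.g. `python header.py`), after which the page is served under the
# /demo route. The server setup itself is assumed rather than shown here.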
| 35.323077
| 103
| 0.598432
|
161e7f199ddc24a0ec1c2c2acc07b3343b47d558
| 13,889
|
py
|
Python
|
notebooks/src/code/data/base.py
|
verdimrc/amazon-textract-transformer-pipeline
|
f3ae99ec3b8808d9edf7bc5ac003494cf1548293
|
[
"MIT-0"
] | 22
|
2021-11-10T17:16:10.000Z
|
2022-03-31T19:39:50.000Z
|
notebooks/src/code/data/base.py
|
verdimrc/amazon-textract-transformer-pipeline
|
f3ae99ec3b8808d9edf7bc5ac003494cf1548293
|
[
"MIT-0"
] | 4
|
2021-11-03T03:45:51.000Z
|
2022-01-28T03:30:57.000Z
|
notebooks/src/code/data/base.py
|
verdimrc/amazon-textract-transformer-pipeline
|
f3ae99ec3b8808d9edf7bc5ac003494cf1548293
|
[
"MIT-0"
] | 4
|
2021-12-14T22:41:40.000Z
|
2022-02-04T15:30:10.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""Base/common task data utilities for Amazon Textract + LayoutLM
This module defines utilities common across the different task types (e.g. MLM, NER)
"""
# Python Built-Ins:
from dataclasses import dataclass
import json
from math import ceil
from numbers import Real
import os
import re
from typing import Callable, Dict, Generator, List, Optional, Tuple
# External Dependencies:
import numpy as np
import torch
from torch.utils.data import Dataset
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import EvalPrediction
import trp
# Local Dependencies:
from ..logging_utils import getLogger
logger = getLogger("data.base")
@dataclass
class TaskData:
"""Base data interface exposed by the different task types (MLM, NER, etc) to training scripts
Each new task module should implement a method get_task(data_args, tokenizer) -> TaskData
"""
train_dataset: Dataset
data_collator: Optional[Callable] = None
eval_dataset: Optional[Dataset] = None
metric_computer: Optional[Callable[[EvalPrediction], Dict[str, Real]]] = None
class ExampleSplitterBase:
"""Base interface for a dataset example splitter
In dense document processing individual pages may often be significantly longer than the
max_seq_len of a model - rendering simple truncation of the page a poor strategy. A splitter
defines a reproducible algorithm to split document/page text into multiple examples to stay
within the maximum sequence length supported by the model.
"""
@classmethod
def n_examples(cls, n_tokens: int, max_content_seq_len: int) -> int:
"""Calculate how many individual examples are available within a given (long) text source"""
raise NotImplementedError(
"ExampleSplitterBase child class %s must implement n_examples()" % cls
)
@classmethod
def split(
cls,
word_texts: List[str],
tokenizer: PreTrainedTokenizerBase,
max_content_seq_len: int,
) -> List[Tuple[int, int]]:
"""Find a set of (start, end) slices to split words for samples <= max_content_seq_len"""
# How do we split a tokenized? What makes sense to return?
raise NotImplementedError("ExampleSplitterBase child class %s must implement split()" % cls)
class NaiveExampleSplitter(ExampleSplitterBase):
"""Split sequences by word, and pull final sequence start forward if it comes up <50% max len
This algorithm produces examples by splitting tokens on word boundaries, extending each sample
until max_content_seq_len is filled. *IF* the final generated example is less than 50% of the
maximum tokens, its start index will be pulled forward to consume as many words as will fit.
Apart from this, there will be no overlap between examples.
"""
@classmethod
def n_examples(cls, n_tokens: int, max_content_seq_len: int) -> int:
return int(ceil(n_tokens / max_content_seq_len))
@classmethod
def split(
cls,
word_texts: List[str],
tokenizer: PreTrainedTokenizerBase,
max_content_seq_len: int,
) -> List[Tuple[int, int]]:
if not (word_texts and len(word_texts)):
return []
tokenized = tokenizer(word_texts, add_special_tokens=False, is_split_into_words=True)
# word_ids is List[Union[None, int]] mapping token index to word_texts index. In this case,
# since special tokens are turned off, there are no None entries.
word_ids = np.array(tokenized.word_ids(), dtype=int)
n_tokens_total = len(word_ids)
# Assuming word_ids is monotonically increasing (are there languages/tokenizers where it
# wouldn't?), we can find the tokens which start a new word by seeing when word_ids goes up:
token_is_new_word = np.diff(word_ids, prepend=-1) # (1 if token is new word, 0 otherwise)
word_start_ixs = np.squeeze(np.argwhere(token_is_new_word > 0), axis=1)
ix_start_word = 0
n_words = len(word_texts)
splits = []
while ix_start_word < n_words:
start_token = word_start_ixs[ix_start_word]
end_token = start_token
ix_end_word = ix_start_word
# Seek forward to include as many words as fit:
while ix_end_word < n_words:
next_ix_end_word = ix_end_word + 1
next_end_token = (
word_start_ixs[next_ix_end_word]
if next_ix_end_word < n_words
else n_tokens_total
)
if next_end_token - start_token > max_content_seq_len:
break
else:
ix_end_word = next_ix_end_word
end_token = next_end_token
# Extreme edge case:
# If the current word was longer than max_content_seq_len by itself, we need to skip it
# to avoid an infinite loop
if end_token == start_token:
logger.warning(
"Skipping individual 'word' which is longer than max_content_seq_len. "
"Something is probably wrong with your data prep. Got word '%s'"
% word_texts[ix_start_word]
)
ix_start_word += 1
continue
# If the resultant sample is short, also seek backward to add extra context:
if end_token - start_token < max_content_seq_len * 0.5:
while ix_start_word > 0:
next_ix_start_word = ix_start_word - 1
next_start_token = word_start_ixs[next_ix_start_word]
if end_token - next_start_token > max_content_seq_len:
break
else:
ix_start_word = next_ix_start_word
start_token = next_start_token
# Log the split and move on to find the next one
splits.append((ix_start_word, ix_end_word))
ix_start_word = ix_end_word
return splits
class TextractLayoutLMDatasetBase(Dataset):
"""Base class for PyTorch/Hugging Face dataset using Amazon Textract for LayoutLM-based models
The base dataset assumes fixed/known length, which typically requires analyzing the source data
on init - but avoids the complications of shuffling iterable dataset samples in a multi-process
environment, or introducing SageMaker Pipe Mode and RecordIO formats.
Source data is provided as a folder of Amazon Textract result JSONs, with an optional JSONLines
manifest file annotating the documents in case the task is supervised.
"""
def __init__(
self,
textract_path: str,
tokenizer: PreTrainedTokenizerBase,
manifest_file_path: Optional[str] = None,
textract_prefix: str = "",
max_seq_len: int = 512,
):
"""Initialize a TextractLayoutLMDatasetBase
Arguments
---------
textract_path : str
The local folder where Amazon Textract result JSONs (OCR outputs) are stored.
tokenizer : transformers.tokenization_utils_base.PreTrainedTokenizerBase
The tokenizer for the model to be used.
manifest_file_path : Optional[str]
Local path to a JSON-Lines Augmented Manifest File: Optional for self-supervised
tasks, but typically mandatory for tasks that use annotations (like entity
recognition).
textract_prefix : str
s3://... URI root prefix against which the files in `textract_path` are relative.
This is used to map `textract-ref` URIs given in the manifest file to local paths.
max_seq_len : int
The maximum number of tokens per sequence for the target model to be trained.
"""
if not os.path.isdir(textract_path):
raise ValueError("textract_path '%s' is not a valid folder" % textract_path)
if not textract_path.endswith("/"):
textract_path = textract_path + "/"
self.textract_path = textract_path
if manifest_file_path:
if os.path.isfile(manifest_file_path):
self.manifest_file_path = manifest_file_path
elif os.path.isdir(manifest_file_path):
contents = os.listdir(manifest_file_path)
if len(contents) == 1:
self.manifest_file_path = os.path.join(manifest_file_path, contents[0])
else:
json_contents = list(
filter(
lambda s: re.search(r"\.jsonl?$", s), map(lambda s: s.lower(), contents)
)
)
if len(json_contents) == 1:
self.manifest_file_path = os.path.join(
manifest_file_path,
json_contents[0],
)
else:
raise ValueError(
"Data manifest folder %s must contain exactly one file or exactly one "
".jsonl/.json file ...Got %s" % (manifest_file_path, contents)
)
else:
raise ValueError("Data manifest '%s' is not a local file or folder")
else:
self.manifest_file_path = manifest_file_path
self.textract_prefix = textract_prefix
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
def textract_s3uri_to_file_path(self, s3uri: str) -> str:
"""Map a textract-ref S3 URI from manifest to local file path, via textract_prefix"""
textract_s3key = s3uri[len("s3://") :].partition("/")[2]
if not textract_s3key.startswith(self.textract_prefix):
raise ValueError(
"Textract S3 URI %s object key does not start with provided "
"textract_prefix '%s'" % (s3uri, self.textract_prefix)
)
textract_relpath = textract_s3key[len(self.textract_prefix) :]
if textract_relpath.startswith("/"):
# Because os.path.join('anything', '/slash/prefixed') = '/slash/prefixed'
textract_relpath = textract_relpath[1:]
return os.path.join(self.textract_path, textract_relpath)
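    # Illustrative mapping (all values hypothetical): with textract_path="/opt/ml/data/" and
    # textract_prefix="textracted/", "s3://my-bucket/textracted/doc1.json" resolves to
    # "/opt/ml/data/doc1.json".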
def dataset_inputs(self) -> Generator[dict, None, None]:
"""Generate the sequence of manifest items with textract-ref URIs resolved locally
Whether this dataset was instantiated with a manifest file (for annotations) or just as a
folder of Amazon Textract JSON files, this method will yield a sequence of dicts containing
{'textract-ref': str} resolved to the *local* path of the file, plus whatever other fields
were present unchanged (in a manifest).
"""
if self.manifest_file_path:
with open(self.manifest_file_path, "r") as f:
for linenum, line in enumerate(f, start=1):
logger.debug("Reading manifest line %s", linenum)
record = json.loads(line)
if "textract-ref" not in record:
raise ValueError(
f"Manifest line {linenum} missing required field 'textract-ref'"
)
else:
textract_ref = record["textract-ref"]
if textract_ref.lower().startswith("s3://"):
# Map S3 URI to local path:
textract_ref = self.textract_s3uri_to_file_path(textract_ref)
else:
                            # textract-ref in manifest isn't an S3 URI - assume it's relative to the channel
if textract_ref.startswith("/"):
textract_ref = self.textract_path + textract_ref[1:]
else:
textract_ref = self.textract_path + textract_ref
# Check the resolved file path exists:
if not os.path.isfile(textract_ref):
raise ValueError(
"(Manifest line {}) could not find textract file {}".format(
linenum,
textract_ref,
)
)
record["textract-ref"] = textract_ref
yield record
else:
for currpath, _, files in os.walk(self.textract_path):
for file in files:
yield {"textract-ref": os.path.join(currpath, file)}
@classmethod
def parse_textract_file(cls, file_path: str) -> trp.Document:
"""Load an Amazon Textract result JSON file via the Textract Response Parser library"""
with open(file_path, "r") as f:
return trp.Document(json.loads(f.read()))
@property
def max_content_seq_len(self):
"""Maximum content tokens per sequence after discounting required special tokens
At this base level, datasets are assumed to have 2 special tokens: <CLS> (beginning of
example) and <SEP> (end of example).
"""
return self.max_seq_len - 2
@dataclass
class DummyDataCollator:
"""Data collator that just stacks tensors from inputs.
For use with Dataset classes where the tokenization and collation leg-work is already done and
HF's default "DataCollatorWithPadding" should explicitly *not* be used.
"""
def __call__(self, features):
return {k: torch.stack([f[k] for f in features]) for k in features[0]}
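# --- Hedged usage sketch (added for illustration; the model name is an assumption) ---
# Shows how NaiveExampleSplitter partitions a long word list into (start_word, end_word)
# slices whose tokenized length fits within max_content_seq_len. A *fast* tokenizer is
# required because the splitter relies on BatchEncoding.word_ids().
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("bert-base-uncased")
    words = ["example"] * 600  # each word maps to a single token in this vocabulary
    slices = NaiveExampleSplitter.split(words, tok, max_content_seq_len=510)
    print(slices)  # -> [(0, 510), (90, 600)]: the short tail slice is back-filled with context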
| 44.94822
| 100
| 0.614947
|
fd77ad4c2c32403dcb0f798fe9ead6ba293ba928
| 1,280
|
py
|
Python
|
setup.py
|
fgregg/centered-potts
|
8140d17dc908370aeeef01165c720861aab01c4f
|
[
"MIT"
] | 1
|
2017-05-02T10:40:15.000Z
|
2017-05-02T10:40:15.000Z
|
setup.py
|
fgregg/pseudolikelihood
|
8140d17dc908370aeeef01165c720861aab01c4f
|
[
"MIT"
] | 1
|
2016-10-06T22:06:38.000Z
|
2016-10-29T14:23:01.000Z
|
setup.py
|
fgregg/centered-potts
|
8140d17dc908370aeeef01165c720861aab01c4f
|
[
"MIT"
] | 1
|
2019-02-12T02:13:23.000Z
|
2019-02-12T02:13:23.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
raise ImportError("setuptools module required, please go to https://pypi.python.org/pypi/setuptools and follow the instructions for installing setuptools")
setup(
name='pseudolikelihood',
url='https://github.com/fgregg/psuedolikelihood',
version='0.1',
author='Forest Gregg',
author_email='fgregg@uchicago.edu',
description='Estimate models with categorical, coupled outcomes using pseudolikelihood',
packages=['pseudolikelihood'],
    # 'scikit-learn' is the actual distribution name on PyPI; the bare 'sklearn' package is a deprecated alias.
    install_requires=['numpy', 'scikit-learn', 'scipy'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis'],
)
| 38.787879
| 159
| 0.653125
|
9005e528d095892a0674a73bae3e4886c682c0f3
| 937
|
py
|
Python
|
src/SnapSearch/error.py
|
liuyu81/SnapSearch-Client-Python
|
41857806c2b26f0537de2dcc23a145107a4ecd04
|
[
"MIT"
] | null | null | null |
src/SnapSearch/error.py
|
liuyu81/SnapSearch-Client-Python
|
41857806c2b26f0537de2dcc23a145107a4ecd04
|
[
"MIT"
] | null | null | null |
src/SnapSearch/error.py
|
liuyu81/SnapSearch-Client-Python
|
41857806c2b26f0537de2dcc23a145107a4ecd04
|
[
"MIT"
] | 1
|
2018-03-04T20:24:14.000Z
|
2018-03-04T20:24:14.000Z
|
# -*- coding: utf-8 -*-
"""
SnapSearch.error
~~~~~~~~~~~~~~~~
:copyright: 2014 by `SnapSearch <https://snapsearch.io/>`_
:license: MIT, see LICENSE for more details.
:author: `LIU Yu <liuyu@opencps.net>`_
:date: 2014/03/08
"""
class SnapSearchError(Exception):
"""
Common base class for all SnapSearch errros.
"""
def __init__(self, *args, **kwds):
super(SnapSearchError, self).__init__(*args)
self.__data = kwds
pass # void return
def __getattr__(self, name):
if name in self.__data:
return self.__data[name]
return getattr(super(SnapSearchError, self), name)
pass
class SnapSearchConnectionError(SnapSearchError):
"""
Cannot communicate with SnapSearch backend service.
"""
pass
class SnapSearchDependencyError(SnapSearchError):
"""
Cannot import package(s) required by SnapSearch.
"""
pass
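# --- Hedged usage sketch (added for illustration; the keyword arguments shown are hypothetical) ---
# Extra keyword arguments passed to an error are stored internally and exposed via __getattr__.
if __name__ == "__main__":
    try:
        raise SnapSearchConnectionError("backend unreachable", status=502,
                                        url="https://snapsearch.io/api")
    except SnapSearchError as err:
        print(err, err.status, err.url)  # -> backend unreachable 502 https://snapsearch.io/api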
| 21.295455
| 62
| 0.621131
|
b61b35930c17bb80f2c7918b376f37f9465a28ee
| 16
|
py
|
Python
|
nutcracker/tests/__init__.py
|
FXIhub/nutcracker
|
6725166fb3ac1e3ead717e5a57a76238e10a9049
|
[
"BSD-2-Clause"
] | 3
|
2017-04-30T18:00:19.000Z
|
2017-07-10T09:25:08.000Z
|
nutcracker/tests/__init__.py
|
FXIhub/nutcracker
|
6725166fb3ac1e3ead717e5a57a76238e10a9049
|
[
"BSD-2-Clause"
] | null | null | null |
nutcracker/tests/__init__.py
|
FXIhub/nutcracker
|
6725166fb3ac1e3ead717e5a57a76238e10a9049
|
[
"BSD-2-Clause"
] | 1
|
2020-12-17T20:03:10.000Z
|
2020-12-17T20:03:10.000Z
|
import test_all
| 8
| 15
| 0.875
|
93cfe8ee3acd6f9808d4467df1b81e4165c51e9e
| 2,268
|
py
|
Python
|
pushCube.py
|
nclslbrn/blender_script
|
19f8809826e9cfdc79422d815614f0834fa95930
|
[
"MIT"
] | 2
|
2019-11-29T23:44:05.000Z
|
2019-11-30T11:16:28.000Z
|
pushCube.py
|
nclslbrn/blender_script
|
19f8809826e9cfdc79422d815614f0834fa95930
|
[
"MIT"
] | null | null | null |
pushCube.py
|
nclslbrn/blender_script
|
19f8809826e9cfdc79422d815614f0834fa95930
|
[
"MIT"
] | null | null | null |
import bpy
import os
import sys
import bmesh # noqa
dir = os.path.dirname(bpy.data.filepath)
print(dir)
if dir not in sys.path:
sys.path.append(dir)
from functions.cleanScene import cleanScene # noqa: E731
from classes.Pool import Pool # noqa: E731
D = bpy.data
C = bpy.context
# Delete everythings in the scene
cleanScene('MESH')
# Your creative code here
N = 52
width = Pool(maxItems=N)
width.update()
height = []
depth = []
for i in range(N):
height.append(Pool(maxItems=N))
height[i].update()
for j in range(N):
depth.append(Pool(maxItems=N))
depth[i*N + j].update()
# Create a default
mesh = bpy.data.meshes.new('Voxel')
basic_cube = bpy.data.objects.new('original-voxel', mesh)
basic_cube.location = (0, 0, 0)
# Add the object into the scene.
# C.scene.collection.objects.link(basic_cube)
# Construct the bmesh cube and assign it to the blender mesh.
bm = bmesh.new()
bmesh.ops.create_cube(bm, size=0.25)
bm.to_mesh(mesh)
bm.free()
cubeID = 0
def cloneCube(position, size):
clone = basic_cube.copy()
clone.name = 'VoxCopy-' + str(cubeID)
# clone.data = basic_cube.data.copy()
clone.scale = size
clone.location = position
C.scene.collection.objects.link(clone)
x = 1
for nX in range(N):
y = 1
dx = width.items[nX]
for nY in range(N):
z = 1
dy = height[nX].items[nY]
for nZ in range(N):
dz = depth[(nY * N) + nX].items[nZ]
if dx > 0 and dy > 0 and dz > 0:
# top left front
cloneCube((x, y, z), (dx, dy, dz))
# top right front
cloneCube((-x, y, z), (dx, dy, dz))
# bottom left front
cloneCube((x, -y, z), (dx, dy, dz))
                # bottom right front
cloneCube((-x, -y, z), (dx, dy, dz))
# top left back
cloneCube((x, y, -z), (dx, dy, dz))
# top right back
cloneCube((-x, y, -z), (dx, dy, dz))
# bottom left back
cloneCube((x, -y, -z), (dx, dy, dz))
                # bottom right back
cloneCube((-x, -y, -z), (dx, dy, dz))
cubeID += 1
z -= dz
y -= dy
x -= dx
| 22.909091
| 61
| 0.541887
|
69e3c694af947c49135bc8343511b0d30e289759
| 24,587
|
py
|
Python
|
bindings/java/c_generator.py
|
nirbheek/openwebrtc
|
838d6eedf2b4e53224a60f3da8529e6cc621359f
|
[
"BSD-2-Clause"
] | null | null | null |
bindings/java/c_generator.py
|
nirbheek/openwebrtc
|
838d6eedf2b4e53224a60f3da8529e6cc621359f
|
[
"BSD-2-Clause"
] | null | null | null |
bindings/java/c_generator.py
|
nirbheek/openwebrtc
|
838d6eedf2b4e53224a60f3da8529e6cc621359f
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2014, Ericsson AB. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import config
from functools import partial
from collections import defaultdict
from itertools import imap
from java_type_signatures import type_signatures
from base_generator import *
C = BaseGenerator(
default_line_prefix=config.C_INDENTATION,
)
def jni_param(param):
if param.jni_type:
return param.jni_type + ' ' + param.jni_name
return ()
def c_param(param):
if param.c_type:
return param.c_type + ' ' + param.c_name
return ()
def c_arg(param):
if param.c_type:
return param.c_name
return ()
def jni_arg(param):
if param.jni_type:
return param.jni_name
return ()
@add_to(C)
class Log(C.Lines):
def __init__(self, level, msg, *args):
self.msg = msg
self.args = args
self.level = level
def _make_logfunc(level):
@classmethod
def logfunc(cls, msg, *args):
return cls(level, msg, *args)
return logfunc
error = _make_logfunc('error')
warning = _make_logfunc('warning')
debug = _make_logfunc('debug')
info = _make_logfunc('info')
def __iter__(self):
yield 'log_%s("%s"%s);' % (self.level, self.msg, (', ' if self.args else '') + flatjoin(self.args, ', '))
@add_to(C)
class Assert(C.Lines):
def __init__(self, val):
self.val = val
def __iter__(self):
yield semi('g_assert(' + flatjoin(self.val, '') + ')')
@add_to(C)
class Throw(C.Lines):
def __init__(self, *args):
self.args = args
def __iter__(self):
yield 'THROW(' + flatjoin(self.args, '') + ');'
@add_to(C)
class ExceptionCheck(C.Lines):
def __init__(self, value):
self.value = value
def __iter__(self):
yield C.If(C.Env('ExceptionCheck'),
C.Log('warning', 'exception at %s:%d', '__FILE__', '__LINE__'),
C.Return(self.value),
)
@classmethod
def default(cls, value):
return cls(value.parent.return_value.default_value)
@add_to(C)
class CommentHeader(C.Comment):
def __iter__(self):
l = len(self.text)
yield '/**' + l * '*' + '**/'
yield '/* ' + self.text + ' */'
yield '/**' + l * '*' + '**/'
@add_to(C)
class Function(C.FunctionBlock):
modifiers = ['static']
def __init__(self,
name,
return_type='void',
params=None,
**kwargs):
super(Function, self).__init__(**kwargs)
self.name = name
self.return_type = return_type
self.params = params or []
@property
def start(self):
return [self.definition, '{']
@staticmethod
def callback(callback, body=None, **kwargs):
args = {
'return_type': callback.params.return_value.c_type,
'name': 'callback_' + callback.value.gir_type,
'params': map(c_param, callback.params),
'body': [TypeConversions.params_to_jni(callback.params, body=body or [], push_frame=True)],
}
if callback.params.return_value.name is not None:
args['body'] += [C.Return(callback.params.return_value.c_name)]
args.update(kwargs)
return C.Function(**args)
@add_to(C)
class JniExport(C.FunctionBlock):
modifiers = ['JNIEXPORT']
def __init__(self,
package=None,
clazz=None,
subclass=None,
method_name=None,
return_type='void',
params=None,
**kwargs):
super(JniExport, self).__init__(**kwargs)
self.package = package
self.clazz = clazz
self.subclass = subclass
self.method_name = method_name
self.return_type = return_type
self.java_params = params or []
@property
def name(self):
return '_'.join(prune_empty('Java',
self.package.replace('.', '_'),
self.clazz,
self.subclass,
self.method_name,
))
@property
def params(self):
return ['JNIEnv* env'] + self.java_params
@property
def start(self):
return [self.definition, '{']
@staticmethod
def default(function, body=[], **kwargs):
params = map(jni_param, function.params.java_params)
if function.params.instance_param is None:
params = ['jclass jclazz'] + params
else:
params = [jni_param(function.params.instance_param)] + params
args = {
'return_type': function.params.return_value.jni_type,
'method_name': function.name,
'params': params,
'body': [C.TypeConversions.params_to_c(function.params, body=body, get_env=False)],
}
if function.params.return_value.name is not None:
args['body'] += [C.Return(function.params.return_value.jni_name)]
args.update(kwargs)
return JniExport(**args)
@add_to(C)
class Helper(C.Call):
helper_functions = {}
used_helpers = []
def __init__(self, name, *args):
super(Helper, self).__init__(name, *args)
func = self.helper_functions.pop(name, None)
if func is not None:
self.used_helpers.append(func)
@classmethod
def add_helper(cls, name, func):
cls.helper_functions[name] = func
@classmethod
def enumerate_used_helpers(cls):
return cls.used_helpers
@add_to(C)
class Cache(C.Lines):
cached_classes = defaultdict(partial(defaultdict, dict))
def __init__(self, *args):
self.args = list(args)
def __iter__(self):
yield 'cache_' + flatjoin(self.args, '_')
@classmethod
def clazz(cls, *args):
classname = flatjoin(args, '$')
cls.cached_classes[type_signatures[classname]['_path']]
return cls(*args)
def _make_cacher(func):
@classmethod
def cacher(cls, *args):
methodname = args[-1]
signatures = type_signatures[flatjoin(args[:-1], '$')]
cls.cached_classes[signatures['_path']][func][methodname] = signatures[methodname]
return cls(*args)
return cacher
method = _make_cacher('GetMethodID')
static_method = _make_cacher('GetStaticMethodID')
field = _make_cacher('GetFieldID')
static_field = _make_cacher('GetStaticFieldID')
@classmethod
def default_class(cls, clazz):
cls.cached_classes[clazz.java_class_path]
return cls(clazz.java_type)
@classmethod
def default_method(cls, func):
val = func.value
args = None
if hasattr(val, 'outer_java_type'):
args = [val.outer_java_type, val.java_type, func.name]
else:
args = [val.java_type, func.name]
cls.cached_classes[val.java_class_path]['GetMethodID'][func.name] = func.method_signature
return cls(*args)
@classmethod
def default_enum_member(cls, enum, member):
typ = enum.type
if hasattr(enum.type, 'inner_type'):
typ = enum.type.inner_type
cls.cached_classes[typ.java_class_path]['GetStaticFieldID'][member.name] = typ.java_signature
return cls(enum.name, member.name)
@classmethod
def enumerate_cached_classes(cls):
cache_declarations = []
jni_onload_cache = []
for classpath, clazz in Cache.cached_classes.items():
classname = classpath[classpath.rfind('/')+1:]
to_cache_var = lambda *args: '_'.join(['cache'] + classname.split('$') + list(args))
classvar = to_cache_var()
cache_declarations += [C.Decl('static jclass', classvar)]
jni_onload_cache += [
C.Assign(classvar, C.Env('FindClass', quot(classpath))),
C.ExceptionCheck('0'),
C.Assign(classvar, C.Env('NewGlobalRef', classvar)),
C.ExceptionCheck('0'),
]
for getfunc, method in clazz.items():
var_type = 'jmethodID' if 'Method' in getfunc else 'jfieldID'
for methodname, signature in method.items():
methodvar = to_cache_var(methodname)
if methodname == '_constructor':
methodname = '<init>'
cache_declarations += [C.Decl('static ' + var_type, methodvar)]
jni_onload_cache += [
C.Log('debug', 'getting %s.%s', quot(classname), quot(methodname)),
C.Assign(methodvar, C.Env(getfunc, classvar, quot(methodname), quot(signature))),
C.ExceptionCheck('0'),
]
cache_declarations.append('')
jni_onload_cache.append('')
return cache_declarations[:-1], jni_onload_cache[:-1]
@add_to(C)
class Env(C.Lines):
return_type_table = {
'V': 'Void',
';': 'Object',
'Z': 'Boolean',
'B': 'Byte',
'C': 'Char',
'S': 'Short',
'I': 'Int',
'J': 'Long',
'F': 'Float',
'D': 'Double',
}
def __init__(self, name, *args):
self.name = name
self.args = args
@staticmethod
def tuple_to_type(args):
clazz = type_signatures[flatjoin(args[:-1], '$')]
method = clazz[args[-1]]
return Env.return_type_table[method[-1]]
@classmethod
def method(cls, name, method_tuple, *args):
return cls('Call' + Env.tuple_to_type(method_tuple) + 'Method', name, C.Cache.method(*method_tuple), *args)
@classmethod
def static_method(cls, method_tuple, *args):
return cls('CallStatic' + Env.tuple_to_type(method_tuple) + 'Method', C.Cache.clazz(method_tuple[:-1]), C.Cache.static_method(*method_tuple), *args)
@classmethod
def field(cls, name, field_tuple):
return cls('Get' + Env.tuple_to_type(field_tuple) + 'Field', name, C.Cache.field(*field_tuple))
@classmethod
def new(cls, clazz, *args):
return cls('NewObject', C.Cache.clazz(clazz), C.Cache.method(clazz, '_constructor'), *args)
@classmethod
def throw(cls, clazz, msg):
return cls('ThrowNew', C.Cache.clazz(clazz), msg)
@classmethod
def callback(cls, callback):
type = Env.return_type_table[callback.params.return_value.java_signature[-1]]
cached = None
if hasattr(callback.value, 'outer_java_type'):
cached = (callback.value.outer_java_type, callback.value.java_type, callback.name)
else:
cached = (callback.value.java_type, callback.name)
return cls('Call' + type + 'Method',
map(jni_arg, callback.params.closure_params),
C.Cache.default_method(callback),
*map(jni_arg, callback.params.java_params)
)
def __iter__(self):
yield semi('(*env)->{name}({args})'.format(
name=self.name,
args=flatjoin(['env'] + list(flatten(self.args)), ', '),
))
@add_to(C)
class TypeConversions(C.Lines):
def __init__(self, conversions, return_conversion, body=None, get_env=True, push_frame=False, **kwargs):
super(TypeConversions, self).__init__(**kwargs)
self.conversions = list(conversions)
self.return_conversion = return_conversion
self.body = body or []
self.get_env = get_env
self.push_frame = push_frame
def __iter__(self):
conversion = [
prune_empty([p.declarations for p in self.conversions] + [self.get_env and C.Decl('JNIEnv*', 'env')]),
self.get_env and C.Assign('env', C.Call('get_jni_env')),
C.If(Env('PushLocalFrame', str(config.LOCAL_FRAME_SIZE)),
C.Log('warning', 'failed to push local frame at %s:%d', '__FILE__', '__LINE__')
) if self.push_frame else [],
prune_empty([p.conversion for p in self.conversions]),
self.body,
prune_empty(p.cleanup for p in reversed(self.conversions)),
Env('PopLocalFrame', 'NULL') if self.push_frame else [],
]
if self.return_conversion is not None:
conversion = [self.return_conversion.declarations] + conversion + [
self.return_conversion.conversion, self.return_conversion.cleanup,
]
return iter(intersperse(prune_empty(conversion), ''))
@staticmethod
def params_to_c(params, **kwargs):
ret = params.return_value
return TypeConversions([param.transform_to_c() for param in params],
ret.transform_to_jni() if ret.name is not None else None, **kwargs)
@staticmethod
def params_to_jni(params, **kwargs):
ret = params.return_value
return TypeConversions([param.transform_to_jni() for param in params],
ret.transform_to_c() if ret.name is not None else None, **kwargs)
def make_function_gen(package, classname):
def gen(function):
call = C.Call(function.c_name, map(c_arg, function.params))
ret = function.params.return_value
if ret.name is not None:
call = C.Assign(ret.c_name, call)
out = JniExport.default(function, package=package, clazz=classname, body=call)
if ret.name is not None:
out.body = [C.Decl(ret.c_type, ret.c_name)] + out.body
return out
return gen
def make_callback_gen(package, classname):
def gen(callback):
call = C.Env.callback(callback)
ret = callback.params.return_value
if ret.name is not None:
call = C.Assign(ret.jni_name, call)
out = C.Function.callback(callback, package=package, clazz=classname, body=call)
if ret.name is not None:
out.body = [C.Decl(ret.jni_type, ret.jni_name)] + out.body
return out
return gen
def make_signal_accessors_gen(package, classname):
def gen(signal):
connect_args = map(c_arg, signal.add_listener.params)
connect_args[0] = 'G_OBJECT(' + connect_args[0] + ')'
connect_args.insert(1, quot(signal.signal_name))
connect_args += [C.Helper('jobject_wrapper_closure_notify').name, '0']
ret = signal.add_listener.params.return_value
connecter = C.JniExport.default(signal.add_listener, package=package, clazz=classname,
body=[C.Assign(ret.c_name, C.Call('g_signal_connect_data', connect_args))],
)
connecter.body = [C.Decl(ret.c_type, ret.c_name)] + connecter.body
disconnect_args = map(c_arg, signal.remove_listener.params)
disconnect_args[0] = 'G_OBJECT(' + disconnect_args[0] + ')'
disconnecter = C.JniExport.default(signal.remove_listener, package=package, clazz=classname,
body=C.Call('g_signal_handler_disconnect', disconnect_args),
)
return [connecter, disconnecter]
return gen
def gen_class(package, clazz):
body = [C.CommentHeader(clazz.name)]
gen_signal_accessors = make_signal_accessors_gen(package, clazz.name)
for attr in ['constructors', 'functions', 'methods']:
body += [C.Comment(attr) if getattr(clazz, attr) else None]
body += map(make_function_gen(package, clazz.name), getattr(clazz, attr))
body += [C.Comment('signals') if clazz.signals else None]
body += map(make_callback_gen(package, clazz.name), clazz.signals)
body += map(gen_signal_accessors, clazz.signals)
body += [C.Comment('properties') if clazz.properties else None]
for prop in clazz.properties:
body += [C.Comment(prop.name)]
if prop.readable:
# getter
ret = prop.getter.params.return_value
get_params = map(c_arg, prop.getter.params) + [quot(prop.name), '&' + ret.c_name, 'NULL']
func = C.JniExport.default(prop.getter, package=package, clazz=clazz.name, body=[
C.Call('g_object_get', get_params),
])
if ret.name is not None:
func.body = [C.Decl(ret.c_type, ret.c_name)] + func.body
body.append(func)
# change listener
transform = ret.transform_to_jni()
func = C.Function(
package=package,
clazz=clazz.name,
name='callback_' + prop.signal.value.gir_type,
return_type=prop.signal.params.return_value.c_type,
params=map(c_param, prop.signal.params),
body=[TypeConversions([p.transform_to_jni() for p in prop.signal.params.params], None, push_frame=True, body=[
'(void) c_pspec;',
C.Call('g_object_get', get_params),
transform.conversion,
C.Env.callback(prop.signal),
transform.cleanup,
])],
)
func.body = [
C.Decl(ret.c_type, ret.c_name),
transform.declarations,
] + func.body
body.append(func)
body += gen_signal_accessors(prop.signal)
if prop.writable:
# setter
ret = prop.setter.params.return_value
params = map(c_arg, prop.setter.params)
params.insert(1, quot(prop.name))
params.append('NULL')
func = C.JniExport.default(prop.setter, package=package, clazz=clazz.name, body=[
C.Call('g_object_set', params)
])
body += [func]
return intersperse(prune_empty(body), '')
def gen_namespace(namespace, package):
body = []
package = package + '.' + namespace.symbol_prefix
body += map(make_callback_gen(package, namespace.identifier_prefix), namespace.callbacks)
body += map(make_function_gen(package, namespace.identifier_prefix), namespace.functions)
body += map(partial(gen_class, package), namespace.classes)
return body
def add_helpers(namespace):
for enum in namespace.enums:
C.Helper.add_helper(enum.name + '_to_java_enum',
C.Function(enum.name + '_to_java_enum',
return_type='jobject',
params=['JNIEnv* env', enum.type.c_type + ' value'],
body=[
C.Decl('jfieldID', 'fieldId'),
C.Decl('jobject', 'result'),
'',
C.Switch('value', cases=[
(member.c_name, C.Assign('fieldId', C.Cache.default_enum_member(enum, member)))
for member in enum.members
]),
'',
C.Assert('fieldId'),
C.Assign('result', Env('GetStaticObjectField', C.Cache(enum.name), 'fieldId')),
C.ExceptionCheck('NULL'),
C.Return('result'),
]
)
)
def gen_source(namespaces, include_headers):
body = []
package = config.PACKAGE_ROOT
for namespace in namespaces:
add_helpers(namespace)
for namespace in namespaces:
body += gen_namespace(namespace, package)
jobject_wrapper_struct = C.Block(
_start = 'typedef union {',
body = [
C.Decl('jobject', 'obj'),
C.Decl('jweak', 'weak'),
],
_end = '} JObjectWrapper;',
)
native_destructor = [C.JniExport(
package=package,
clazz='NativeInstance',
method_name='nativeDestructor',
return_type='void',
params=['jclass clazz', 'jlong instance_pointer'],
body=[
C.Decl('GWeakRef*', 'ref'),
C.Decl('GObject*', 'gobj'),
C.Decl('JObjectWrapper*', 'wrapper'),
'(void) clazz;',
'',
C.Assign('ref', 'instance_pointer', cast='GWeakRef*'),
C.Assign('gobj', C.Call('g_weak_ref_get', 'ref')),
C.Call('g_weak_ref_clear', 'ref'),
C.Call('g_free', 'ref'),
'',
C.If('!gobj',
C.Env.throw('IllegalStateException', '"GObject ref was NULL at finalization"'),
C.Return()),
C.Log('debug', 'unrefing GObject[%p]', 'gobj'),
C.Assign('wrapper', C.Call('g_object_get_data', 'gobj', '"java_instance"'), cast='JObjectWrapper*'),
C.If('wrapper', [
C.Call('g_object_set_data', 'gobj', '"java_instance"', 'NULL'),
C.Helper('jobject_wrapper_destroy', 'wrapper', 'TRUE'),
]),
C.Call('g_object_unref', 'gobj'),
]),
]
helper_functions = Helper.enumerate_used_helpers()
# cached classes need to be enumerated last
cache_declarations, jni_onload_cache = C.Cache.enumerate_cached_classes()
jni_onload = Function(
name='JNI_OnLoad',
return_type='jint',
params=['JavaVM* vm', 'void* reserved'],
modifiers=[],
body=[
C.Decl('JNIEnv*', 'env'),
'',
C.Assign('jvm', 'vm'),
C.Assign('env', C.Call('get_jni_env')),
'',
jni_onload_cache,
'',
C.Return('JNI_VERSION_1_6'),
]
)
include_headers = ['jni.h', 'android/log.h'] + include_headers
includes = '\n'.join('#include <' + h + '>' for h in include_headers)
body = [
includes,
HEADER,
cache_declarations,
GET_JNI_ENV,
jni_onload,
jobject_wrapper_struct,
] + helper_functions + [native_destructor] + body
body = intersperse(prune_empty(body), '')
return flatjoin(body, '\n')
HEADER = """
#define android_assert(st) if (!(st)) {{ __android_log_write(ANDROID_LOG_ERROR, "OpenWebRTC", "Assertion failed at "G_STRINGIFY(__LINE__));}}
#undef g_assert
#define g_assert android_assert
#define log_verbose(st, ...) __android_log_print(ANDROID_LOG_VERBOSE, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_debug(st, ...) __android_log_print(ANDROID_LOG_DEBUG, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_info(st, ...) __android_log_print(ANDROID_LOG_INFO, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_warning(st, ...) __android_log_print(ANDROID_LOG_WARN, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_error(st, ...) __android_log_print(ANDROID_LOG_ERROR, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
""".format(config.LOG_TAG)
GET_JNI_ENV = [
C.Decl('static JavaVM*', 'jvm'),
'',
C.Function('get_jni_env',
return_type='JNIEnv*',
params=[],
body=[
C.Decl('JNIEnv*', 'env'),
C.Decl('int', 'ret'),
'',
C.Assign('env', 'NULL'),
C.Assign('ret', C.Call('(*jvm)->GetEnv', 'jvm', '(void**)&env', 'JNI_VERSION_1_6')),
'',
C.IfElse(ifs=['ret == JNI_EDETACHED', 'ret == JNI_EVERSION'],
bodies=[
C.IfElse(ifs=['(*jvm)->AttachCurrentThread(jvm, (JNIEnv**) &env, NULL) != 0'],
bodies=[
C.Log.error('JNI: failed to attach thread'),
C.Log.info('JNI: successfully attached to thread'),
]),
C.Log.error('JNI: version not supported'),
]
),
'',
C.Assert('env'),
C.Return('env'),
]
),
]
| 35.07418
| 156
| 0.591288
|
f7ce2354a30ac2c192f9ee8b0bd81398ee83201c
| 1,913
|
py
|
Python
|
apps/department/models.py
|
xiaozhi-12121/Django_web
|
4d54b205542c52b8bd8309eaedc16fcdee405273
|
[
"Apache-2.0"
] | null | null | null |
apps/department/models.py
|
xiaozhi-12121/Django_web
|
4d54b205542c52b8bd8309eaedc16fcdee405273
|
[
"Apache-2.0"
] | 2
|
2020-05-12T01:15:38.000Z
|
2020-05-12T01:15:38.000Z
|
apps/department/models.py
|
xiaozhi-12121/Django_web
|
4d54b205542c52b8bd8309eaedc16fcdee405273
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# __author__ : stray_camel
# __description__ : 用户部门管理等
# __REFERENCES__ :
# __date__: 2020/09/28 09
from django.db import models
from django.conf import settings
from datetime import datetime
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Department(models.Model):
class Meta:
verbose_name = """部门信息管理"""
verbose_name_plural = verbose_name
db_table = "department_message"
parent_department = models.IntegerField(
verbose_name=u"父类部门id", null=True, blank=True)
name = models.CharField(max_length=20, verbose_name=u"部门名称", default="")
manager = models.IntegerField(verbose_name=u"部门经理", null=True, blank=True)
def __str__(self):
return self.name
class Staff(models.Model):
class Meta:
verbose_name = """员工信息管理"""
verbose_name_plural = verbose_name
db_table = "staff_message"
department = models.IntegerField(verbose_name="部门", null=True, blank=True)
name = models.CharField(max_length=20, verbose_name=u"员工姓名")
email = models.EmailField(
default='straycamel@straycamel.com', verbose_name=u"邮箱")
gradSchool = models.CharField(max_length=20, verbose_name=u"毕业学校")
address = models.CharField(max_length=50, verbose_name=u"住址", default='2')
sex = models.CharField(max_length=10, choices=(
('female', u'女'), ('male', u'男')), verbose_name=u"性别")
age = models.IntegerField(verbose_name=u"年龄")
birthday = models.DateField(verbose_name=u"生日")
tel = models.CharField(max_length=20, verbose_name=u"手机号")
salary_num = models.IntegerField(default=0, verbose_name=u"薪资")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"入职时间")
user = models.IntegerField(blank=True, null=False)
is_activate = models.BooleanField(default=True)
def __str__(self):
return self.name
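# --- Hedged usage sketch (added for illustration; intended for `python manage.py shell`,
# and the import path below is an assumption) ---
# from department.models import Department, Staff
# dept = Department.objects.create(name="研发部", manager=1)
# Staff.objects.create(department=dept.id, name="张三", gradSchool="某大学", sex="male",
#                      age=28, birthday="1996-01-01", tel="13800000000", user=1)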
| 38.26
| 79
| 0.701516
|
d7cc12445229b4aab2cf21a1859676a492396478
| 7,723
|
py
|
Python
|
scripts/smile.py
|
hummat/occupancy_networks
|
c7b89d58f3839fb56df53c37288d22c33529aeac
|
[
"MIT"
] | null | null | null |
scripts/smile.py
|
hummat/occupancy_networks
|
c7b89d58f3839fb56df53c37288d22c33529aeac
|
[
"MIT"
] | null | null | null |
scripts/smile.py
|
hummat/occupancy_networks
|
c7b89d58f3839fb56df53c37288d22c33529aeac
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import torch
import trimesh
from im2mesh import config
from im2mesh.utils import binvox_rw, voxels
from im2mesh.checkpoints import CheckpointIO
from im2mesh.utils.visualize import visualize_pointcloud, visualize_voxels
def load_binvox(file_path: str):
with open(file_path, "rb") as f:
voxels_in = binvox_rw.read_as_3d_array(f)
return voxels_in.data.astype(np.float32)
def load_pointcloud(file_path):
pointcloud_dict = np.load(file_path)
return pointcloud_dict['points'].astype(np.float32)
def load_mesh(file_path: str, process: bool = True, padding: float = 0.1):
mesh = trimesh.load(file_path, process=False)
if process:
total_size = (mesh.bounds[1] - mesh.bounds[0]).max()
scale = total_size / (1 - padding)
centers = (mesh.bounds[1] + mesh.bounds[0]) / 2
mesh.apply_translation(-centers)
mesh.apply_scale(1 / scale)
return mesh
def process_mesh(mesh, padding: float = 0, flip_yz: bool = False, with_transforms: bool = False):
bbox = mesh.bounding_box.bounds
loc = (bbox[0] + bbox[1]) / 2
scale = (bbox[1] - bbox[0]).max() / (1 - padding)
mesh.apply_translation(-loc)
mesh.apply_scale(1 / scale)
if flip_yz:
angle = 90 / 180 * np.pi
R = trimesh.transformations.rotation_matrix(angle, [1, 0, 0])
mesh.apply_transform(R)
if with_transforms:
return mesh, loc, scale
return mesh
def visualize_all(file_path):
visualize_pointcloud(load_pointcloud(os.path.join(file_path, "points.npz")), show=True)
visualize_voxels(load_binvox(os.path.join(file_path, "model.binvox")), show=True)
def visualize_from_mesh(file_path: str, flip_yz: bool = False, use_trimesh: bool = False):
mesh = load_mesh(file_path)
mesh, loc, scale = process_mesh(mesh, flip_yz=flip_yz, with_transforms=True)
pointcloud = mesh.sample(2048).astype(np.float32)
    if use_trimesh:
voxel = trimesh.exchange.binvox.voxelize_mesh(mesh,
dimension=32,
remove_internal=False,
center=True,
binvox_path="/home/matthias/Downloads/binvox")
binvox = trimesh.exchange.binvox.export_binvox(voxel) # Writes in 'xzy' format by default
with open("viz.binvox", "wb") as f:
f.write(binvox)
else:
voxels_occ = voxels.voxelize_ray(mesh, 32)
voxels_out = binvox_rw.Voxels(voxels_occ, (32,) * 3,
translate=loc, scale=scale,
axis_order="xyz") # 'xyz' means 'voxel_occ' is in this format
with open("viz.binvox", "wb") as f:
voxels_out.write(f) # Always writes in 'xzy' format
with open("viz.binvox", "rb") as f:
        voxels_in = binvox_rw.read_as_3d_array(f)  # Expects data in 'xzy' format (otherwise set 'fix_coords' to False)
voxels_in = voxels_in.data.astype(np.float32)
visualize_pointcloud(pointcloud, show=True)
visualize_voxels(voxels_in, show=True)
def from_pointcloud(visualize=False):
path_prefix = "/home/matthias/Data/Ubuntu/git/occupancy_networks"
default_path = os.path.join(path_prefix, "configs/default.yaml")
model_path = os.path.join(path_prefix, "configs/pointcloud/onet_pretrained.yaml")
cfg = config.load_config(model_path, default_path)
device = torch.device("cuda")
mesh = load_mesh("/home/matthias/Data/Ubuntu/data/aae_workspace/models/case.ply")
# mesh = load_mesh(os.path.join(path_prefix, "data/ShapeNet.build/03797390/2_watertight/cc5b14ef71e87e9165ba97214ebde03.off"))
mesh = process_mesh(mesh, flip_yz=True)
points = mesh.sample(100000).astype(np.float32)
side = np.random.randint(3)
xb = [points[:, side].min(), points[:, side].max()]
length = np.random.uniform(0.7 * (xb[1] - xb[0]), (xb[1] - xb[0]))
ind = (points[:, side] - xb[0]) <= length
points = points[ind]
indices = np.random.randint(points.shape[0], size=300)
points = points[indices, :]
noise = 0.005 * np.random.randn(*points.shape)
noise = noise.astype(np.float32)
points = points + noise
if visualize:
# visualize_pointcloud(points, show=True)
trimesh.PointCloud(points).show()
data = {'inputs': torch.unsqueeze(torch.from_numpy(points), dim=0)}
model = config.get_model(cfg, device)
checkpoint_io = CheckpointIO("..", model=model)
# checkpoint_io.load(os.path.join(path_prefix, cfg['test']['model_file']))
checkpoint_io.load(cfg['test']['model_file'])
model.eval()
print(model)
generator = config.get_generator(model, cfg, device)
mesh = generator.generate_mesh(data, return_stats=False)
if visualize:
mesh.show()
else:
mesh.export("smile.off")
def from_voxel_grid(use_trimesh: bool = True):
path_prefix = "/home/matthias/Data/Ubuntu/git/occupancy_networks"
default_path = os.path.join(path_prefix, "configs/default.yaml")
model_path = os.path.join(path_prefix, "configs/voxels/onet_pretrained.yaml")
cfg = config.load_config(model_path, default_path)
device = torch.device("cuda")
# mesh = load_mesh("/home/matthias/Data/Ubuntu/data/aae_workspace/models/case.ply")
# mesh = load_mesh(os.path.join(path_prefix, "data/ShapeNet.build/02876657/2_watertight/1ae823260851f7d9ea600d1a6d9f6e07.off"))
# mesh, loc, scale = process_mesh(mesh, with_transforms=True, flip_yz=False)
# assert mesh.is_watertight
#
# if use_trimesh:
# voxel = trimesh.exchange.binvox.voxelize_mesh(mesh,
# dimension=32,
# remove_internal=False,
# center=True,
# binvox_path="/home/matthias/Downloads/binvox")
#
# binvox = trimesh.exchange.binvox.export_binvox(voxel)
# with open("smile.binvox", "wb") as f:
# f.write(binvox)
# else:
# voxels_occ = voxels.voxelize_ray(mesh, 32)
# voxels_out = binvox_rw.Voxels(voxels_occ, (32,) * 3,
# translate=loc, scale=scale,
# axis_order="xyz") # 'xyz' means 'voxel_occ' is in this format
# with open("smile.binvox", "wb") as f:
# voxels_out.write(f) # Always writes in 'xzy' format
#
# with open("smile.binvox", "rb") as f:
# voxels_in = binvox_rw.read_as_3d_array(f)
# with open(os.path.join(path_prefix, "data/ShapeNet/02958343/1a0bc9ab92c915167ae33d942430658c/model.binvox"), "rb") as f:
# voxels_in = binvox_rw.read_as_3d_array(f)
#
# voxels_in = voxels_in.data.astype(np.float32)
# visualize_voxels(voxels_in, show=True)
# data = {'inputs': torch.unsqueeze(torch.from_numpy(voxels_in), dim=0)}
dataset = config.get_dataset('test', cfg, return_idx=True)
test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=0, shuffle=True)
data = next(iter(test_loader))
visualize_voxels(data["voxels"][0].cpu().numpy(), show=True)
model = config.get_model(cfg, device, dataset)
checkpoint_io = CheckpointIO("..", model=model)
checkpoint_io.load(cfg['test']['model_file'])
model.eval()
generator = config.get_generator(model, cfg, device)
mesh = generator.generate_mesh(data, return_stats=False)
mesh.export("smile.off")
if __name__ == "__main__":
from_pointcloud(visualize=True)
| 39.403061
| 131
| 0.633692
|
a805c1bdda7c9aa19dd33076a8bc50217ba0950e
| 567
|
py
|
Python
|
zvt/recorders/sina/money_flow_recorder.py
|
ringwraith/zvt
|
ff5844ff7991132bbf38d464f29f461dba5efa14
|
[
"MIT"
] | 1
|
2019-08-24T02:26:51.000Z
|
2019-08-24T02:26:51.000Z
|
zvt/recorders/sina/money_flow_recorder.py
|
ringwraith/zvt
|
ff5844ff7991132bbf38d464f29f461dba5efa14
|
[
"MIT"
] | null | null | null |
zvt/recorders/sina/money_flow_recorder.py
|
ringwraith/zvt
|
ff5844ff7991132bbf38d464f29f461dba5efa14
|
[
"MIT"
] | 1
|
2020-05-16T09:42:02.000Z
|
2020-05-16T09:42:02.000Z
|
# -*- coding: utf-8 -*-
from zvt.recorders.recorder import TimeSeriesDataRecorder
class MoneyFlowRecorder(TimeSeriesDataRecorder):
url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_zjlrqs?page=1&num=1000&sort=opendate&asc=0&bankuai=0%2Fnew_jrhy'
    # Alternative Sina endpoints for sector-level money flow (kept for reference):
    # 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_bk?page=1&num=20&sort=netamount&asc=0&fenlei=1'
    # 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_bk?page=1&num=20&sort=netamount&asc=0&fenlei=0'
| 70.875
| 162
| 0.797178
|
8268f65c5fc0ff1344f218b5fa17ee701150edbb
| 1,072
|
py
|
Python
|
checkin.py
|
Andyvon230/glados_checkin_1426593702_qq_com
|
37e0c47aab198be2284927d9ee381fe684c17cbf
|
[
"MIT"
] | null | null | null |
checkin.py
|
Andyvon230/glados_checkin_1426593702_qq_com
|
37e0c47aab198be2284927d9ee381fe684c17cbf
|
[
"MIT"
] | null | null | null |
checkin.py
|
Andyvon230/glados_checkin_1426593702_qq_com
|
37e0c47aab198be2284927d9ee381fe684c17cbf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import requests
import os
result = b'success\n'
# url
url = "https://glados.rocks/api/user/checkin"
# cookie
cookie = os.environ["COOKIE"]
payload = "{\"token\":\"glados_network\"}"
headers = {
'authority': 'glados.rocks',
'accept': 'application/json, text/plain, */*',
'dnt': '1',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.80 Safari/537.36',
'content-type': 'application/json;charset=UTF-8',
'origin': 'https://glados.rocks',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://glados.rocks/console/checkin',
'accept-language': 'zh-CN,zh;q=0.9',
'cookie': cookie
}
def do_action():
logger = logging.getLogger()
response = requests.request("POST", url, headers=headers, data = payload)
result = response.text.encode('utf8')
logger.info(result)
print(result)
return result
if __name__ == '__main__':
do_action()
| 28.972973
| 140
| 0.634328
|
54562e62dd3611d8d7d7adb9d186c063fde646ac
| 8,459
|
py
|
Python
|
software/scripts/epixHRGen1FD.py
|
ejangelico/cryo-on-epix-hr-dev
|
354bf205a67d3c43b4e815823dd78cec85d3b672
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-05-24T22:01:54.000Z
|
2021-05-24T22:01:54.000Z
|
software/scripts/epixHRGen1FD.py
|
ejangelico/cryo-on-epix-hr-dev
|
354bf205a67d3c43b4e815823dd78cec85d3b672
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-02-25T20:27:36.000Z
|
2021-03-31T17:55:08.000Z
|
software/scripts/epixHRGen1FD.py
|
ejangelico/cryo-on-epix-hr-dev
|
354bf205a67d3c43b4e815823dd78cec85d3b672
|
[
"BSD-3-Clause-LBNL"
] | 4
|
2020-10-21T21:39:37.000Z
|
2021-07-24T02:19:34.000Z
|
#!/usr/bin/env python3
#-----------------------------------------------------------------------------
# Title : ePix 10ka board instance
#-----------------------------------------------------------------------------
# File : epix10kaDAQ.py evolved from evalBoard.py
# Author : Ryan Herbst, rherbst@slac.stanford.edu
# Modified by: Dionisio Doering
# Created : 2016-09-29
# Last update: 2017-02-01
#-----------------------------------------------------------------------------
# Description:
# Rogue interface to ePix 10ka board
#-----------------------------------------------------------------------------
# This file is part of the rogue_example software. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue_example software, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import threading
import signal
import atexit
import yaml
import time
import sys
import argparse
import PyQt4.QtGui
import PyQt4.QtCore
import pyrogue.utilities.prbs
import pyrogue.utilities.fileio
import pyrogue.gui
import rogue.hardware.pgp
import rogue.hardware.data
import surf
import surf.axi
import surf.protocols.ssi
import ePixViewer as vi
import ePixFpga as fpga
from XilinxKcu1500Pgp3.XilinxKcu1500Pgp3 import *
# Set the argument parser
parser = argparse.ArgumentParser()
# Add arguments
parser.add_argument(
"--type",
type = str,
required = True,
help = "define the PCIe card type (either pgp-gen3 or kcu1500)",
)
parser.add_argument(
"--start_gui",
type = bool,
required = False,
default = True,
help = "true to show gui",
)
parser.add_argument(
"--verbose",
type = bool,
required = False,
default = True,
help = "true for verbose printout",
)
# Get the arguments
args = parser.parse_args()
# Add PGP virtual channels
if ( args.type == 'pgp-gen3' ):
# Create the PGP interfaces for ePix hr camera
pgpL0Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',1,0) # Data & cmds
pgpL0Vc1 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',1,1) # Registers for ePix board
pgpL0Vc2 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',1,2) # PseudoScope
pgpL0Vc3 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',1,3) # Monitoring (Slow ADC)
pgpL1Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,0) # Data (when using all four lanes it should be swapped back with L0)
pgpL2Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',2,0) # Data
pgpL3Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',3,0) # Data
print("")
print("PGP Card Version: %x" % (pgpL0Vc0.getInfo().version))
elif ( args.type == 'kcu1500' ):
pgpL0Vc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(1*32)+0) # Data & cmds
pgpL0Vc1 = rogue.hardware.data.DataCard('/dev/datadev_0',(1*32)+1) # Registers for ePix board
pgpL0Vc2 = rogue.hardware.data.DataCard('/dev/datadev_0',(1*32)+2) # PseudoScope
pgpL0Vc3 = rogue.hardware.data.DataCard('/dev/datadev_0',(1*32)+3) # Monitoring (Slow ADC)
pgpL1Vc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(0*32)+0) # Data (when using all four lanes it should be swapped back with L0)
pgpL2Vc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(2*32)+0) # Data
pgpL3Vc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(3*32)+0) # Data
else:
raise ValueError("Invalid type (%s)" % (args.type) )
# Add data stream to file as channel 1 File writer
dataWriter = pyrogue.utilities.fileio.StreamWriter(name='dataWriter')
pyrogue.streamConnect(pgpL0Vc0, dataWriter.getChannel(0x1))
cmd = rogue.protocols.srp.Cmd()
pyrogue.streamConnect(cmd, pgpL0Vc0)
# Create and Connect SRP to VC1 to send commands
srp = rogue.protocols.srp.SrpV3()
pyrogue.streamConnectBiDir(pgpL0Vc1,srp)
#############################################
# Microblaze console printout
#############################################
class MbDebug(rogue.interfaces.stream.Slave):
def __init__(self):
rogue.interfaces.stream.Slave.__init__(self)
self.enable = False
def _acceptFrame(self,frame):
if self.enable:
p = bytearray(frame.getPayload())
frame.read(p,0)
print('-------- Microblaze Console --------')
print(p.decode('utf-8'))
#######################################
# Custom run control
#######################################
class MyRunControl(pyrogue.RunControl):
def __init__(self,name):
pyrogue.RunControl.__init__(self,name, description='Run Controller ePix HR empty', rates={1:'1 Hz', 2:'2 Hz', 4:'4 Hz', 8:'8 Hz', 10:'10 Hz', 30:'30 Hz', 60:'60 Hz', 120:'120 Hz'})
self._thread = None
def _setRunState(self,dev,var,value,changed):
if changed:
if self.runState.get(read=False) == 'Running':
self._thread = threading.Thread(target=self._run)
self._thread.start()
else:
self._thread.join()
self._thread = None
def _run(self):
self.runCount.set(0)
self._last = int(time.time())
while (self.runState.value() == 'Running'):
delay = 1.0 / ({value: key for key,value in self.runRate.enum.items()}[self._runRate])
time.sleep(delay)
self._root.ssiPrbsTx.oneShot()
self._runCount += 1
if self._last != int(time.time()):
self._last = int(time.time())
self.runCount._updated()
##############################
# Set base
##############################
class EpixBoard(pyrogue.Root):
def __init__(self, guiTop, cmd, dataWriter, srp, **kwargs):
super().__init__(name='ePixHRGen1',description='ePix HR No ASIC', **kwargs)
#self.add(MyRunControl('runControl'))
self.add(dataWriter)
self.guiTop = guiTop
@self.command()
def Trigger():
cmd.sendCmd(0, 0)
# Add Devices
if ( args.type == 'kcu1500' ):
coreMap = rogue.hardware.data.DataMap('/dev/datadev_0')
self.add(XilinxKcu1500Pgp3(memBase=coreMap))
self.add(fpga.EpixHRGen1FD(name='EpixHRGen1', offset=0, memBase=srp, hidden=False, enabled=True))
self.add(pyrogue.RunControl(name = 'runControl', description='Run Controller ePix HR Gen1 No ASIC', cmd=self.Trigger, rates={1:'1 Hz', 2:'2 Hz', 4:'4 Hz', 8:'8 Hz', 10:'10 Hz', 30:'30 Hz', 60:'60 Hz', 120:'120 Hz'}))
# debug
mbcon = MbDebug()
pyrogue.streamTap(pgpL0Vc0,mbcon)
#pyrogue.streamTap(pgpL1Vc0,mbcon)
#pyrogue.streamTap(pgpL2Vc0,mbcon)
#pyrogue.streamTap(pgpL3Vc0,mbcon)
mbcon1 = MbDebug()
pyrogue.streamTap(pgpL1Vc0,mbcon1)
mbcon2 = MbDebug()
pyrogue.streamTap(pgpL2Vc0,mbcon2)
mbcon3 = MbDebug()
pyrogue.streamTap(pgpL3Vc0,mbcon3)
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 0[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL0Vc0, dbgData)
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 1[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL1Vc0, dbgData)
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 2[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL2Vc0, dbgData)
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 3[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL3Vc0, dbgData)
# Create GUI
appTop = PyQt4.QtGui.QApplication(sys.argv)
guiTop = pyrogue.gui.GuiTop(group='ePixHRGEn1Gui')
ePixBoard = EpixBoard(guiTop, cmd, dataWriter, srp)
ePixBoard.start(pollEn=False, pyroGroup=None, pyroHost=None)
guiTop.addTree(ePixBoard)
guiTop.resize(800,800)
# Create GUI
if (args.start_gui):
appTop.exec_()
# Close window and stop polling
def stop():
    # only the pyrogue root is created in this script, so stop it and exit
    ePixBoard.stop()
    exit()
# Start with: ipython -i scripts/epix10kaDAQ.py for interactive approach
print("Started rogue mesh and epics V3 server. To exit type stop()")
| 35.099585
| 224
| 0.623005
|
eddfdb42e7b9d1f96056c8395b79635da0778079
| 1,606
|
py
|
Python
|
utils.py
|
yjs1224/TextSteganalysis
|
3b391f67c37cf2dea964639d201ea5f65fdcf9ba
|
[
"MIT"
] | 6
|
2021-12-17T13:39:04.000Z
|
2022-03-09T09:12:39.000Z
|
utils.py
|
yjs1224/TextSteganalysis
|
3b391f67c37cf2dea964639d201ea5f65fdcf9ba
|
[
"MIT"
] | 1
|
2022-01-17T09:52:49.000Z
|
2022-01-22T14:05:10.000Z
|
utils.py
|
yjs1224/TextSteganalysis
|
3b391f67c37cf2dea964639d201ea5f65fdcf9ba
|
[
"MIT"
] | null | null | null |
import json
import sklearn.metrics as metrics
class MyDict(dict):
__setattr__ = dict.__setitem__
# def __setattr__(self, key, value):
# try:
# self[key] = value
# except:
# raise AttributeError(key)
# __getattr__ = dict.__getitem__
def __getattr__(self, item):
try:
return self[item]
        except KeyError:
raise AttributeError(item)
class Config(object):
def __init__(self, config_path):
configs = json.load(open(config_path, "r", encoding="utf-8"))
self.configs = self.dictobj2obj(configs)
self.configs.state_dict = configs
def dictobj2obj(self, dictobj):
if not isinstance(dictobj, dict):
return dictobj
d = MyDict()
for k, v in dictobj.items():
d[k] = self.dictobj2obj(v)
return d
def get_configs(self):
return self.configs
def compute_metrics(task_name, preds, labels, stego_label=1):
assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
if task_name in ["steganalysis", "graph_steganalysis"]:
return {"accuracy": metrics.accuracy_score(labels, preds),
"macro_f1":metrics.f1_score(labels, preds, average="macro"),
"precision":metrics.precision_score(labels, preds, pos_label=stego_label),
"recall":metrics.recall_score(labels, preds, pos_label=stego_label),
"f1_score":metrics.f1_score(labels, preds, pos_label=stego_label)}
else:
raise KeyError(task_name)
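# Minimal usage sketch (assumptions: binary labels where 1 marks stego text, and
# an existing JSON file for the Config example, which is therefore left commented out).
if __name__ == "__main__":
    preds = [0, 1, 1, 0, 1]
    labels = [0, 1, 0, 0, 1]
    # the task name must be "steganalysis" or "graph_steganalysis"
    print(compute_metrics("steganalysis", preds, labels, stego_label=1))
    # cfg = Config("config.json").get_configs()   # "config.json" is a hypothetical path
    # print(cfg.state_dict)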
| 33.458333
| 118
| 0.627646
|
d720407a42faf44bd067444a1b3406b9fc5ee4ad
| 375
|
py
|
Python
|
lahja/tools/benchmark/utils/config.py
|
vaporydev/lahja
|
10fb6276d2312629cdbc7367fa3a0057656b540b
|
[
"MIT"
] | null | null | null |
lahja/tools/benchmark/utils/config.py
|
vaporydev/lahja
|
10fb6276d2312629cdbc7367fa3a0057656b540b
|
[
"MIT"
] | null | null | null |
lahja/tools/benchmark/utils/config.py
|
vaporydev/lahja
|
10fb6276d2312629cdbc7367fa3a0057656b540b
|
[
"MIT"
] | null | null | null |
from typing import (
Tuple,
)
from lahja import (
ConnectionConfig,
)
def create_consumer_endpoint_configs(num_processes: int) -> Tuple[ConnectionConfig, ...]:
return tuple(
ConnectionConfig.from_name(create_consumer_endpoint_name(i)) for i in range(num_processes)
)
def create_consumer_endpoint_name(id: int) -> str:
return f"consumer_{id}"
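# Usage sketch (hypothetical process count): each config is built via
# ConnectionConfig.from_name("consumer_<i>"), so three processes yield
# consumer_0, consumer_1 and consumer_2.
if __name__ == "__main__":
    for config in create_consumer_endpoint_configs(3):
        print(config)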
| 20.833333
| 98
| 0.725333
|
00c95f43de77f3d87521365358907b478be7f2df
| 573
|
py
|
Python
|
tests/test_bigdict.py
|
zpz/bigdict
|
1cd32885aa0ce908ca824411f7662fa2439af1bd
|
[
"MIT"
] | 3
|
2021-07-23T03:15:19.000Z
|
2021-09-12T06:03:45.000Z
|
tests/test_bigdict.py
|
zpz/bigdict
|
1cd32885aa0ce908ca824411f7662fa2439af1bd
|
[
"MIT"
] | 5
|
2021-07-08T06:48:28.000Z
|
2021-07-19T03:47:21.000Z
|
tests/test_bigdict.py
|
zpz/bigdict
|
1cd32885aa0ce908ca824411f7662fa2439af1bd
|
[
"MIT"
] | null | null | null |
from uuid import uuid4
from bigdict import Bigdict
def test_bigdict():
bd = Bigdict.new()
print(bd)
bd['a'] = 3
bd['b'] = 4
bd[9] = [1, 2, 'a']
bd[('a', 3)] = {'a': 3, 'b': 4}
uid = str(uuid4())
bd['uid'] = uid
assert len(bd) == 5
bd2 = bd.view()
assert bd2['a'] == 3
assert bd2['b'] == 4
assert bd2[9] == [1, 2, 'a']
assert bd2[('a', 3)] == {'a': 3, 'b': 4}
assert bd2['uid'] == uid
del bd['b']
assert 'b' not in bd
assert len(bd) == 4
bd.flush()
assert len(bd2) == 5
bd.destroy()
| 17.90625
| 44
| 0.462478
|
e6a1ea49bd8370f359c7943e51249dae61a11c3f
| 1,371
|
py
|
Python
|
chouette_iot/metrics/plugins/__init__.py
|
akatashev/chouette-iot
|
bab56df266fffbc9d1332eebb8f2f5cafac7ba6a
|
[
"Apache-2.0"
] | 1
|
2020-06-10T10:13:53.000Z
|
2020-06-10T10:13:53.000Z
|
chouette_iot/metrics/plugins/__init__.py
|
akatashev/chouette-iot
|
bab56df266fffbc9d1332eebb8f2f5cafac7ba6a
|
[
"Apache-2.0"
] | null | null | null |
chouette_iot/metrics/plugins/__init__.py
|
akatashev/chouette-iot
|
bab56df266fffbc9d1332eebb8f2f5cafac7ba6a
|
[
"Apache-2.0"
] | null | null | null |
"""
chouette.metrics.plugins
"""
# pylint: disable=too-few-public-methods
from typing import Dict, Optional, Type
from pykka import ActorRef # type: ignore
from ._collector_plugin import CollectorPluginActor
from ._docker_collector import DockerCollectorPlugin
from ._dramatiq_collector import DramatiqCollectorPlugin
from ._host_collector import HostCollectorPlugin
from ._k8s_collector import K8sCollectorPlugin
from ._tegrastats_collector import TegrastatsCollectorPlugin
__all__ = [
"PluginsFactory",
]
class PluginsFactory:
"""
PluginsFactory class creates plugins actors and returns their ActorRefs.
"""
plugins: Dict[str, Type[CollectorPluginActor]] = {
"dramatiq": DramatiqCollectorPlugin,
"host": HostCollectorPlugin,
"k8s": K8sCollectorPlugin,
"tegrastats": TegrastatsCollectorPlugin,
"docker": DockerCollectorPlugin,
}
@classmethod
def get_plugin(cls, plugin_name: str) -> Optional[ActorRef]:
"""
Takes a plugin name and returns an ActorRef if such plugin exists.
Args:
plugin_name: Plugin name as a string.
Returns: ActorRef or None.
"""
plugin_class = cls.plugins.get(plugin_name)
if not plugin_class:
return None
actor_ref: ActorRef = plugin_class.get_instance()
return actor_ref
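# Usage sketch (assumes a running Pykka actor environment and that the chosen
# plugin's host-level dependencies are available; the message payload is hypothetical):
#
#   ref = PluginsFactory.get_plugin("host")   # ActorRef, or None for unknown names
#   if ref is not None:
#       ref.tell({"collect": True})           # hypothetical message send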
| 28.5625
| 76
| 0.708242
|
24cfa13c25be758965b8305c2e91840bed929b07
| 7,366
|
py
|
Python
|
run_details/generate_run.py
|
sealuzh/docker-ecosystem-paper
|
5c8b253062796baf5d154bc6f9660a7d05d3dad5
|
[
"Apache-2.0"
] | 5
|
2017-05-19T15:41:46.000Z
|
2021-08-03T16:52:56.000Z
|
run_details/generate_run.py
|
sealuzh/docker-ecosystem-paper
|
5c8b253062796baf5d154bc6f9660a7d05d3dad5
|
[
"Apache-2.0"
] | 1
|
2019-11-18T09:26:23.000Z
|
2019-11-18T09:26:23.000Z
|
run_details/generate_run.py
|
sealuzh/docker-ecosystem-paper
|
5c8b253062796baf5d154bc6f9660a7d05d3dad5
|
[
"Apache-2.0"
] | 1
|
2017-05-20T13:54:14.000Z
|
2017-05-20T13:54:14.000Z
|
#!/usr/bin/python
# Do not judge a man by the quality of his research code…
# while his intentions were good, his will broke under the
# time pressure of the conference submission deadline.
# ...And now stop complaining and enjoy the perils of reproducibility. (J.C.)
import psycopg2
import sys
def main():
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(host=sys.argv[1], database=sys.argv[2], user=sys.argv[3], password=sys.argv[4],port=sys.argv[5])
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
run_diff_breakdown(cursor)
def row_format(row):
return " & ".join(row)
### RUN Diff Type queries
def run_diff_breakdown(cursor):
print "Breakdown of RUN Diff Type instructions"
print "Breakdown of All Changes"
print row_format(['All', 'Add', 'Mod', 'Rem'])
diff_types = ['', 'Add', 'Update', 'Del']
top_list = [0, 1000, 100]
rows = []
row_index = 0
for label, run_list in all_lists().iteritems():
rows.append([label])
for top in top_list:
print label + ", Top: " + str(top)
for diff_type in diff_types:
value = round(run_diff_proportion(cursor, run_list, diff_type, top), 2)
color = cellcolor(value)
column = cellcolor_format(color) + str(value)
print diff_type + ": " + str(column)
rows[row_index].append(column)
row_index += 1
for row in rows:
print row_format(row)
def cellcolor_format(color):
if color == "": return ""
return "\cellcolor{" + color + "} "
def cellcolor(value):
#\cellcolor{mid}
if value < 0.01:
return ""
if value < 0.02: return "lowest"
if value < 0.05: return "low"
if value < 0.10: return "midlow"
if value < 0.2: return "mid"
if value < 0.4: return "midhigh"
if value < 0.5: return "high"
return "highest"
"""0.00 - white
0.01 - lowest
0.02 - 0.05 - low
0.06 - 0.10 - midlow
0.11 - 0.2 - mid
0.21 - 0.4 - midhigh
0.41 - 0.5 - high"""
def run_diff_proportion(cursor, executable_list = '', diff_type = '', top = 0):
population = float(run_diff_count(cursor, '', diff_type, top))
count = run_diff_count(cursor, executable_list, diff_type, top)
return count / population
def run_diff_count(cursor, executable_list = '', diff_type = '', top = 0):
top_join, top_where = diff_top_query_sql_parts(top)
diff_type_where = diff_type_query_sql_part(diff_type)
executable_list_where = "" if executable_list == '' else "executable in %(executable_list)s"
where = ["diff_state = 'COMMIT_COMMIT'",
"instruction = 'RUN'",
executable_list_where,
top_where,
diff_type_where]
where = filter(None, where) # remove empty elements
sql = "select count(*) FROM diff d join diff_type dt on d.diff_id = dt.diff_id " + top_join + " WHERE " + " and ".join(where)
#use mogrify instead of execute if you want to see the resulting SQL statement
if executable_list == '':
#print cursor.mogrify(sql, { 'type' : diff_type + "%%"})
cursor.execute(sql, { 'type' : diff_type + "%%"})
else:
cursor.execute(sql, { 'type' : diff_type + "%%",'executable_list' : tuple(executable_list), })
return cursor.fetchone()[0]
def diff_type_query_sql_part(diff_type):
if diff_type == '':
return ''
return " change_type like %(type)s "
def diff_top_query_sql_parts(top):
if top == 100 or top == 1000:
# restrict to top100 or top1000 projects if param given
top_join = " join repo_diff_type rdt on dt.diff_type_id = rdt.diff_type_id "
top_where = " rdt.repo_path in (select distinct(repo_path) from top" + str(top) + ")"
return top_join, top_where
return "", ""
### RUN current queries
def run_breakdown(cursor):
print "Breakdown of RUN instructions"
print "All & T1000 & T100"
# get population numbers
all_population = float(run_population(cursor))
t1000_population = float(run_population(cursor, 1000))
t100_population = float(run_population(cursor, 100))
#print all_population, t1000_population, t100_population
sum_all = 0
sum_t1000 = 0
sum_t100 = 0
for label, run_list in all_lists().iteritems():
all = run_count(cursor, run_list)
t1000 = run_count(cursor, run_list, 1000)
t100 = run_count(cursor, run_list, 100)
all_proportional = round(all / all_population, 3)
t1000_proportional = round(t1000 / t1000_population, 3)
t100_proportional = round(t100 / t100_population, 3)
sum_all += all
sum_t1000 += t1000
sum_t100 += t100
print row_format([label, str(all_proportional), str(t1000_proportional), str(t100_proportional)])
# 'Other' is the remaining %
all_other = round((all_population - sum_all) / all_population, 3)
t1000_other = round((t1000_population - sum_t1000) / t1000_population, 3)
t100_other = round((t100_population - sum_t100) / t100_population, 3)
print row_format(["Other", str(all_other), str(t1000_other), str(t100_other)])
def run_population(cursor, top = 0):
top_join, top_where = top_query_sql_parts(top)
cursor.execute("select count(*) from df_run r " + top_join + " where r.current = true " + top_where)
return cursor.fetchone()[0]
def run_count(cursor, executable_list, top = 0):
top_join, top_where = top_query_sql_parts(top)
cursor.execute("select count(*) from df_run r " + top_join + " where r.current = true and r.executable in %(executable_list)s " + top_where, { 'executable_list' : tuple(executable_list), })
return cursor.fetchone()[0]
def top_query_sql_parts(top):
if top == 100 or top == 1000:
# restrict to top100 or top1000 projects if param given
top_join = " join snapshot s on s.snap_id = r.snap_id join dockerfile d on d.dock_id = s.dock_id "
top_where = " and d.repo_path in (select repo_path from top" + str(top) + ")"
return top_join, top_where
return "", ""
def all_lists():
return { 'Dependencies' : dependencies_list(), 'Filesystem' : filesystem_list(), 'Build/Execute' : build_execute_list(),
'Environment' : environment_list(), 'Permissions' : permissions_list()}
def dependencies_list():
return ['apt-get', 'npm', 'yum', 'curl', 'pip', 'wget', 'git', 'apk', 'gem', 'bower', 'add-apt-repository', 'dpkg', 'rpm', 'bundle', 'apt-key', 'pip3', 'dnf', 'conda', 'cabal', 'easy_install', 'nvm', 'lein', 'composer', 'mvn', 'apk-install', 'apt', 'pecl', 'puppet', 'svn', 'godep']
def filesystem_list():
return ['echo', 'mkdir', 'rm', 'cd', 'tar', 'sed', 'ln', 'mv', 'cp', 'unzip', 'pacman', 'touch', 'ls', 'cat', 'find']
def build_execute_list():
return ['make', 'go', './configure', '/bin/bash', 'bash', 'python', 'service', 'sh', 'cmake', 'install', 'python3']
def environment_list():
return ['set', 'export', 'source', 'virtualenv']
def permissions_list():
return ['chmod', 'chown', 'useradd', 'groupadd', 'adduser', 'usermod', 'addgroup']
if __name__ == "__main__":
main()
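# Usage sketch (hypothetical connection parameters; the positional arguments are
# host, database, user, password and port, in that order, as read in main()):
#
#   python generate_run.py db.example.org docker_db analyst secret 5432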
| 34.420561
| 290
| 0.631686
|
546053bf19bbf17c5f3f43fdfa3c7d3a0af93a4b
| 20,773
|
py
|
Python
|
src/popoto/models/base.py
|
tomcounsell/popoto
|
fc36d625a35393cd6f96afee6b13e849fe9cd242
|
[
"MIT"
] | 5
|
2021-11-21T01:36:02.000Z
|
2022-01-28T23:16:51.000Z
|
src/popoto/models/base.py
|
tomcounsell/popoto
|
fc36d625a35393cd6f96afee6b13e849fe9cd242
|
[
"MIT"
] | 1
|
2021-12-29T13:20:17.000Z
|
2021-12-29T13:20:17.000Z
|
src/popoto/models/base.py
|
tomcounsell/popoto
|
fc36d625a35393cd6f96afee6b13e849fe9cd242
|
[
"MIT"
] | null | null | null |
import logging
import redis
from .encoding import encode_popoto_model_obj
from .db_key import DB_key
from .query import Query
from ..fields.field import Field, VALID_FIELD_TYPES
from ..fields.key_field_mixin import KeyFieldMixin
from ..fields.sorted_field_mixin import SortedFieldMixin
from ..fields.geo_field import GeoField
from ..fields.relationship import Relationship
from ..redis_db import POPOTO_REDIS_DB
logger = logging.getLogger('POPOTO.model_base')
global RELATED_MODEL_LOAD_SEQUENCE
RELATED_MODEL_LOAD_SEQUENCE = set()
class ModelException(Exception):
pass
class ModelOptions:
def __init__(self, model_name):
self.model_name = model_name
self.hidden_fields = dict()
self.explicit_fields = dict()
self.key_field_names = set()
# self.auto_field_names = set()
# self.list_field_names = set()
# self.set_field_names = set()
self.relationship_field_names = set()
self.sorted_field_names = set()
self.geo_field_names = set()
# todo: should this be a dict of related objects or just a list of field names?
# self.related_fields = {} # model becomes graph node
# todo: allow customizing this in model.Meta class
self.db_class_key = DB_key(self.model_name)
self.db_class_set_key = DB_key("$Class", self.db_class_key)
self.abstract = False
self.unique_together = []
self.index_together = []
self.parents = []
self.auto_created = False
self.base_meta = None
def add_field(self, field_name: str, field: Field):
if field_name.startswith("_") and field_name not in self.hidden_fields:
self.hidden_fields[field_name] = field
elif field_name not in self.explicit_fields:
self.explicit_fields[field_name] = field
else:
raise ModelException(f"{field_name} is already a Field on the model")
if isinstance(field, KeyFieldMixin):
self.key_field_names.add(field_name)
# if field.auto:
# self.auto_field_names.add(field_name)
if isinstance(field, SortedFieldMixin):
self.sorted_field_names.add(field_name)
if isinstance(field, GeoField):
self.geo_field_names.add(field_name)
# elif isinstance(field, ListField):
# self.list_field_names.add(field_name)
if isinstance(field, Relationship):
self.relationship_field_names.add(field_name)
@property
def fields(self) -> dict:
return {**self.explicit_fields, **self.hidden_fields}
@property
def field_names(self) -> list:
return list(self.fields.keys())
@property
def db_key_length(self):
return 1 + len(self.key_field_names)
def get_db_key_index_position(self, field_name):
return 1 + sorted(self.key_field_names).index(field_name)
class ModelBase(type):
"""Metaclass for all Popoto Models."""
def __new__(cls, name, bases, attrs, **kwargs):
# Initialization is only performed for a Model and its subclasses
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super().__new__(cls, name, bases, attrs, **kwargs)
# logger.debug({k: v for k, v in attrs.items() if not k.startswith('__')})
module = attrs.pop('__module__')
new_attrs = {'__module__': module}
attr_meta = attrs.pop('Meta', None)
options = ModelOptions(name)
options.parents = parents
for obj_name, obj in attrs.items():
if obj_name.startswith("__"):
# builtin or inherited private vars and methods
new_attrs[obj_name] = obj
elif isinstance(obj, Field):
# save field instance
# attr will be overwritten as a field.type
# model will handle this and set default values
options.add_field(obj_name, obj)
elif callable(obj) or hasattr(obj, '__func__') or hasattr(obj, '__set__'):
# a callable method or property
new_attrs[obj_name] = obj
elif obj_name.startswith("_"):
# a private static attr not to be saved in the db
new_attrs[obj_name] = obj
else:
raise ModelException(
f"public model attributes must inherit from class Field. "
f"Try using a private var (eg. _{obj_name})_"
)
# todo: handle multiple inheritance
# for base in parents:
# for field_name, field in base.auto_fields.items():
# options.add_field(field_name, field)
new_class = super().__new__(cls, name, bases, new_attrs)
options.abstract = getattr(attr_meta, 'abstract', False)
options.meta = attr_meta or getattr(new_class, 'Meta', None)
options.base_meta = getattr(new_class, '_meta', None)
new_class._meta = options
new_class.objects = new_class.query = Query(new_class)
return new_class
class Model(metaclass=ModelBase):
query: Query
def __init__(self, **kwargs):
cls = self.__class__
# self._ttl = kwargs.get('ttl', None)
# self._expire_at = kwargs.get('expire_at', None)
# allow init kwargs to set any base parameters
self.__dict__.update(kwargs)
# add auto KeyField if needed
if not len(self._meta.key_field_names):
from ..fields.shortcuts import AutoKeyField
self._meta.add_field('_auto_key', AutoKeyField())
# prep AutoKeys with new default ids
for field in self._meta.fields.values():
if hasattr(field, 'auto') and field.auto:
field.set_auto_key_value()
# set defaults
for field_name, field in self._meta.fields.items():
setattr(self, field_name, field.default)
# set field values from init kwargs
for field_name in self._meta.fields.keys() & kwargs.keys():
setattr(self, field_name, kwargs.get(field_name))
# load relationships
if len(self._meta.relationship_field_names):
global RELATED_MODEL_LOAD_SEQUENCE
is_parent_model = len(RELATED_MODEL_LOAD_SEQUENCE) == 0
for field_name in self._meta.relationship_field_names:
if f"{self.__class__.__name__}.{field_name}" in RELATED_MODEL_LOAD_SEQUENCE:
continue
RELATED_MODEL_LOAD_SEQUENCE.add(f"{self.__class__.__name__}.{field_name}")
field_value = getattr(self, field_name)
if isinstance(field_value, Model):
setattr(self, field_name, field_value)
elif isinstance(field_value, str):
setattr(
self, field_name,
self._meta.fields[field_name].model.query.get(redis_key=field_value)
)
# todo: lazy load the instance from the db
elif not field_value:
setattr(self, field_name, None)
else:
raise ModelException(f"{field_name} expects model instance or redis_key")
if is_parent_model:
RELATED_MODEL_LOAD_SEQUENCE = set()
self._ttl = None # todo: set default in child Meta class
self._expire_at = None # todo: datetime? or timestamp?
# validate initial attributes
if not self.is_valid(null_check=False): # exclude null, will validate null values on pre-save
raise ModelException(f"Could not instantiate class {self}")
self._redis_key = None
# _db_key used by Redis cannot be known without performance cost
# _db_key is predicted until synced during save() call
if None not in [getattr(self, key_field_name) for key_field_name in self._meta.key_field_names]:
self._redis_key = self.db_key.redis_key
self.obsolete_redis_key = None # to be used when db_key changes between loading and saving the object
self._db_content = dict() # empty until synced during save() call
# todo: create set of possible custom field keys
@property
def db_key(self) -> DB_key:
"""
the db key must include the class name - equivalent to db table name
keys append alphabetically.
if another order is required, propose feature request in GitHub issue
possible solutions include param on each model's KeyField order=int
OR model Meta: key_order = [keyname, keyname, ]
OR both
"""
return DB_key(self._meta.db_class_key, [
getattr(self, key_field_name) or "None"
for key_field_name in sorted(self._meta.key_field_names)
])
def __repr__(self):
return f"<{self.__class__.__name__} Popoto object at {self.db_key.redis_key}>"
def __str__(self):
return str(self.db_key)
def __eq__(self, other):
"""
equality method
instances with the same key(s) and class are considered equal
except when any key(s) are None, they are not equal to anything except themselves.
for evaluating all instance values against each other, use something like this:
self_dict = self._meta.fields.update((k, self.__dict__[k]) for k in set(self.__dict__).intersection(self._meta.fields))
other_dict = other._meta.fields.update((k, other.__dict__[k]) for k in set(other.__dict__).intersection(other._meta.fields))
return repr(dict(sorted(self_dict))) == repr(dict(sorted(other_dict)))
"""
try:
if isinstance(other, self.__class__):
                # always False if any KeyFields are None
if (None in [
self._meta.fields.get(key_field_name) for key_field_name in self._meta.key_field_names
]) or (None in [
other._meta.fields.get(key_field_name) for key_field_name in other._meta.key_field_names
]):
return repr(self) == repr(other)
return self.db_key == other.db_key
except:
return False
else:
return False
# @property
# def field_names(self):
# return [
# k for k, v in self.__dict__.items()
# if all([not k.startswith("_"), k + "_meta" in self.__dict__])
# ]
def is_valid(self, null_check=True) -> bool:
"""
todo: validate values
- field.type ✅
- field.null ✅
- field.max_length ✅
- ttl, expire_at - todo
"""
for field_name in self._meta.field_names:
# type check the field values against their class specified type, unless null/None
if all([
getattr(self, field_name) is not None,
not isinstance(getattr(self, field_name), self._meta.fields[field_name].type)
]):
try:
if getattr(self, field_name) is not None:
if self._meta.fields[field_name].type in VALID_FIELD_TYPES:
setattr(self, field_name, self._meta.fields[field_name].type(getattr(self, field_name)))
else:
pass # do not force typing if custom type is defined
if not isinstance(getattr(self, field_name), self._meta.fields[field_name].type):
raise TypeError(f"Expected {field_name} to be type {self._meta.fields[field_name].type}. "
f"It is type {type(getattr(self, field_name))}")
except TypeError as e:
logger.error(
f"{str(e)} \n Change the value or modify type on {self.__class__.__name__}.{field_name}"
)
return False
# check non-nullable fields
if null_check and \
self._meta.fields[field_name].null is False and \
getattr(self, field_name) is None:
error = f"{field_name} is None/null. " \
f"Set a value or set null=True on {self.__class__.__name__}.{field_name}"
logger.error(error)
return False
# validate str max_length
if self._meta.fields[field_name].type == str and \
getattr(self, field_name) and \
len(getattr(self, field_name)) > self._meta.fields[field_name].max_length:
error = f"{field_name} is greater than max_length={self._meta.fields[field_name].max_length}"
logger.error(error)
return False
if self._ttl and self._expire_at:
            raise ModelException("Can set either ttl or expire_at, not both.")
for field_name, field_value in self.__dict__.items():
if field_name in self._meta.fields.keys():
field_class = self._meta.fields[field_name].__class__
if not field_class.is_valid(self._meta.fields[field_name], field_value, null_check=null_check):
error = f"Validation on [{field_name}] Field failed"
logger.error(error)
return False
return True
def pre_save(self, pipeline: redis.client.Pipeline = None,
ignore_errors: bool = False, **kwargs):
"""
Model instance preparation for saving.
"""
if not self.is_valid():
error_message = "Model instance parameters invalid. Failed to save."
if ignore_errors:
logger.error(error_message)
else:
raise ModelException(error_message)
return False
# run any necessary formatting on field data before saving
for field_name, field in self._meta.fields.items():
setattr(
self, field_name,
field.format_value_pre_save(getattr(self, field_name))
)
return pipeline if pipeline else True
def save(self, pipeline: redis.client.Pipeline = None,
ttl=None, expire_at=None, ignore_errors: bool = False,
**kwargs):
"""
Model instance save method. Uses Redis HSET command with key, dict of values, ttl.
Also triggers all field on_save methods.
"""
pipeline_or_success = self.pre_save(pipeline=pipeline, ignore_errors=ignore_errors, **kwargs)
if not pipeline_or_success:
return pipeline or False
elif pipeline:
pipeline = pipeline_or_success
new_db_key = DB_key(self.db_key) # todo: why have a new key??
if self._redis_key != new_db_key.redis_key:
self.obsolete_redis_key = self._redis_key
# todo: implement and test tll, expire_at
ttl, expire_at = (ttl or self._ttl), (expire_at or self._expire_at)
"""
1. save object as hashmap
2. optionally set ttl, expire_at
3. add to class set
4. if obsolete key, delete and run field on_delete methods
5. run field on_save methods
6. save private version of compiled db key
"""
hset_mapping = encode_popoto_model_obj(self) # 1
self._db_content = hset_mapping # 1
if isinstance(pipeline, redis.client.Pipeline):
pipeline = pipeline.hset(new_db_key.redis_key, mapping=hset_mapping) # 1
# if ttl is not None:
# pipeline = pipeline.expire(new_db_key, ttl) # 2
# if expire_at is not None:
# pipeline = pipeline.expire_at(new_db_key, expire_at) # 2
pipeline = pipeline.sadd(self._meta.db_class_set_key.redis_key, new_db_key.redis_key) # 3
if self.obsolete_redis_key and self.obsolete_redis_key != new_db_key.redis_key: # 4
for field_name, field in self._meta.fields.items():
pipeline = field.on_delete( # 4
model_instance=self, field_name=field_name,
field_value=getattr(self, field_name),
pipeline=pipeline, **kwargs
)
pipeline.delete(self.obsolete_redis_key) # 4
self.obsolete_redis_key = None
for field_name, field in self._meta.fields.items(): # 5
pipeline = field.on_save( # 5
self, field_name=field_name,
field_value=getattr(self, field_name),
# ttl=ttl, expire_at=expire_at,
ignore_errors=ignore_errors,
pipeline=pipeline, **kwargs
)
self._redis_key = new_db_key.redis_key # 6
return pipeline
else:
db_response = POPOTO_REDIS_DB.hset(new_db_key.redis_key, mapping=hset_mapping) # 1
# if ttl is not None:
# POPOTO_REDIS_DB.expire(new_db_key, ttl) # 2
# if expire_at is not None:
# POPOTO_REDIS_DB.expireat(new_db_key, ttl) # 2
            POPOTO_REDIS_DB.sadd(self._meta.db_class_set_key.redis_key, new_db_key.redis_key)  # 3
if self.obsolete_redis_key and self.obsolete_redis_key != new_db_key.redis_key: # 4
for field_name, field in self._meta.fields.items():
field.on_delete( # 4
model_instance=self, field_name=field_name,
field_value=getattr(self, field_name),
pipeline=None, **kwargs
)
POPOTO_REDIS_DB.delete(self.obsolete_redis_key) # 4
self.obsolete_redis_key = None
for field_name, field in self._meta.fields.items(): # 5
field.on_save( # 5
self, field_name=field_name,
field_value=getattr(self, field_name),
# ttl=ttl, expire_at=expire_at,
ignore_errors=ignore_errors,
pipeline=None, **kwargs
)
self._redis_key = new_db_key.redis_key # 6
return db_response
@classmethod
def create(cls, pipeline: redis.client.Pipeline = None, **kwargs):
instance = cls(**kwargs)
pipeline_or_db_response = instance.save(pipeline=pipeline)
return pipeline_or_db_response if pipeline else instance
@classmethod
def load(cls, db_key: str = None, **kwargs):
return cls.query.get(db_key=db_key or cls(**kwargs).db_key)
def delete(self, pipeline: redis.client.Pipeline = None, *args, **kwargs):
"""
Model instance delete method. Uses Redis DELETE command with key.
Also triggers all field on_delete methods.
"""
delete_redis_key = self._redis_key or self.db_key.redis_key
"""
1. delete object as hashmap
2. delete from class set
3. run field on_delete methods
4. reset private vars
"""
if pipeline is not None:
pipeline = pipeline.delete(delete_redis_key) # 1
pipeline = pipeline.srem(self._meta.db_class_set_key.redis_key, delete_redis_key) # 2
for field_name, field in self._meta.fields.items(): # 3
pipeline = field.on_delete( # 3
model_instance=self, field_name=field_name,
field_value=getattr(self, field_name),
pipeline=pipeline, **kwargs
)
self._db_content = dict() # 4
return pipeline
else:
db_response = POPOTO_REDIS_DB.delete(delete_redis_key) # 1
POPOTO_REDIS_DB.srem(self._meta.db_class_set_key.redis_key, delete_redis_key) # 2
for field_name, field in self._meta.fields.items(): # 3
field.on_delete( # 3
model_instance=self, field_name=field_name,
field_value=getattr(self, field_name),
pipeline=None, **kwargs
)
self._db_content = dict() # 4
return bool(db_response > 0)
@classmethod
def get_info(cls):
from itertools import chain
query_filters = list(chain(*[
field.get_filter_query_params(field_name)
for field_name, field in cls._meta.fields.items()
]))
return {
'name': cls.__name__,
'fields': cls._meta.field_names,
'query_filters': query_filters,
}
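# Usage sketch (the concrete field classes below are assumptions; only Field
# subclasses may be declared as public model attributes, and if no key field is
# declared an AutoKeyField is added automatically):
#
#   class Person(Model):
#       name = KeyField()               # hypothetical key field class
#       mood = Field(null=True)         # hypothetical plain non-key field
#
#   person = Person(name="Ada", mood="curious")
#   person.save()                       # HSET under the generated db_key, plus class-set bookkeeping
#   again = Person.load(name="Ada")     # classmethod defined above
#   person.delete()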
| 40.811395
| 132
| 0.595051
|
5b45468fc5c36bb349ea054fdcf6af5d32223e46
| 599
|
py
|
Python
|
jccli/errors.py
|
zaro0508/jccli
|
1de9a7f493d14bbbe6f3d201eb1aa989cdeec5bb
|
[
"Apache-2.0"
] | null | null | null |
jccli/errors.py
|
zaro0508/jccli
|
1de9a7f493d14bbbe6f3d201eb1aa989cdeec5bb
|
[
"Apache-2.0"
] | null | null | null |
jccli/errors.py
|
zaro0508/jccli
|
1de9a7f493d14bbbe6f3d201eb1aa989cdeec5bb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
.. currentmodule:: jccli.errors
.. moduleauthor:: zaro0508 <zaro0508@gmail.com>
Exceptions
"""
class JcCliError(Exception):
"""
Base class for all JC CLI errors
"""
class SystemUserNotFoundError(JcCliError):
"""
Jumpcloud system user is not found
"""
class GroupNotFoundError(JcCliError):
"""
Jumpcloud group is not found
"""
class NotAMemberError(JcCliError):
"""
A user or system is not a member of a group
"""
class MissingRequiredArgumentError(JcCliError):
"""
Required arguments are missing
"""
| 17.114286
| 47
| 0.649416
|
2d5d4645026898f173e67af899229420c00e39f2
| 6,785
|
py
|
Python
|
config-dump.py
|
Bond-o/config-dump
|
f472034850c0138a798a422fc3b32e8b68b57b0f
|
[
"MIT"
] | null | null | null |
config-dump.py
|
Bond-o/config-dump
|
f472034850c0138a798a422fc3b32e8b68b57b0f
|
[
"MIT"
] | null | null | null |
config-dump.py
|
Bond-o/config-dump
|
f472034850c0138a798a422fc3b32e8b68b57b0f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
__author__ = "Mike Bond"
__copyright__ = "Copyright (c) 2021"
__license__ = "MIT"
__originalDate__ = "20210805"
__modifiedDate__ = "20210805"
__version__ = "0.1"
__maintainer__ = "Mike Bond"
__status__ = "Beta"
"""
config-dump.py executes the snmpset binary and passes the Cisco config-copy SNMP OIDs needed to download a
device configuration file to a TFTP server.
"""
"""Import modules"""
import argparse
import os
import sys
import random
import time
from termcolor import colored
"""Define Color Status"""
error = '\033[1m\033[31m[!]\033[0m'
warning = '\033[1m\033[33m[-]\033[0m'
info = '\033[1m\033[94m[*]\033[0m'
complete = '\033[1m\033[92m[+]\033[0m'
""" Functions """
def snmp(auth,auth_pass,protocol,proto_pass,user,target_ip,tftp_ip):
"""
    The snmp function uses snmpset to copy the device configuration to a TFTP server,
    choosing the snmpset command line based on whether a privacy protocol was supplied
:param: auth
:param: auth_pass
:param: protocol
:param: proto_pass
:param: user
:param: target_ip
:param: tftp_ip
:return:
"""
try:
random_number = str(random.randint(100,999))
if protocol is not None:
command = 'snmpset -v 3 -l authpriv -a {0} -A {1} -x {2} -X {3} -u {4} {5}'\
.format(auth,auth_pass,protocol,proto_pass,user,target_ip)
else:
            # no privacy protocol supplied, so use the authNoPriv security level
            command = 'snmpset -v 3 -l authNoPriv -a {0} -A {1} -u {2} {3}'\
                .format(auth,auth_pass,user,target_ip)
ccCopyProtocol = '.1.3.6.1.4.1.9.9.96.1.1.1.1.2.{0} i 1'.format(random_number)
ccCopySourceFileType = '.1.3.6.1.4.1.9.9.96.1.1.1.1.3.{0} i 4'.format(random_number)
ccCopyDestFileType = '.1.3.6.1.4.1.9.9.96.1.1.1.1.4.{0} i 1'.format(random_number)
ccCopyServerAddress = '.1.3.6.1.4.1.9.9.96.1.1.1.1.5.{0} a {1}'.format(random_number,tftp_ip)
ccCopyFileName = '.1.3.6.1.4.1.9.9.96.1.1.1.1.6.{0} s {1}-config.txt'.format(random_number,target_ip)
ccCopyEntryRowStatus = '.1.3.6.1.4.1.9.9.96.1.1.1.1.14.{0} i 4'.format(random_number)
dev_null = '>/dev/null 2>&1'
session = command+' '+ccCopyProtocol+' '+ccCopySourceFileType+' '+ccCopyDestFileType+' '+ccCopyServerAddress+' '+\
ccCopyFileName+' '+ccCopyEntryRowStatus+' '+dev_null
results = (os.system(session))
if results == 256:
print (results)
print("{0} Issue with SNMP Username and/or Password!".format(error))
return None
if results == 512:
print("{0} No SNMP Read/Write access or issue with encryption".format(error))
return None
else:
time.sleep(1)
command = "netstat -anup | grep 69 >/dev/null 2>&1"
results = os.system(command)
if results == 0:
if os.path.isfile("{0}-config.txt".format(target_ip)):
print ("{0} Configuration file from {1} saved as {1}-config.txt in current working directory"
.format(complete,target_ip))
return None
else:
print ("{0} Configuration file from {1} saved as {1}-config.txt in the root of the TFTP directory"
.format(complete,target_ip))
return None
else:
print ("{0} Configuration file from {1} may have been saved on TFTP server {2}"
.format(warning,target_ip,tftp_ip))
return None
except Exception as e:
print ("{0}".format(error),e)
return None
def main():
"""
The main function that checks for root and then calls the snmp function
:param:
:return:
"""
if not os.geteuid() == 0:
print("{0} Execute config-dump with sudo privileges or as root".format(error))
sys.exit(-1)
command = "which snmpset >/dev/null 2>&1"
results = os.system(command)
if results != 0:
print("{0} The snmpset binary not found on this device".format(error))
sys.exit(-1)
if sys.platform == 'darwin':
print("{0} Script not tested on OSX".format(warning))
sys.exit(-1)
if args.protocol == 'AES':
# Call the Function snmp; Noted issues with snmpset for AES encryption > 128
print("{0} Authentication issues persist with AES encryption above 128".format(warning))
snmp(args.auth,args.auth_pass,args.protocol,args.proto_pass,args.user,args.target,args.tftp)
return None
else:
# Call the Function snmp
snmp(args.auth,args.auth_pass,args.protocol,args.proto_pass,args.user,args.target,args.tftp)
return None
def print_art():
"""
The print_art function prints the ASCII Art
:param:
:return:
"""
ascii_art1 = colored("""
,-. ,-. ,-. ," . ,-.
| | | | | |- | | |
`-' `-' ' ' | ' `-|
' ,|
`' """,'yellow',attrs=['bold'])
ascii_art2 = colored("""
|
,-| . . ,-,-. ,-.
| | | | | | | | |
`-^ `-^ ' ' ' |-'
|
'
""",'red',attrs=['bold'])
desc = 'Download a Cisco Device Configuration with '+colored('SNMPv3','green')+' to a TFTP server'
version = colored('\t\t Version: ','red')+colored('{0} {1}','yellow').format(__version__,__status__)
print ('{0} {1}'.format(ascii_art1,ascii_art2))
print (desc,flush=True)
print ('{0}\n'.format(version))
if __name__ == "__main__":
# Use ArgParse with mandatory flag of -t -a -A -u -s
try:
# Call the 'print_art' function
print_art()
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
required = parser.add_argument_group("required arguments")
required.add_argument("-t", "--target", type=str, help="Target SNMP Host IP Address",required=True)
required.add_argument("-a", "--auth", type=str, help="MD5 or SHA Authentication Protocol",required=True)
required.add_argument("-A","--auth-pass", type=str, help="MD5 or SHA Password",required=True)
required.add_argument("-u", "--user", type=str, help="Username",required=True)
required.add_argument("-s", "--tftp", type=str, help="TFTP Server IP Address", required=True)
parser.add_argument("-x", "--protocol", type=str, help="DES or AES Protocol")
parser.add_argument("-X", "--proto-pass", type=str, help="DES or AES Password")
args = parser.parse_args()
# Call the 'main' function
main()
except KeyboardInterrupt:
print("{0} User Interrupt! Quitting....\n".format(error))
sys.exit(-1)
except:
raise
exit()
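# Usage sketch (placeholder addresses and credentials; requires root privileges and
# the net-snmp snmpset binary, per the checks in main()):
#
#   sudo ./config-dump.py -t 192.0.2.10 -a SHA -A AuthPass123 \
#        -u snmpv3user -s 192.0.2.50 -x AES -X PrivPass123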
| 37.905028
| 122
| 0.572587
|
d87a8062481fbda6380f5c5a7e7cb4de861a28e9
| 1,327
|
py
|
Python
|
src/11/implementing_remote_procedure_call/jsonrpcserver.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 14
|
2017-05-20T04:06:46.000Z
|
2022-01-23T06:48:45.000Z
|
src/11/implementing_remote_procedure_call/jsonrpcserver.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 1
|
2021-06-10T20:17:55.000Z
|
2021-06-10T20:17:55.000Z
|
src/11/implementing_remote_procedure_call/jsonrpcserver.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 15
|
2017-03-29T17:57:33.000Z
|
2021-08-24T02:20:08.000Z
|
# rpcserver.py
import json
class RPCHandler:
def __init__(self):
self._functions = { }
def register_function(self, func):
self._functions[func.__name__] = func
def handle_connection(self, connection):
try:
while True:
# Receive a message
func_name, args, kwargs = json.loads(connection.recv())
# Run the RPC and send a response
try:
r = self._functions[func_name](*args,**kwargs)
connection.send(json.dumps(r))
except Exception as e:
connection.send(json.dumps(str(e)))
except EOFError:
pass
# Example use
from multiprocessing.connection import Listener
from threading import Thread
def rpc_server(handler, address, authkey):
sock = Listener(address, authkey=authkey)
while True:
client = sock.accept()
t = Thread(target=handler.handle_connection, args=(client,))
t.daemon = True
t.start()
# Some remote functions
def add(x, y):
return x + y
def sub(x, y):
return x - y
# Register with a handler
handler = RPCHandler()
handler.register_function(add)
handler.register_function(sub)
# Run the server
rpc_server(handler, ('localhost', 17000), authkey=b'peekaboo')
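# Client-side sketch (run from another process while the server above is listening;
# messages are JSON-encoded (func_name, args, kwargs) triples, matching what
# handle_connection() expects):
#
#   from multiprocessing.connection import Client
#   import json
#
#   c = Client(('localhost', 17000), authkey=b'peekaboo')
#   c.send(json.dumps(('add', [2, 3], {})))
#   print(json.loads(c.recv()))   # 5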
| 26.019608
| 71
| 0.610399
|
71eaab5839846340e576f1a337c25f9b0a34a8aa
| 2,090
|
py
|
Python
|
tempest/api/object_storage/test_account_services_negative.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/object_storage/test_account_services_negative.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/object_storage/test_account_services_negative.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Joe H. Rahme <joe.hakim.rahme@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest import clients
from tempest import exceptions
from tempest.test import attr
class AccountNegativeTest(base.BaseObjectTest):
@attr(type=['negative', 'gate'])
def test_list_containers_with_non_authorized_user(self):
# list containers using non-authorized user
# create user
self.data.setup_test_user()
test_os = clients.Manager(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
test_auth_provider = test_os.auth_provider
# Get auth for the test user
test_auth_provider.auth_data
# Get fresh auth for test user and set it to next auth request for
# custom_account_client
delattr(test_auth_provider, 'auth_data')
test_auth_new_data = test_auth_provider.auth_data
self.custom_account_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=test_auth_new_data
)
params = {'format': 'json'}
# list containers with non-authorized user token
self.assertRaises(exceptions.Unauthorized,
self.custom_account_client.list_account_containers,
params=params)
# delete the user which was created
self.data.teardown_all()
| 38.703704
| 78
| 0.677033
|
49336efae7ad8ab0ed8d9f0b0ab614569ef39185
| 3,883
|
py
|
Python
|
dot_weechat/python/unhighlight.py
|
benmezger/new-dotfiles
|
5aa41015bd017d0e4cc39edf374ca73e8c25b8cb
|
[
"MIT"
] | 68
|
2016-09-28T12:51:20.000Z
|
2022-02-25T15:33:16.000Z
|
dot_weechat/python/unhighlight.py
|
benmezger/new-dotfiles
|
5aa41015bd017d0e4cc39edf374ca73e8c25b8cb
|
[
"MIT"
] | null | null | null |
dot_weechat/python/unhighlight.py
|
benmezger/new-dotfiles
|
5aa41015bd017d0e4cc39edf374ca73e8c25b8cb
|
[
"MIT"
] | 2
|
2016-09-28T12:51:28.000Z
|
2022-01-11T10:26:44.000Z
|
#
# Copyright (C) 2016 Andrew Rodgers-Schatz <me@andrew.rs>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
try:
import weechat
except Exception:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: https://weechat.org/')
import_ok = False
import time
import re
SCRIPT_NAME = 'unhighlight'
SCRIPT_AUTHOR = 'xiagu'
SCRIPT_VERSION = '0.1.3'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'Allows per-buffer specification of a regex that prevents highlights.'
def matches_unhighlight_strings(msg, regex):
return weechat.string_has_highlight_regex(msg, regex)
def unhighlight_cb(data, modifier, modifier_data, message):
"""Check if the line matches the unhighlight regular expression, and if it does, clear the message and reprint it with the no_highlight tag added."""
if modifier_data.startswith('0x'):
# WeeChat >= 2.9
buffer, tags = modifier_data.split(';', 1)
else:
# WeeChat <= 2.8
plugin, buffer_name, tags = modifier_data.split(';', 2)
buffer = weechat.buffer_search(plugin, buffer_name)
if 'no_highlight' in tags or 'notify_none' in tags:
return message
unhighlight_regex = weechat.buffer_get_string(buffer, 'localvar_unhighlight_regex')
if not matches_unhighlight_strings(message, unhighlight_regex):
return message
# inspired by https://weechat.org/scripts/source/mass_hl_blocker.pl.html/
# this is terrible and gross but afaik there is no way to change the
# highlight message once it's set and no way to interact with a message's
# tags before highlights are checked.
weechat.prnt_date_tags(buffer, 0, "%s,no_highlight" % tags, message)
return ''
def command_cb(data, buffer, args):
args = args.strip().lower().split(' ')
if args[0] == 'list':
weechat.command('', '/set *.localvar_set_unhighlight_regex')
else:
weechat.command('', '/help %s' % SCRIPT_NAME)
return weechat.WEECHAT_RC_OK
def main():
hook = weechat.hook_modifier('weechat_print', 'unhighlight_cb', '')
description = """
{script_name} lets you set up a regex for things to never highlight.
To use this, set the localvar 'unhighlight_regex' on a buffer. Lines in
that buffer which match will never be highlighted, even if they have
your nick or match highlight_words or highlight_regex.
You will need the script 'buffer_autoset.py' installed to make local
variables persistent; see the examples below.
Examples:
Temporarily block highlights in the current buffer for lines matching 'banana':
/buffer set localvar_set_unhighlight_regex banana
Unhighlight SASL authentication messages for double logins:
/buffer weechat
/buffer set localvar_set_unhighlight_regex SaslServ
/buffer_autoset add core.weechat localvar_set_unhighlight_regex SaslServ
List buffers with autoset unhighlights:
/{script_name} list
Show this help:
/{script_name}
Display local variables for current buffer:
/buffer localvar
""".format(script_name = SCRIPT_NAME)
weechat.hook_command(SCRIPT_NAME, SCRIPT_DESC, 'list', description, 'list %-', 'command_cb', '')
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
main()
| 35.3
| 153
| 0.733969
|
99e52c94b940ce17fd297684c6337b12ad2027f6
| 290
|
py
|
Python
|
tests/node/test_serialization.py
|
Vernacular-ai/lute
|
b943c441d7fce6f7431eb413e13577260276b469
|
[
"MIT"
] | 1
|
2021-06-27T01:54:36.000Z
|
2021-06-27T01:54:36.000Z
|
tests/node/test_serialization.py
|
Vernacular-ai/lute
|
b943c441d7fce6f7431eb413e13577260276b469
|
[
"MIT"
] | null | null | null |
tests/node/test_serialization.py
|
Vernacular-ai/lute
|
b943c441d7fce6f7431eb413e13577260276b469
|
[
"MIT"
] | 1
|
2021-06-27T02:12:34.000Z
|
2021-06-27T02:12:34.000Z
|
import json
from lute.node import Constant
def test_serialization():
class Dummy:
pass
c = Constant({1: Dummy()})
c.value
try:
json.dumps(Dummy())
except TypeError as e:
message = str(e)
assert json.loads(c.dumps())["value"] == message
| 15.263158
| 52
| 0.589655
|
1c19905c43ffd32faac78173baf9d4a7c0d79cc5
| 3,146
|
py
|
Python
|
Works/L4_BabyNames/milestone1.py
|
jackchienchen/StanCode-SC101
|
7a5b9256b128e58482ca37d8f5ab76483be971be
|
[
"MIT"
] | 2
|
2022-01-26T10:18:23.000Z
|
2022-01-26T10:18:24.000Z
|
Works/L4_BabyNames/milestone1.py
|
jackchienchen/StanCode-SC101
|
7a5b9256b128e58482ca37d8f5ab76483be971be
|
[
"MIT"
] | null | null | null |
Works/L4_BabyNames/milestone1.py
|
jackchienchen/StanCode-SC101
|
7a5b9256b128e58482ca37d8f5ab76483be971be
|
[
"MIT"
] | null | null | null |
"""
File: milestone1.py
Name: Jack Chen
-----------------------
This file tests the milestone 1 for
our babyname.py project
"""
import sys
def add_data_for_name(name_data, year, rank, name):
"""
Adds the given year and rank to the associated name in the name_data dict.
Input:
name_data (dict): dict holding baby name data
year (str): the year of the data entry to add
rank (str): the rank of the data entry to add
name (str): the name of the data entry to add
Output:
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any value.
"""
if name in name_data: # if the input name is already in the name_data
year_d = name_data[name]
        if year in year_d:  # the year already exists; keep the better (numerically smaller) rank
if int(rank) < int(year_d[year]):
year_d[year] = rank
else:
pass
else:
year_d[year] = rank
else: # if the input name is NOT in the name_data
year_d = {year: rank}
        name_data[name] = year_d  # do not rebind name_data = {name: year_d}; that would drop names already stored in name_data
# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #
def test1():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
print('--------------------test1----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test2():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test2----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test3():
name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '200'}, 'Kate': {'2000': '100'}}
add_data_for_name(name_data, '1990', '900', 'Sammy')
add_data_for_name(name_data, '2010', '400', 'Kylie')
add_data_for_name(name_data, '2000', '20', 'Kate')
print('-------------------test3-----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test4():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
add_data_for_name(name_data, '2000', '108', 'Kate')
add_data_for_name(name_data, '1990', '200', 'Sammy')
add_data_for_name(name_data, '1990', '90', 'Sammy')
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test4----------------------')
print(str(name_data))
print('-----------------------------------------------')
def main():
args = sys.argv[1:]
if len(args) == 1 and args[0] == 'test1':
test1()
elif len(args) == 1 and args[0] == 'test2':
test2()
elif len(args) == 1 and args[0] == 'test3':
test3()
elif len(args) == 1 and args[0] == 'test4':
test4()
if __name__ == "__main__":
main()
| 32.43299
| 107
| 0.511443
|
1cbdd1d75a0efccd90f6ce4900364c4221310479
| 1,733
|
py
|
Python
|
Incident-Response/Tools/cyphon/cyphon/monitors/views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/cyphon/cyphon/monitors/views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/cyphon/cyphon/monitors/views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Views for Monitors.
"""
# third party
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import list_route
# local
from .models import Monitor
from .serializers import MonitorSerializer
class MonitorViewSet(viewsets.ReadOnlyModelViewSet):
"""
Read only viewset for Monitors.
"""
queryset = Monitor.objects.all()
serializer_class = MonitorSerializer
@list_route(methods=['get'], url_path='enabled')
def enabled(self, request, *args, **kwargs):
"""
Returns a list of Monitors that are enabled.
"""
enabled_qs = Monitor.objects.find_enabled()
filtered_qs = self.filter_queryset(enabled_qs)
page = self.paginate_queryset(filtered_qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(filtered_qs, many=True)
return Response(serializer.data)
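# Usage sketch (assumption: not part of the original module; the 'monitors' prefix is illustrative
# and this wiring would normally live in a urls.py). Registering the viewset with a DRF router
# exposes GET /monitors/ and, via the @list_route above, GET /monitors/enabled/ returning only
# enabled Monitors.
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'monitors', MonitorViewSet)
urlpatterns = router.urls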
| 32.092593
| 71
| 0.722447
|
4df2692e155faba8efad711cd87cac362e45ff36
| 12,813
|
py
|
Python
|
test/transaction/test_canoser.py
|
Xing-Huang/libra-client
|
bf74bc66b98a279476d751b637b1f84da84a51fe
|
[
"MIT"
] | null | null | null |
test/transaction/test_canoser.py
|
Xing-Huang/libra-client
|
bf74bc66b98a279476d751b637b1f84da84a51fe
|
[
"MIT"
] | null | null | null |
test/transaction/test_canoser.py
|
Xing-Huang/libra-client
|
bf74bc66b98a279476d751b637b1f84da84a51fe
|
[
"MIT"
] | null | null | null |
from libra.transaction import *
from libra.access_path import AccessPath
from canoser import *
#import pdb
def test_access_path_canonical_serialization_example():
account_address = [
0x9a, 0x1a, 0xd0, 0x97, 0x42, 0xd1, 0xff, 0xc6, 0x2e, 0x65, 0x9e, 0x9a, 0x77, 0x97, 0x80,
0x8b, 0x20, 0x6f, 0x95, 0x6f, 0x13, 0x1d, 0x07, 0x50, 0x94, 0x49, 0xc0, 0x1a, 0xd8, 0x22,
0x0a, 0xd4,
]
input = AccessPath(
account_address,
[
0x01, 0x21, 0x7d, 0xa6, 0xc6, 0xb3, 0xe1, 0x9f, 0x18, 0x25, 0xcf, 0xb2, 0x67, 0x6d,
0xae, 0xcc, 0xe3, 0xbf, 0x3d, 0xe0, 0x3c, 0xf2, 0x66, 0x47, 0xc7, 0x8d, 0xf0, 0x0b,
0x37, 0x1b, 0x25, 0xcc, 0x97,
],
)
expected_output = [
0x9A, 0x1A, 0xD0, 0x97, 0x42, 0xD1, 0xFF, 0xC6, 0x2E, 0x65, 0x9E,
0x9A, 0x77, 0x97, 0x80, 0x8B, 0x20, 0x6F, 0x95, 0x6F, 0x13, 0x1D, 0x07, 0x50, 0x94, 0x49,
0xC0, 0x1A, 0xD8, 0x22, 0x0A, 0xD4, 0x21, 0x00, 0x00, 0x00, 0x01, 0x21, 0x7D, 0xA6, 0xC6,
0xB3, 0xE1, 0x9F, 0x18, 0x25, 0xCF, 0xB2, 0x67, 0x6D, 0xAE, 0xCC, 0xE3, 0xBF, 0x3D, 0xE0,
0x3C, 0xF2, 0x66, 0x47, 0xC7, 0x8D, 0xF0, 0x0B, 0x37, 0x1B, 0x25, 0xCC, 0x97,
]
actual_output = input.serialize()
assert bytes(expected_output) == actual_output
def test_account_address_canonical_serialization_example():
input = [
0xca, 0x82, 0x0b, 0xf9, 0x30, 0x5e, 0xb9, 0x7d, 0x0d, 0x78, 0x4f, 0x71, 0xb3, 0x95, 0x54,
0x57, 0xfb, 0xf6, 0x91, 0x1f, 0x53, 0x00, 0xce, 0xaa, 0x5d, 0x7e, 0x86, 0x21, 0x52, 0x9e,
0xae, 0x19,
]
expected_output = [
0xCA, 0x82, 0x0B, 0xF9, 0x30, 0x5E, 0xB9, 0x7D, 0x0D, 0x78, 0x4F,
0x71, 0xB3, 0x95, 0x54, 0x57, 0xFB, 0xF6, 0x91, 0x1F, 0x53, 0x00, 0xCE, 0xAA, 0x5D, 0x7E,
0x86, 0x21, 0x52, 0x9E, 0xAE, 0x19,
]
actual_output = ArrayT(Uint8, 32, False).encode(input)
assert bytes(expected_output) == actual_output
def test_program_canonical_serialization_example():
input = get_common_program()
expected_output = [
0x04, 0x00, 0x00, 0x00, 0x6D, 0x6F, 0x76, 0x65, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x09, 0x00, 0x00, 0x00, 0x43, 0x41, 0x46, 0x45, 0x20, 0x44, 0x30, 0x30, 0x44, 0x02,
0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x61, 0x66, 0x65, 0x20, 0x64, 0x30, 0x30,
0x64,
]
actual_output = input.serialize()
assert bytes(expected_output) == actual_output
def test_raw_transaction_with_a_program_canonical_serialization_example():
input = RawTransaction(
[
0x3a, 0x24, 0xa6, 0x1e, 0x05, 0xd1, 0x29, 0xca, 0xce, 0x9e, 0x0e, 0xfc, 0x8b, 0xc9,
0xe3, 0x38, 0x31, 0xfe, 0xc9, 0xa9, 0xbe, 0x66, 0xf5, 0x0f, 0xd3, 0x52, 0xa2, 0x63,
0x8a, 0x49, 0xb9, 0xee,
],
32,
TransactionPayload('Script', get_common_program()),
10000,
20000,
86400,
)
expected_output = [
0x3A, 0x24, 0xA6, 0x1E, 0x05, 0xD1, 0x29, 0xCA, 0xCE, 0x9E, 0x0E,
0xFC, 0x8B, 0xC9, 0xE3, 0x38, 0x31, 0xFE, 0xC9, 0xA9, 0xBE, 0x66, 0xF5, 0x0F, 0xD3, 0x52,
0xA2, 0x63, 0x8A, 0x49, 0xB9, 0xEE, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x6F, 0x76, 0x65, 0x02, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x43, 0x41, 0x46, 0x45, 0x20, 0x44, 0x30,
0x30, 0x44, 0x02, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x61, 0x66, 0x65, 0x20,
0x64, 0x30, 0x30, 0x64, 0x10, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x4E, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x51, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
]
actual_output = input.serialize()
assert bytes(expected_output) == actual_output
def test_raw_transaction_with_a_write_set_canonical_serialization_example():
input = RawTransaction.new_write_set_tx(
[
0xc3, 0x39, 0x8a, 0x59, 0x9a, 0x6f, 0x3b, 0x9f, 0x30, 0xb6, 0x35, 0xaf, 0x29, 0xf2,
0xba, 0x04, 0x6d, 0x3a, 0x75, 0x2c, 0x26, 0xe9, 0xd0, 0x64, 0x7b, 0x96, 0x47, 0xd1,
0xf4, 0xc0, 0x4a, 0xd4,
],
32,
ChangeSet(get_common_write_set(), [])
)
#pdb.set_trace()
expected_output = [
0xC3, 0x39, 0x8A, 0x59, 0x9A, 0x6F, 0x3B, 0x9F, 0x30, 0xB6, 0x35,
0xAF, 0x29, 0xF2, 0xBA, 0x04, 0x6D, 0x3A, 0x75, 0x2C, 0x26, 0xE9, 0xD0, 0x64, 0x7B, 0x96,
0x47, 0xD1, 0xF4, 0xC0, 0x4A, 0xD4, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xA7, 0x1D, 0x76, 0xFA,
0xA2, 0xD2, 0xD5, 0xC3, 0x22, 0x4E, 0xC3, 0xD4, 0x1D, 0xEB, 0x29, 0x39, 0x73, 0x56, 0x4A,
0x79, 0x1E, 0x55, 0xC6, 0x78, 0x2B, 0xA7, 0x6C, 0x2B, 0xF0, 0x49, 0x5F, 0x9A, 0x21, 0x00,
0x00, 0x00, 0x01, 0x21, 0x7D, 0xA6, 0xC6, 0xB3, 0xE1, 0x9F, 0x18, 0x25, 0xCF, 0xB2, 0x67,
0x6D, 0xAE, 0xCC, 0xE3, 0xBF, 0x3D, 0xE0, 0x3C, 0xF2, 0x66, 0x47, 0xC7, 0x8D, 0xF0, 0x0B,
0x37, 0x1B, 0x25, 0xCC, 0x97, 0x00, 0x00, 0x00, 0x00, 0xC4, 0xC6,
0x3F, 0x80, 0xC7, 0x4B, 0x11, 0x26, 0x3E, 0x42, 0x1E, 0xBF, 0x84, 0x86, 0xA4, 0xE3, 0x98,
0xD0, 0xDB, 0xC0, 0x9F, 0xA7, 0xD4, 0xF6, 0x2C, 0xCD, 0xB3, 0x09, 0xF3, 0xAE, 0xA8, 0x1F,
0x09, 0x00, 0x00, 0x00, 0x01, 0x21, 0x7D, 0xA6, 0xC6, 0xB3, 0xE1, 0x9F, 0x18, 0x01, 0x00,
0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xCA, 0xFE, 0xD0, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
]
actual_output = input.serialize()
assert bytes(expected_output) == actual_output
def test_transaction_argument_address_canonical_serialization_example():
input = TransactionArgument('Address', [
0x2c, 0x25, 0x99, 0x17, 0x85, 0x34, 0x3b, 0x23, 0xae, 0x07, 0x3a, 0x50, 0xe5, 0xfd, 0x80,
0x9a, 0x2c, 0xd8, 0x67, 0x52, 0x6b, 0x3c, 0x1d, 0xb2, 0xb0, 0xbf, 0x5d, 0x19, 0x24, 0xc6,
0x93, 0xed,
])
expected_output= [
0x01, 0x00, 0x00, 0x00, 0x2C, 0x25, 0x99, 0x17, 0x85, 0x34, 0x3B,
0x23, 0xAE, 0x07, 0x3A, 0x50, 0xE5, 0xFD, 0x80, 0x9A, 0x2C, 0xD8, 0x67, 0x52, 0x6B, 0x3C,
0x1D, 0xB2, 0xB0, 0xBF, 0x5D, 0x19, 0x24, 0xC6, 0x93, 0xED,
]
actual_output = TransactionArgument.encode(input)
assert bytes(expected_output) == actual_output
def test_transaction_argument_byte_array_canonical_serialization_example():
input = TransactionArgument('ByteArray', [0xCA, 0xFE, 0xD0, 0x0D])
expected_output = [
0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xCA, 0xFE, 0xD0, 0x0D,
]
actual_output = TransactionArgument.encode(input)
assert bytes(expected_output) == actual_output
def test_transaction_argument_string_canonical_serialization_example():
input = TransactionArgument('String', "Hello, World!")
expected_output = [
0x02, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20,
0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21,
]
actual_output = TransactionArgument.encode(input)
assert bytes(expected_output) == actual_output
def test_transaction_argument_u64_canonical_serialization_example():
input = TransactionArgument('U64', 9_213_671_392_124_193_148)
expected_output = [
0x00, 0x00, 0x00, 0x00, 0x7C, 0xC9, 0xBD, 0xA4, 0x50, 0x89, 0xDD, 0x7F,
]
actual_output = TransactionArgument.encode(input)
assert bytes(expected_output) == actual_output
def test_transaction_payload_with_a_program_canonical_serialization_example():
input = TransactionPayload('Script', get_common_program())
expected_output = [
0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x6F, 0x76, 0x65, 0x02, 0x00, 0x00,
0x00, 0x02, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x43, 0x41, 0x46, 0x45, 0x20, 0x44,
0x30, 0x30, 0x44, 0x02, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x61, 0x66, 0x65,
0x20, 0x64, 0x30, 0x30, 0x64,
]
actual_output = TransactionPayload.encode(input)
assert bytes(expected_output) == actual_output
def test_transaction_payload_with_a_write_set_canonical_serialization_example():
input = TransactionPayload('WriteSet', ChangeSet(get_common_write_set(), []))
expected_output = [
0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xA7, 0x1D, 0x76,
0xFA, 0xA2, 0xD2, 0xD5, 0xC3, 0x22, 0x4E, 0xC3, 0xD4, 0x1D, 0xEB, 0x29, 0x39, 0x73, 0x56,
0x4A, 0x79, 0x1E, 0x55, 0xC6, 0x78, 0x2B, 0xA7, 0x6C, 0x2B, 0xF0, 0x49, 0x5F, 0x9A, 0x21,
0x00, 0x00, 0x00, 0x01, 0x21, 0x7D, 0xA6, 0xC6, 0xB3, 0xE1, 0x9F, 0x18, 0x25, 0xCF, 0xB2,
0x67, 0x6D, 0xAE, 0xCC, 0xE3, 0xBF, 0x3D, 0xE0, 0x3C, 0xF2, 0x66, 0x47, 0xC7, 0x8D, 0xF0,
0x0B, 0x37, 0x1B, 0x25, 0xCC, 0x97, 0x00, 0x00, 0x00, 0x00, 0xC4,
0xC6, 0x3F, 0x80, 0xC7, 0x4B, 0x11, 0x26, 0x3E, 0x42, 0x1E, 0xBF, 0x84, 0x86, 0xA4, 0xE3,
0x98, 0xD0, 0xDB, 0xC0, 0x9F, 0xA7, 0xD4, 0xF6, 0x2C, 0xCD, 0xB3, 0x09, 0xF3, 0xAE, 0xA8,
0x1F, 0x09, 0x00, 0x00, 0x00, 0x01, 0x21, 0x7D, 0xA6, 0xC6, 0xB3, 0xE1, 0x9F, 0x18, 0x01,
0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xCA, 0xFE, 0xD0, 0x0D, 0x00, 0x00, 0x00, 0x00,
]
actual_output = TransactionPayload.encode(input)
assert bytes(expected_output) == actual_output
def test_write_op_delete_canonical_serialization_example():
input = WriteOp('Deletion')
expected_output = [0x00, 0x00, 0x00, 0x00]
actual_output = WriteOp.encode(input)
assert bytes(expected_output) == actual_output
def test_write_op_value_canonical_serialization_example():
input = WriteOp('Value', [0xca, 0xfe, 0xd0, 0x0d])
expected_output = [
0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xCA, 0xFE, 0xD0, 0x0D,
]
actual_output = WriteOp.encode(input)
assert bytes(expected_output) == actual_output
def test_write_set_canonical_serialization_example():
input = get_common_write_set()
expected_output = [
0x02, 0x00, 0x00, 0x00, 0xA7, 0x1D, 0x76, 0xFA, 0xA2, 0xD2, 0xD5,
0xC3, 0x22, 0x4E, 0xC3, 0xD4, 0x1D, 0xEB, 0x29, 0x39, 0x73, 0x56, 0x4A, 0x79, 0x1E, 0x55,
0xC6, 0x78, 0x2B, 0xA7, 0x6C, 0x2B, 0xF0, 0x49, 0x5F, 0x9A, 0x21, 0x00, 0x00, 0x00, 0x01,
0x21, 0x7D, 0xA6, 0xC6, 0xB3, 0xE1, 0x9F, 0x18, 0x25, 0xCF, 0xB2, 0x67, 0x6D, 0xAE, 0xCC,
0xE3, 0xBF, 0x3D, 0xE0, 0x3C, 0xF2, 0x66, 0x47, 0xC7, 0x8D, 0xF0, 0x0B, 0x37, 0x1B, 0x25,
0xCC, 0x97, 0x00, 0x00, 0x00, 0x00, 0xC4, 0xC6, 0x3F, 0x80, 0xC7,
0x4B, 0x11, 0x26, 0x3E, 0x42, 0x1E, 0xBF, 0x84, 0x86, 0xA4, 0xE3, 0x98, 0xD0, 0xDB, 0xC0,
0x9F, 0xA7, 0xD4, 0xF6, 0x2C, 0xCD, 0xB3, 0x09, 0xF3, 0xAE, 0xA8, 0x1F, 0x09, 0x00, 0x00,
0x00, 0x01, 0x21, 0x7D, 0xA6, 0xC6, 0xB3, 0xE1, 0x9F, 0x18, 0x01, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0xCA, 0xFE, 0xD0, 0x0D,
]
actual_output = WriteSet.encode(input)
assert bytes(expected_output) == actual_output
assert WriteSet.encode(input) == input.serialize()
def get_common_program():
return Script(
list(b"move"),
[
TransactionArgument('String', "CAFE D00D"),
TransactionArgument('String', "cafe d00d")
]
)
def get_common_write_set():
return WriteSet([
(
AccessPath(
[
0xa7, 0x1d, 0x76, 0xfa, 0xa2, 0xd2, 0xd5, 0xc3, 0x22, 0x4e, 0xc3, 0xd4, 0x1d,
0xeb, 0x29, 0x39, 0x73, 0x56, 0x4a, 0x79, 0x1e, 0x55, 0xc6, 0x78, 0x2b, 0xa7,
0x6c, 0x2b, 0xf0, 0x49, 0x5f, 0x9a,
],
[
0x01, 0x21, 0x7D, 0xA6, 0xC6, 0xB3, 0xE1, 0x9F, 0x18, 0x25, 0xCF, 0xB2, 0x67,
0x6D, 0xAE, 0xCC, 0xE3, 0xBF, 0x3D, 0xE0, 0x3C, 0xF2, 0x66, 0x47, 0xC7, 0x8D,
0xF0, 0x0B, 0x37, 0x1B, 0x25, 0xCC, 0x97
]
),
WriteOp('Deletion')
),
(
AccessPath(
[
0xc4, 0xc6, 0x3f, 0x80, 0xc7, 0x4b, 0x11, 0x26, 0x3e, 0x42, 0x1e, 0xbf, 0x84,
0x86, 0xa4, 0xe3, 0x98, 0xd0, 0xdb, 0xc0, 0x9f, 0xa7, 0xd4, 0xf6, 0x2c, 0xcd,
0xb3, 0x09, 0xf3, 0xae, 0xa8, 0x1f,
],
[0x01, 0x21, 0x7d, 0xa6, 0xc6, 0xb3, 0xe1, 0x9f, 0x18],
),
WriteOp('Value', [0xca, 0xfe, 0xd0, 0x0d])
)
])
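def _demo_serialization_sizes():
    """Sanity sketch (assumption: not part of the original test file).

    Both helpers above return canoser objects whose .serialize() produces the
    canonical-serialization byte strings exercised by the tests; this just reports their lengths.
    """
    program_bytes = get_common_program().serialize()
    write_set_bytes = get_common_write_set().serialize()
    return len(program_bytes), len(write_set_bytes)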
| 48.350943
| 98
| 0.609225
|
aef9edf9444ef7915b8251bf21fbfde9b9ce9770
| 3,421
|
py
|
Python
|
neural_networks/discrete_soft_actor_critic.py
|
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs
|
23b9b181499a4b06f2ca2951c002359c1959e727
|
[
"MIT"
] | 4
|
2021-03-22T12:42:55.000Z
|
2021-12-13T03:03:52.000Z
|
neural_networks/discrete_soft_actor_critic.py
|
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs
|
23b9b181499a4b06f2ca2951c002359c1959e727
|
[
"MIT"
] | null | null | null |
neural_networks/discrete_soft_actor_critic.py
|
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs
|
23b9b181499a4b06f2ca2951c002359c1959e727
|
[
"MIT"
] | 1
|
2021-03-22T12:48:21.000Z
|
2021-03-22T12:48:21.000Z
|
from abc import ABC
import torch as T
import torch.nn as nn
import torch.nn.functional as F
class Convolutional_ActorNetwork(nn.Module, ABC):
"""
Convolutional Neural Network for the actor.
The Output corresponds with a Softmax layer representing
the probability of select an action a -> P(a|s) = pi(s,action = a)
"""
def __init__(self, input_size, action_size):
super(Convolutional_ActorNetwork, self).__init__()
""" Convolutional DNN """
self.conv1 = nn.Conv2d(input_size[0], 16, 5)
self.conv2 = nn.Conv2d(16, 16, 3)
x_test = T.zeros(1, input_size[0], input_size[1], input_size[2]).float()
fc_input_size = self.size_of_conv_out(x_test)
""" Fully-connected DNN - Dense """
self.fc1 = nn.Linear(fc_input_size, 255)
self.fc2 = nn.Linear(255, 255)
self.fc3 = nn.Linear(255, 255)
        self.f_out = nn.Linear(255, action_size)  # The actor outputs one logit per possible action; softmax is applied in forward() #
def forward(self, x):
""" Forward function. """
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = T.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
P = F.softmax(self.f_out(x), dim = 1) # In the discrete actor-critic, the actor output is softmax
return P
def size_of_conv_out(self, x):
"""
Function to extract the output size of the convolutional network.
:param x: Input of the convolutional network
:return: Integer with the size of the input of the next layer (FC)
"""
x = self.conv1(x)
x = self.conv2(x)
x = T.flatten(x, start_dim=1)
return x.shape[1]
class Convolutional_CriticNetwork(nn.Module, ABC):
"""
Convolutional Neural Network for the Critic Q(s,a).
    The output corresponds to the Q-values representing
    the discounted state-action values.
"""
def __init__(self, input_size, action_size):
super(Convolutional_CriticNetwork, self).__init__()
""" First Convolutional part - The state is processed here"""
""" Convolutional DNN """
self.conv1 = nn.Conv2d(input_size[0], 16, 5)
self.conv2 = nn.Conv2d(16, 16, 3)
x_test = T.zeros(1, input_size[0], input_size[1], input_size[2]).float()
fc_input_size = self.size_of_conv_out(x_test)
""" Fully-connected DNN - Dense """
self.fc1 = nn.Linear(fc_input_size, 255)
self.fc2 = nn.Linear(255, 255)
self.fc3 = nn.Linear(255, 255)
        self.f_out = nn.Linear(255, action_size)  # The critic outputs one Q-value per possible action #
def size_of_conv_out(self, x):
"""
Function to extract the output size of the convolutional network.
:param x: Input of the convolutional network
:return: Integer with the size of the input of the next layer (FC)
"""
x = self.conv1(x)
x = self.conv2(x)
x = T.flatten(x, start_dim=1)
return x.shape[1]
def forward(self, state):
""" Forward function. """
x = F.relu(self.conv1(state))
x = F.relu(self.conv2(x))
x = T.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
Q = self.f_out(x)
return Q
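# Usage sketch (assumption: not part of the original module; input shape and action count are illustrative).
# Both networks take a (batch, channels, height, width) state tensor; the actor returns a softmax
# distribution over actions and the critic returns one Q-value per action.
if __name__ == "__main__":
    actor = Convolutional_ActorNetwork(input_size=(3, 32, 32), action_size=8)
    critic = Convolutional_CriticNetwork(input_size=(3, 32, 32), action_size=8)
    state = T.zeros(4, 3, 32, 32)   # batch of 4 dummy states
    action_probs = actor(state)     # shape (4, 8); each row sums to 1
    q_values = critic(state)        # shape (4, 8)
    print(action_probs.shape, q_values.shape)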
| 29.491379
| 109
| 0.602163
|
a57e66e4cb223af8552dd8db599986960e14c875
| 1,064
|
py
|
Python
|
cart/views.py
|
dipikamarathe/project2
|
78f5ecf4dcd568ab82436ab87ec64e0676039aab
|
[
"MIT"
] | null | null | null |
cart/views.py
|
dipikamarathe/project2
|
78f5ecf4dcd568ab82436ab87ec64e0676039aab
|
[
"MIT"
] | 28
|
2020-10-26T16:51:38.000Z
|
2022-01-13T03:32:54.000Z
|
cart/views.py
|
dipikamarathe/project2
|
78f5ecf4dcd568ab82436ab87ec64e0676039aab
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductForm
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product,
quantity=cd['quantity'],
override_quantity=cd['override'])
return redirect('cart:cart_detail')
@require_POST
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('cart:cart_detail')
def cart_detail(request):
cart = Cart(request)
for item in cart:
item['update_quantity_form'] = CartAddProductForm(initial={'quantity': item['quantity'], 'override': True})
return render(request, 'cart/detail.html', {'cart': cart})
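# URL wiring sketch (assumption: this normally lives in cart/urls.py; route names follow the
# 'cart:cart_detail' namespace already used by the redirects above).
from django.urls import path
app_name = 'cart'
urlpatterns = [
    path('', cart_detail, name='cart_detail'),
    path('add/<int:product_id>/', cart_add, name='cart_add'),
    path('remove/<int:product_id>/', cart_remove, name='cart_remove'),
]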
| 31.294118
| 115
| 0.711466
|
996d925dfe9d3a28280a747dac8ad15a742f43bc
| 1,248
|
py
|
Python
|
unit_test.py
|
gpwork4u/Facebooker
|
4a78c9575d5f36a402d7e489b69058d4e1692ce4
|
[
"MIT"
] | 26
|
2020-05-29T02:41:05.000Z
|
2022-03-25T17:27:32.000Z
|
unit_test.py
|
gpwork4u/Facebooker
|
4a78c9575d5f36a402d7e489b69058d4e1692ce4
|
[
"MIT"
] | 7
|
2020-05-28T06:09:22.000Z
|
2021-10-02T05:22:38.000Z
|
unit_test.py
|
gpwork4u/Facebooker
|
4a78c9575d5f36a402d7e489b69058d4e1692ce4
|
[
"MIT"
] | 9
|
2020-05-28T05:40:25.000Z
|
2022-02-13T21:48:01.000Z
|
from Facebooker import facebook
from test_constant import *
import unittest
class FBUnitTest(unittest.TestCase):
fb = facebook.API()
def __init__(self, *args):
self.fb.login(EMAIL,
PASSWORD)
super().__init__(*args)
def test_login(self):
self.assertTrue(self.fb.login_check)
def test_get_user_post_list(self):
post_generator = self.fb.get_user_post_list(TEST_USER_ID)
post_id = next(post_generator)
self.assertIsNotNone(post_id)
def test_get_post(self):
post_info = self.fb.get_post(TEST_POST_ID)
self.assertEqual(post_info.id, TEST_POST_ID)
self.assertEqual(post_info.author, TEST_POST_AUTHOR)
self.assertEqual(post_info.content, TEST_POST_CONTENT)
def test_get_comments(self):
comment = self.fb.get_comments(TEST_POST_ID)[-1]
self.assertIn(comment.id, TEST_COMMNENT_ID)
self.assertEqual(comment.content, TEST_COMMENT_CONTENT)
def test_get_replies(self):
reply = self.fb.get_replies(TEST_POST_ID, TEST_COMMNENT_ID)[-1]
self.assertEqual(reply.id, TEST_REPLY_ID)
self.assertEqual(reply.content, TEST_REPLY_CONTENT)
if __name__ == '__main__':
unittest.main()
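# Run sketch (assumption): EMAIL, PASSWORD and the TEST_* fixtures are provided by
# test_constant.py (imported above), so the suite needs valid credentials and reachable
# fixture posts; run with `python unit_test.py`.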
| 30.439024
| 71
| 0.694712
|
91dca4f65187b544b704a30207422e1d0f0362f0
| 5,183
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_resource_requirements.py
|
opsani/kubernetes_asyncio
|
55283bf6f3690e5c0a0c589cd752221511e2be51
|
[
"Apache-2.0"
] | 196
|
2018-05-23T16:55:41.000Z
|
2022-03-31T10:09:40.000Z
|
kubernetes_asyncio/client/models/v1_resource_requirements.py
|
tomplus/kubernetes_asyncio
|
e8c8686ec11be3a5295ae9d5d8728299492a61f8
|
[
"Apache-2.0"
] | 164
|
2018-05-20T20:39:03.000Z
|
2022-03-29T22:57:04.000Z
|
kubernetes_asyncio/client/models/v1_resource_requirements.py
|
opsani/kubernetes_asyncio
|
55283bf6f3690e5c0a0c589cd752221511e2be51
|
[
"Apache-2.0"
] | 41
|
2018-06-08T00:39:53.000Z
|
2022-01-12T18:19:06.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.18.20
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1ResourceRequirements(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'limits': 'dict(str, str)',
'requests': 'dict(str, str)'
}
attribute_map = {
'limits': 'limits',
'requests': 'requests'
}
def __init__(self, limits=None, requests=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceRequirements - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._limits = None
self._requests = None
self.discriminator = None
if limits is not None:
self.limits = limits
if requests is not None:
self.requests = requests
@property
def limits(self):
"""Gets the limits of this V1ResourceRequirements. # noqa: E501
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ # noqa: E501
:return: The limits of this V1ResourceRequirements. # noqa: E501
:rtype: dict(str, str)
"""
return self._limits
@limits.setter
def limits(self, limits):
"""Sets the limits of this V1ResourceRequirements.
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ # noqa: E501
:param limits: The limits of this V1ResourceRequirements. # noqa: E501
:type: dict(str, str)
"""
self._limits = limits
@property
def requests(self):
"""Gets the requests of this V1ResourceRequirements. # noqa: E501
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ # noqa: E501
:return: The requests of this V1ResourceRequirements. # noqa: E501
:rtype: dict(str, str)
"""
return self._requests
@requests.setter
def requests(self, requests):
"""Sets the requests of this V1ResourceRequirements.
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ # noqa: E501
:param requests: The requests of this V1ResourceRequirements. # noqa: E501
:type: dict(str, str)
"""
self._requests = requests
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceRequirements):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceRequirements):
return True
return self.to_dict() != other.to_dict()
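# Usage sketch (assumption: not part of the generated module; quantity strings are illustrative).
if __name__ == "__main__":
    requirements = V1ResourceRequirements(
        limits={"cpu": "500m", "memory": "256Mi"},
        requests={"cpu": "250m", "memory": "128Mi"},
    )
    # to_dict() walks openapi_types and returns a plain dict suitable for an API payload.
    print(requirements.to_dict())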
| 34.324503
| 328
| 0.628593
|
692cb5888e783753383b7aca2809910c23812fa1
| 6,122
|
py
|
Python
|
bcbio/variation/realign.py
|
arvados/bcbio-nextgen
|
2a5cfa8c3a1d540bb2f2e66f51835042195cbc87
|
[
"MIT"
] | 3
|
2015-11-18T07:17:54.000Z
|
2021-04-28T13:58:37.000Z
|
bcbio/variation/realign.py
|
yong27/bcbio-nextgen
|
9320479d8f21677b61ed1274b4da23d569c686ae
|
[
"MIT"
] | null | null | null |
bcbio/variation/realign.py
|
yong27/bcbio-nextgen
|
9320479d8f21677b61ed1274b4da23d569c686ae
|
[
"MIT"
] | null | null | null |
"""Perform realignment of BAM files around indels using the GATK toolkit.
"""
import os
import shutil
from contextlib import closing
import pysam
from bcbio import bam, broad
from bcbio.bam import ref
from bcbio.log import logger
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline.shared import subset_bam_by_region, subset_variant_regions
from bcbio.provenance import do
# ## GATK realignment
def gatk_realigner_targets(runner, align_bam, ref_file, config, dbsnp=None,
region=None, out_file=None, deep_coverage=False,
variant_regions=None):
"""Generate a list of interval regions for realignment around indels.
"""
if out_file:
out_file = "%s.intervals" % os.path.splitext(out_file)[0]
else:
out_file = "%s-realign.intervals" % os.path.splitext(align_bam)[0]
# check only for file existence; interval files can be empty after running
# on small chromosomes, so don't rerun in those cases
if not os.path.exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
logger.debug("GATK RealignerTargetCreator: %s %s" %
(os.path.basename(align_bam), region))
params = ["-T", "RealignerTargetCreator",
"-I", align_bam,
"-R", ref_file,
"-o", tx_out_file,
"-l", "INFO",
]
region = subset_variant_regions(variant_regions, region, tx_out_file)
if region:
params += ["-L", region, "--interval_set_rule", "INTERSECTION"]
if dbsnp:
params += ["--known", dbsnp]
if deep_coverage:
params += ["--mismatchFraction", "0.30",
"--maxIntervalSize", "650"]
runner.run_gatk(params, memscale={"direction": "decrease", "magnitude": 2})
return out_file
def gatk_indel_realignment_cl(runner, align_bam, ref_file, intervals,
tmp_dir, region=None, deep_coverage=False):
"""Prepare input arguments for GATK indel realignment.
"""
params = ["-T", "IndelRealigner",
"-I", align_bam,
"-R", ref_file,
"-targetIntervals", intervals,
]
if region:
params += ["-L", region]
if deep_coverage:
params += ["--maxReadsInMemory", "300000",
"--maxReadsForRealignment", str(int(5e5)),
"--maxReadsForConsensuses", "500",
"--maxConsensuses", "100"]
return runner.cl_gatk(params, tmp_dir)
def gatk_indel_realignment(runner, align_bam, ref_file, intervals,
region=None, out_file=None, deep_coverage=False,
config=None):
"""Perform realignment of BAM file in specified regions
"""
if out_file is None:
out_file = "%s-realign.bam" % os.path.splitext(align_bam)[0]
if not file_exists(out_file):
with tx_tmpdir(config) as tmp_dir:
with file_transaction(config, out_file) as tx_out_file:
logger.info("GATK IndelRealigner: %s %s" %
(os.path.basename(align_bam), region))
cl = gatk_indel_realignment_cl(runner, align_bam, ref_file, intervals,
tmp_dir, region, deep_coverage)
cl += ["-o", tx_out_file]
do.run(cl, "GATK indel realignment", {})
return out_file
def gatk_realigner(align_bam, ref_file, config, dbsnp=None, region=None,
out_file=None, deep_coverage=False):
"""Realign a BAM file around indels using GATK, returning sorted BAM.
"""
runner = broad.runner_from_config(config)
bam.index(align_bam, config)
runner.run_fn("picard_index_ref", ref_file)
ref.fasta_idx(ref_file)
if region:
align_bam = subset_bam_by_region(align_bam, region, config, out_file)
bam.index(align_bam, config)
if has_aligned_reads(align_bam, region):
variant_regions = config["algorithm"].get("variant_regions", None)
realign_target_file = gatk_realigner_targets(runner, align_bam,
ref_file, config, dbsnp, region,
out_file, deep_coverage,
variant_regions)
realign_bam = gatk_indel_realignment(runner, align_bam, ref_file,
realign_target_file, region,
out_file, deep_coverage, config=config)
# No longer required in recent GATK (> Feb 2011) -- now done on the fly
# realign_sort_bam = runner.run_fn("picard_fixmate", realign_bam)
return realign_bam
elif out_file:
shutil.copy(align_bam, out_file)
return out_file
else:
return align_bam
# ## Utilities
def has_aligned_reads(align_bam, region=None):
"""Check if the aligned BAM file has any reads in the region.
region can be a chromosome string ("chr22"),
a tuple region (("chr22", 1, 100)) or a file of regions.
"""
import pybedtools
if region is not None:
if isinstance(region, basestring) and os.path.isfile(region):
regions = [tuple(r) for r in pybedtools.BedTool(region)]
else:
regions = [region]
with closing(pysam.Samfile(align_bam, "rb")) as cur_bam:
if region is not None:
for region in regions:
if isinstance(region, basestring):
for item in cur_bam.fetch(region):
return True
else:
for item in cur_bam.fetch(region[0], int(region[1]), int(region[2])):
return True
else:
for item in cur_bam:
if not item.is_unmapped:
return True
return False
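def _example_realignment_call():
    """Usage sketch (assumption: file paths and config keys are illustrative, not from a real bcbio run)."""
    config = {"algorithm": {"variant_regions": None}}
    return gatk_realigner("sample-sort.bam", "GRCh37.fa", config,
                          dbsnp="dbsnp_138.vcf", region="22")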
| 42.513889
| 89
| 0.579222
|
9a5101c7e3e60bc47a68ef24e9d88e27c1456dc2
| 6,999
|
py
|
Python
|
pos_tagging/probe_train4.py
|
ecacikgoz97/Probing
|
5df8f9fedeffdd2c6f9328b6ff47e36adca49dbb
|
[
"MIT"
] | null | null | null |
pos_tagging/probe_train4.py
|
ecacikgoz97/Probing
|
5df8f9fedeffdd2c6f9328b6ff47e36adca49dbb
|
[
"MIT"
] | null | null | null |
pos_tagging/probe_train4.py
|
ecacikgoz97/Probing
|
5df8f9fedeffdd2c6f9328b6ff47e36adca49dbb
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------
# Date: 2021/12/19
# Author: Muge Kural
# Description: Trainer of surface form pos tagging probe, saves the results under ./results directory.
# -----------------------------------------------------------
import sys, argparse, random, torch, json, matplotlib, os
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
from torch import optim
from common.utils import *
from data.data import build_data, log_data
from models.gpt3 import GPT3
from common.vocab import VocabEntry
from probe import MiniGPT_Probe, MiniGPT_Probe2
matplotlib.use('Agg')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
local_set = False
if local_set == True:
working_path = "/Users/emrecanacikgoz/Desktop/"
else:
working_path = "/kuacc/users/eacikgoz17/"
def test(batches, mode, args):
epoch_loss = 0; epoch_acc = 0; epoch_num_instances = 0
numbatches = len(batches)
indices = list(range(numbatches))
for i, idx in enumerate(indices):
# (batchsize, t)
surf, surfpos = batches[idx]
loss, acc = args.model.probe_loss(surf, surfpos)
epoch_num_instances += surf.size(0)
epoch_loss += loss.item()
epoch_acc += acc
nll = epoch_loss / numbatches
acc = epoch_acc / epoch_num_instances
args.logger.write('%s --- avg_loss: %.4f, acc: %.4f \n' % (mode, nll, acc))
return nll, acc
def train(data, args):
trnbatches, valbatches, tstbatches = data
opt = optim.Adam(filter(lambda p: p.requires_grad, args.model.parameters()), lr=args.lr)
scheduler = MultiStepLR(opt, milestones=[80,150,250,400], gamma=0.1)
for name, prm in args.model.named_parameters():
args.logger.write('\n'+name+', '+str(prm.shape) + ': '+ str(prm.requires_grad))
numbatches = len(trnbatches)
indices = list(range(numbatches))
random.seed(0)
best_loss = 1e4
trn_loss_values = []; trn_acc_values = []
val_loss_values = []; val_acc_values = []
for epc in range(args.epochs):
epoch_loss = 0; epoch_acc = 0; epoch_num_instances = 0
        random.shuffle(indices)  # shuffle batch order every epoch; this breaks continuity if there is any ordering in the data
for i, idx in enumerate(indices):
args.model.zero_grad()
# (batchsize, t)
surf, surfpos = trnbatches[idx]
loss, acc = args.model.probe_loss(surf, surfpos)
loss.backward()
opt.step()
epoch_num_instances += surf.size(0)
epoch_loss += loss.item()
epoch_acc += acc
nll = epoch_loss / numbatches
acc = epoch_acc / epoch_num_instances
trn_loss_values.append(nll)
trn_acc_values.append(acc)
args.logger.write('\nepoch: %.1d avg_loss: %.4f, acc: %.4f \n' % (epc, nll, acc))
# VAL
args.model.eval()
with torch.no_grad():
nll, acc = test(valbatches, "val", args)
val_loss_values.append(nll)
val_acc_values.append(acc)
scheduler.step()
if nll < best_loss:
args.logger.write('update best loss \n')
best_loss = nll
torch.save(args.model.state_dict(), args.save_path)
args.model.train()
plot_curves(args.task, args.mname, args.fig, args.axs[0], trn_loss_values, val_loss_values, args.plt_style, 'loss')
plot_curves(args.task, args.mname, args.fig, args.axs[1], trn_acc_values, val_acc_values, args.plt_style, 'acc')
# CONFIG
parser = argparse.ArgumentParser(description='')
args = parser.parse_args()
args.device = device
args.mname = 'MiniGPT_3_500epochs_lr0001_batch32_schedulerStep'
model_path = working_path + 'NLP/EXPERIMENTS/exp14/charlm_miniGPT/results/50000_instances500epochs.pt'
model_vocab = working_path + 'NLP/EXPERIMENTS/exp14/charlm_miniGPT/results/surf_vocab.json'
# training
args.batchsize = 32; args.epochs = 500
args.opt= 'Adam'; args.lr = 0.001
args.task = 'surf2surfpos'
args.seq_to_no_pad = 'surface'
# data
with open(model_vocab) as f:
word2id = json.load(f)
surf_vocab = VocabEntry(word2id)
args.trndata = working_path + 'NLP/Probing/pos_tagging/data/surfpos.uniquesurfs.trn.txt'
args.valdata = working_path + 'NLP/Probing/pos_tagging/data/surfpos.uniquesurfs.val.txt'
args.tstdata = working_path + 'NLP/Probing/pos_tagging/data/surfpos.uniquesurfs.val.txt'
args.maxtrnsize = 57769; args.maxvalsize = 10000; args.maxtstsize = 10000
rawdata, batches, vocab = build_data(args, surf_vocab)
_, surfpos_vocab = vocab
trndata, vlddata, tstdata = rawdata
args.trnsize , args.valsize, args.tstsize = len(trndata), len(vlddata), len(tstdata)
# model
num_layers=3
embed_dim=128
num_heads=16
block_size=128
embedding_dropout_rate=0.15
attention_dropout_rate=0.15
residual_dropout_rate=0.15
expand_ratio = 4
args.pretrained_model = GPT3(vocab=surf_vocab,
num_layers=num_layers,
embed_dim=embed_dim,
num_heads=num_heads,
block_size=block_size,
embedding_dropout_rate=embedding_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
residual_dropout_rate=residual_dropout_rate,
expand_ratio=expand_ratio
)
args.pretrained_model.load_state_dict(torch.load(model_path))
args.embed = embed_dim
args.model = MiniGPT_Probe2(args, surfpos_vocab)
print(args.model)
for param in args.model.token_embedding.parameters():
param.requires_grad = False
for param in args.model.decoder1.parameters():
param.requires_grad = False
for param in args.model.decoder2.parameters():
param.requires_grad = False
for param in args.model.MH_attention3.parameters():
param.requires_grad = False
args.model.to(args.device)
print(args.model)
# logging
args.modelname = working_path + 'NLP/Probing/pos_tagging/results/'+args.mname+'/'+str(len(trndata))+'_instances/'
try:
os.makedirs(args.modelname)
print("Directory " , args.modelname , " Created ")
except FileExistsError:
print("Directory " , args.modelname , " already exists")
args.save_path = args.modelname + str(args.epochs)+'epochs.pt'
args.log_path = args.modelname + str(args.epochs)+'epochs.log'
args.fig_path = args.modelname + str(args.epochs)+'epochs.png'
args.logger = Logger(args.log_path)
with open(args.modelname+'/surf_vocab.json', 'w') as f:
f.write(json.dumps(surf_vocab.word2id))
with open(args.modelname+'/surfpos_vocab.json', 'w') as f:
f.write(json.dumps(surfpos_vocab.word2id))
args.logger.write('\nnumber of params: %d \n' % count_parameters(args.model))
args.logger.write(args)
args.logger.write('\n')
# plotting
args.fig, args.axs = plt.subplots(2, sharex=True)
args.plt_style = pstyle = '-'
# run
train(batches, args)
plt.savefig(args.fig_path)
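# Run sketch (assumption): the script is executed directly, e.g. `python probe_train4.py`;
# all paths and hyper-parameters are hard-coded above, and the outputs (checkpoint, log and the
# loss/accuracy figure) land under .../Probing/pos_tagging/results/<mname>/<n>_instances/.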
| 39.542373
| 119
| 0.66781
|
f05bb19d99e2d1c6132a3e99f6cb2134d451f693
| 6,086
|
py
|
Python
|
qa/rpc-tests/proxy_test.py
|
LordSoylent/dextro-1
|
71514bc58170e65168e72925af85c3479bec873b
|
[
"MIT"
] | 14
|
2018-04-27T06:47:08.000Z
|
2021-06-29T21:39:38.000Z
|
qa/rpc-tests/proxy_test.py
|
LordSoylent/dextro-1
|
71514bc58170e65168e72925af85c3479bec873b
|
[
"MIT"
] | 4
|
2018-05-21T13:14:59.000Z
|
2019-06-15T22:59:08.000Z
|
qa/rpc-tests/proxy_test.py
|
LordSoylent/dextro-1
|
71514bc58170e65168e72925af85c3479bec873b
|
[
"MIT"
] | 24
|
2018-04-22T04:12:40.000Z
|
2020-12-08T19:26:43.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("youraddress.onion:39720", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "youraddress.onion")
assert_equal(cmd.port, 39720)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
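# Run sketch (assumption): executed through the qa/rpc-tests harness with Python 2; the framework's
# main() handles options such as --tmpdir (used above via self.options.tmpdir).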
| 41.684932
| 145
| 0.652317
|
34cc15f8bce373649ab99a47c02d283759e0918f
| 461
|
py
|
Python
|
plotly/validators/ohlc/stream/_token.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/ohlc/stream/_token.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/ohlc/stream/_token.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name='token', parent_name='ohlc.stream', **kwargs
):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
no_blank=True,
role='info',
strict=True,
**kwargs
)
| 25.611111
| 70
| 0.598698
|
6f1d47cbac07337a28b9b63a7ee97db69afa22aa
| 1,355
|
py
|
Python
|
contratospr/contracts/search.py
|
jycordero/contratospr-api
|
6778b02b42305aa7ce65c956a0d89029ddd857a4
|
[
"Apache-2.0"
] | 15
|
2019-02-26T12:40:18.000Z
|
2020-01-24T00:58:00.000Z
|
contratospr/contracts/search.py
|
jycordero/contratospr-api
|
6778b02b42305aa7ce65c956a0d89029ddd857a4
|
[
"Apache-2.0"
] | 52
|
2019-02-13T03:54:34.000Z
|
2020-01-20T16:39:56.000Z
|
contratospr/contracts/search.py
|
jycordero/contratospr-api
|
6778b02b42305aa7ce65c956a0d89029ddd857a4
|
[
"Apache-2.0"
] | 6
|
2019-02-18T13:59:55.000Z
|
2019-11-30T23:36:43.000Z
|
from django.contrib.postgres.fields import JSONField
from django.contrib.postgres.search import SearchQuery
from django.db.models.functions import Cast
from ..utils.search import SearchVector
from .models import Contract
search_vector = (
SearchVector(Cast("document__pages", JSONField()))
+ SearchVector("contractors__name")
+ SearchVector("entity__name")
+ SearchVector("number")
)
def index_contract(obj):
instance = (
Contract.objects.select_related("document", "entity")
.prefetch_related("contractors")
.annotate(search=search_vector)
.filter(pk=obj.pk)
)[:1]
contract = instance[0]
contract.search_vector = contract.search
return contract.save(update_fields=["search_vector"])
def search_contracts(query, service_id, service_group_id):
filter_kwargs = {}
if query:
filter_kwargs["search_vector"] = SearchQuery(query)
if service_id:
filter_kwargs["service_id"] = service_id
if service_group_id:
filter_kwargs["service__group_id"] = service_group_id
if not filter_kwargs:
return []
return (
Contract.objects.select_related("document", "entity", "service")
.prefetch_related("contractors")
.defer("document__pages")
.filter(**filter_kwargs)
.order_by("-date_of_grant")
)
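def _example_search():
    """Usage sketch (assumption: not part of the original module; the query string is illustrative)."""
    # Full-text search across document pages, contractor names, entity names and contract numbers;
    # with no filters at all, search_contracts short-circuits to an empty list.
    return search_contracts(query="construction", service_id=None, service_group_id=None)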
| 26.568627
| 72
| 0.690037
|
fc27781cf09b811e761f8faae0664c359a7b6a96
| 92,725
|
py
|
Python
|
ghostwriter/reporting/views.py
|
unashamedgeek/Ghostwriter
|
a1d221d60526d16d91864e00b2dd8bcce9f326e2
|
[
"BSD-3-Clause"
] | null | null | null |
ghostwriter/reporting/views.py
|
unashamedgeek/Ghostwriter
|
a1d221d60526d16d91864e00b2dd8bcce9f326e2
|
[
"BSD-3-Clause"
] | null | null | null |
ghostwriter/reporting/views.py
|
unashamedgeek/Ghostwriter
|
a1d221d60526d16d91864e00b2dd8bcce9f326e2
|
[
"BSD-3-Clause"
] | null | null | null |
"""This contains all of the views used by the Reporting application."""
# Standard Libraries
import io
import json
import logging
import logging.config
import os
import zipfile
from asgiref.sync import async_to_sync
from datetime import datetime
from socket import gaierror
# Django Imports
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import (
LoginRequiredMixin,
PermissionRequiredMixin,
UserPassesTestMixin,
)
from django.core.files import File
from django.core.files.base import ContentFile
from django.db.models import Q
from django.http import (
FileResponse,
Http404,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse, reverse_lazy
from django.views import generic
from django.views.generic.detail import DetailView, SingleObjectMixin
from django.views.generic.edit import CreateView, DeleteView, UpdateView, View
# 3rd Party Libraries
from channels.layers import get_channel_layer
from docx.image.exceptions import UnrecognizedImageError
from docx.opc.exceptions import PackageNotFoundError as DocxPackageNotFoundError
from pptx.exc import PackageNotFoundError as PptxPackageNotFoundError
from xlsxwriter.workbook import Workbook
# Ghostwriter Libraries
from ghostwriter.commandcenter.models import ReportConfiguration
from ghostwriter.modules import reportwriter
from ghostwriter.modules.exceptions import MissingTemplate
from ghostwriter.rolodex.models import Project, ProjectAssignment
from .filters import ArchiveFilter, FindingFilter, ReportFilter
from .forms import (
EvidenceForm,
FindingForm,
FindingNoteForm,
LocalFindingNoteForm,
ReportFindingLinkUpdateForm,
ReportForm,
ReportTemplateForm,
SelectReportTemplateForm,
)
from .models import (
Archive,
Evidence,
Finding,
FindingNote,
FindingType,
LocalFindingNote,
Report,
ReportFindingLink,
ReportTemplate,
Severity,
)
from .resources import FindingResource
channel_layer = get_channel_layer()
User = get_user_model()
# Using __name__ resolves to ghostwriter.reporting.views
logger = logging.getLogger(__name__)
def get_position(report_pk, severity):
findings = ReportFindingLink.objects.filter(
Q(report__pk=report_pk) & Q(severity=severity)
).order_by("-position")
if findings:
# Set new position to be one above the last/largest position
last_position = findings[0].position
return last_position + 1
return 1
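# Example (assumption: illustrative data): if a report already holds three findings in the
# "High" severity group at positions 1-3, get_position(report_pk, high_severity) returns 4;
# with no findings in that group it returns 1.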
##################
# AJAX Functions #
##################
@login_required
def ajax_update_report_findings(request):
"""
Update the ``position`` and ``severity`` fields of all :model:`reporting.ReportFindingLink`
attached to an individual :model:`reporting.Report`.
"""
data = {"result": "error"}
if request.method == "POST" and request.is_ajax():
pos = request.POST.get("positions")
report_id = request.POST.get("report")
severity_class = request.POST.get("severity").replace("_severity", "")
order = json.loads(pos)
logger.info(
"Received AJAX POST to update report %s's %s severity group findings in this order: %s",
report_id,
severity_class,
", ".join(order),
)
try:
severity = Severity.objects.get(severity__iexact=severity_class)
except Severity.DoesNotExist:
severity = None
if severity:
counter = 1
for finding_id in order:
if "placeholder" not in finding_id:
finding_instance = ReportFindingLink.objects.get(id=finding_id)
if finding_instance:
finding_instance.severity = severity
finding_instance.position = counter
finding_instance.save()
counter += 1
else:
logger.error(
"Received a finding ID, %s, that did not match an existing finding",
finding_id,
)
            # If all went well, return success
            data = {"result": "success"}
        else:
            data = {"result": "specified severity, {}, is invalid".format(severity_class)}
else:
data = {"result": "error"}
return JsonResponse(data)
class UpdateTemplateLintResults(LoginRequiredMixin, SingleObjectMixin, View):
"""
Return an updated version of the template following a request to update linter results
for an individual :model:`reporting.ReportTemplate`.
**Template**
:template:`snippets/template_lint_results.html`
"""
model = ReportTemplate
def get(self, *args, **kwargs):
self.object = self.get_object()
html = render_to_string(
"snippets/template_lint_results.html",
{"reporttemplate": self.object},
)
return HttpResponse(html)
class AssignFinding(LoginRequiredMixin, SingleObjectMixin, View):
"""
Copy an individual :model:`reporting.Finding` to create a new
:model:`reporting.ReportFindingLink` connected to the user's active
:model:`reporting.Report`.
"""
model = Finding
def post(self, *args, **kwargs):
self.object = self.get_object()
# The user must have the ``active_report`` session variable
active_report = self.request.session.get("active_report", None)
if active_report:
try:
report = Report.objects.get(pk=active_report["id"])
except Exception:
message = (
"Please select a report to edit before trying to assign a finding"
)
data = {"result": "error", "message": message}
return JsonResponse(data)
# Clone the selected object to make a new :model:`reporting.ReportFindingLink`
report_link = ReportFindingLink(
title=self.object.title,
description=self.object.description,
impact=self.object.impact,
mitigation=self.object.mitigation,
replication_steps=self.object.replication_steps,
host_detection_techniques=self.object.host_detection_techniques,
network_detection_techniques=self.object.network_detection_techniques,
references=self.object.references,
severity=self.object.severity,
finding_type=self.object.finding_type,
finding_guidance=self.object.finding_guidance,
report=report,
assigned_to=self.request.user,
position=get_position(report.id, self.object.severity),
)
report_link.save()
message = "{} successfully added to your active report".format(self.object)
data = {"result": "success", "message": message}
logger.info(
"Copied %s %s to %s %s (%s %s) by request of %s",
self.object.__class__.__name__,
self.object.id,
report.__class__.__name__,
report.id,
report_link.__class__.__name__,
report_link.id,
self.request.user,
)
else:
message = "Please select a report to edit before trying to assign a finding"
data = {"result": "error", "message": message}
return JsonResponse(data)
class LocalFindingNoteDelete(LoginRequiredMixin, SingleObjectMixin, UserPassesTestMixin, View):
"""
Delete an individual :model:`reporting.LocalFindingNote`.
"""
model = LocalFindingNote
def test_func(self):
self.object = self.get_object()
return self.object.operator.id == self.request.user.id
def handle_no_permission(self):
messages.error(self.request, "You do not have permission to access that")
return redirect("home:dashboard")
def post(self, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
data = {"result": "success", "message": "Note successfully deleted!"}
logger.info(
"Deleted %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
return JsonResponse(data)
class FindingNoteDelete(LoginRequiredMixin, SingleObjectMixin, UserPassesTestMixin, View):
"""
Delete an individual :model:`reporting.FindingNote`.
"""
model = FindingNote
def test_func(self):
self.object = self.get_object()
return self.object.operator.id == self.request.user.id
def handle_no_permission(self):
messages.error(self.request, "You do not have permission to access that")
return redirect("home:dashboard")
def post(self, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
data = {"result": "success", "message": "Note successfully deleted!"}
logger.info(
"Deleted %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
return JsonResponse(data)
class ReportFindingLinkDelete(LoginRequiredMixin, SingleObjectMixin, View):
"""
Delete an individual :model:`reporting.ReportFindingLink`.
"""
model = ReportFindingLink
def post(self, *args, **kwargs):
self.object = self.get_object()
self.report_pk = self.get_object().report.pk
self.object.delete()
data = {
"result": "success",
"message": "Successfully deleted {finding} and cleaned up evidence".format(
finding=self.object
),
}
logger.info(
"Deleted %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
return JsonResponse(data)
class ReportActivate(LoginRequiredMixin, SingleObjectMixin, View):
"""
Set an individual :model:`reporting.Report` as active for the current user session.
"""
model = Report
# Set the user's session variable
def post(self, *args, **kwargs):
self.object = self.get_object()
try:
self.request.session["active_report"] = {}
self.request.session["active_report"]["id"] = self.object.id
self.request.session["active_report"]["title"] = self.object.title
message = "{report} is now your active report and you will be redirected there in 5 seconds".format(
report=self.object.title
)
data = {
"result": "success",
"report": self.object.title,
"report_url": self.object.get_absolute_url(),
"message": message,
}
except Exception as exception: # pragma: no cover
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
log_message = template.format(type(exception).__name__, exception.args)
logger.error(log_message)
data = {
"result": "error",
"message": "Could not set the selected report as your active report",
}
return JsonResponse(data)
class ReportStatusToggle(LoginRequiredMixin, SingleObjectMixin, View):
"""
Toggle the ``complete`` field of an individual :model:`rolodex.Report`.
"""
model = Report
def post(self, *args, **kwargs):
self.object = self.get_object()
try:
if self.object.complete:
self.object.complete = False
data = {
"result": "success",
"message": "Report successfully marked as incomplete",
"status": "Draft",
"toggle": 0,
}
else:
self.object.complete = True
data = {
"result": "success",
"message": "Report successfully marked as complete",
"status": "Complete",
"toggle": 1,
}
self.object.save()
logger.info(
"Toggled status of %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
except Exception as exception: # pragma: no cover
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
log_message = template.format(type(exception).__name__, exception.args)
logger.error(log_message)
data = {"result": "error", "message": "Could not update report's status"}
return JsonResponse(data)
class ReportDeliveryToggle(LoginRequiredMixin, SingleObjectMixin, View):
"""
Toggle the ``delivered`` field of an individual :model:`rolodex.Report`.
"""
model = Report
def post(self, *args, **kwargs):
self.object = self.get_object()
try:
if self.object.delivered:
self.object.delivered = False
data = {
"result": "success",
"message": "Report successfully marked as not delivered",
"status": "Not Delivered",
"toggle": 0,
}
else:
self.object.delivered = True
data = {
"result": "success",
"message": "Report successfully marked as delivered",
"status": "Delivered",
"toggle": 1,
}
self.object.save()
logger.info(
"Toggled delivery status of %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
except Exception as exception: # pragma: no cover
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
log_message = template.format(type(exception).__name__, exception.args)
logger.error(log_message)
data = {
"result": "error",
"message": "Could not update report's deliveery status",
}
return JsonResponse(data)
class ReportFindingStatusUpdate(LoginRequiredMixin, SingleObjectMixin, View):
"""
Update the ``complete`` field of an individual :model:`reporting.ReportFindingLink`.
"""
model = ReportFindingLink
def post(self, *args, **kwargs):
data = {}
# Get ``status`` kwargs from the URL
status = self.kwargs["status"]
self.object = self.get_object()
try:
result = "success"
if status.lower() == "edit":
self.object.complete = False
message = "Successfully flagged finding for editing"
display_status = "Needs Editing"
classes = "burned"
elif status.lower() == "complete":
self.object.complete = True
message = "Successfully marking finding as complete"
display_status = "Ready"
classes = "healthy"
else:
result = "error"
message = "Could not update the finding's status to: {}".format(status)
display_status = "Error"
classes = "burned"
self.object.save()
# Prepare the JSON response data
data = {
"result": result,
"status": display_status,
"classes": classes,
"message": message,
}
logger.info(
"Set status of %s %s to %s by request of %s",
self.object.__class__.__name__,
self.object.id,
status,
self.request.user,
)
# Return an error message if the query for the requested status returned DoesNotExist
except Exception as exception: # pragma: no cover
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
log_message = template.format(type(exception).__name__, exception.args)
logger.error(log_message)
data = {"result": "error", "message": "Could not update finding's status"}
return JsonResponse(data)
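# A sketch of the JSON returned by ReportFindingStatusUpdate (illustrative only):
# POSTing with ``status`` set to "complete" responds with
#
#   {"result": "success", "status": "Ready", "classes": "healthy",
#    "message": "Successfully marked finding as complete"}
#
# How the front-end consumes ``status`` and ``classes`` is assumed here and is
# not shown in this module.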
class ReportTemplateSwap(LoginRequiredMixin, SingleObjectMixin, View):
"""
Update the ``template`` value for an individual :model:`reporting.Report`.
"""
model = Report
def post(self, *args, **kwargs):
self.object = self.get_object()
docx_template_id = self.request.POST.get("docx_template", None)
pptx_template_id = self.request.POST.get("pptx_template", None)
if docx_template_id and pptx_template_id:
docx_template_query = None
pptx_template_query = None
try:
docx_template_id = int(docx_template_id)
pptx_template_id = int(pptx_template_id)
if docx_template_id < 0 or pptx_template_id < 0:
data = {
"result": "warning",
"message": "Select both templates before your settings can be saved",
}
else:
if docx_template_id >= 0:
docx_template_query = ReportTemplate.objects.get(
pk=docx_template_id
)
self.object.docx_template = docx_template_query
if pptx_template_id >= 0:
pptx_template_query = ReportTemplate.objects.get(
pk=pptx_template_id
)
self.object.pptx_template = pptx_template_query
data = {
"result": "success",
"message": "Template successfully swapped",
}
self.object.save()
# Check template for linting issues
try:
if docx_template_query:
template_status = docx_template_query.get_status()
data["docx_lint_result"] = template_status
if template_status != "success":
if template_status == "warning":
data[
"docx_lint_message"
] = "Selected Word template has warnings from linter. Check the template before generating a report."
elif template_status == "error":
data[
"docx_lint_message"
] = "Selected Word template has linting errors and cannot be used to generate a report."
elif template_status == "failed":
data[
"docx_lint_message"
] = "Selected Word template failed basic linter checks and can't be used to generate a report."
else:
data[
"docx_lint_message"
] = "Selected Word template has an unknown linter status. Check and lint the template before generating a report."
data["docx_url"] = docx_template_query.get_absolute_url()
except Exception: # pragma: no cover
logger.exception("Failed to get the template status")
data["docx_lint_result"] = "failed"
data[
"docx_lint_message"
] = "Could not retrieve the Word template's linter status. Check and lint the template before generating a report."
try:
if pptx_template_query:
template_status = pptx_template_query.get_status()
data["pptx_lint_result"] = template_status
if template_status != "success":
if template_status == "warning":
data[
"pptx_lint_message"
] = "Selected PowerPoint template has warnings from linter. Check the template before generating a report."
elif template_status == "error":
data[
"pptx_lint_message"
] = "Selected PowerPoint template has linting errors and cannot be used to generate a report."
elif template_status == "failed":
data[
"pptx_lint_message"
] = "Selected PowerPoint template failed basic linter checks and can't be used to generate a report."
else:
data[
"pptx_lint_message"
] = "Selected PowerPoint template has an unknown linter status. Check and lint the template before generating a report."
data["pptx_url"] = pptx_template_query.get_absolute_url()
except Exception: # pragma: no cover
logger.exception("Failed to get the template status")
data["pptx_lint_result"] = "failed"
data[
"pptx_lint_message"
] = "Could not retrieve the PowerPoint template's linter status. Check and lint the template before generating a report."
logger.info(
"Swapped template for %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
except ValueError:
data = {
"result": "error",
"message": "Submitted template ID was not an integer",
}
logger.error(
"Received one or two invalid (non-integer) template IDs (%s & %s) from a request submitted by %s",
docx_template_id,
pptx_template_id,
self.request.user,
)
except ReportTemplate.DoesNotExist:
data = {
"result": "error",
"message": "Submitted template ID does not exist",
}
logger.error(
"Received one or two invalid (non-existent) template IDs (%s & %s) from a request submitted by %s",
docx_template_id,
pptx_template_id,
self.request.user,
)
except Exception: # pragma: no cover
data = {
"result": "error",
"message": "An exception prevented the template change",
}
logger.exception(
"Encountered an error trying to update %s %s with template IDs %s & %s from a request submitted by %s",
self.object.__class__.__name__,
self.object.id,
docx_template_id,
pptx_template_id,
self.request.user,
)
else:
data = {"result": "error", "message": "Submitted request was incomplete"}
logger.warning(
"Received bad template IDs (%s & %s) from a request submitted by %s",
docx_template_id,
pptx_template_id,
self.request.user,
)
return JsonResponse(data)
class ReportTemplateLint(LoginRequiredMixin, SingleObjectMixin, View):
"""
Check an individual :model:`reporting.ReportTemplate` for Jinja2 syntax errors
and undefined variables.
"""
model = ReportTemplate
def post(self, *args, **kwargs):
self.object = self.get_object()
template_loc = self.object.document.path
linter = reportwriter.TemplateLinter(template_loc=template_loc)
if self.object.doc_type.doc_type == "docx":
results = linter.lint_docx()
elif self.object.doc_type.doc_type == "pptx":
results = linter.lint_pptx()
else:
logger.warning(
"Template had an unknown filetype not supported by the linter: %s",
self.object.doc_type,
)
results = {}
self.object.lint_result = results
self.object.save()
data = results
if data["result"] == "success":
data[
"message"
] = "Template linter returned results with no errors or warnings"
elif not data["result"]:
data[
"message"
] = f"Template had an unknown filetype not supported by the linter: {self.object.doc_type}"
else:
data[
"message"
] = "Template linter returned results with issues that require attention"
return JsonResponse(data)
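# A sketch of the JSON returned by ReportTemplateLint (illustrative only): the
# view always sets ``result`` and ``message``; any additional keys come from
# ``TemplateLinter`` and their exact names are an assumption not shown in this
# module. A clean template might produce
#
#   {"result": "success",
#    "message": "Template linter returned results with no errors or warnings"}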
class ReportClone(LoginRequiredMixin, SingleObjectMixin, View):
"""
Create an identical copy of an individual :model:`reporting.Report`.
"""
model = Report
def get(self, *args, **kwargs):
        self.object = self.get_object()
        # Default to redirecting back to this report if the clone fails partway through
        new_report_pk = self.object.pk
try:
findings = ReportFindingLink.objects.select_related("report").filter(
report=self.object.pk
)
report_to_clone = self.object
report_to_clone.title = report_to_clone.title + " Copy"
report_to_clone.complete = False
report_to_clone.pk = None
report_to_clone.save()
new_report_pk = report_to_clone.pk
for finding in findings:
finding.report = report_to_clone
finding.pk = None
finding.save()
logger.info(
"Cloned %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.success(
self.request,
"Successfully cloned your report: {}".format(self.object.title),
extra_tags="alert-error",
)
except Exception as exception: # pragma: no cover
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
log_message = template.format(type(exception).__name__, exception.args)
logger.error(log_message)
messages.error(
self.request,
"Encountered an error while trying to clone your report: {}".format(
exception.args
),
extra_tags="alert-error",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", kwargs={"pk": new_report_pk})
)
class AssignBlankFinding(LoginRequiredMixin, SingleObjectMixin, View):
"""
Create a blank :model:`reporting.ReportFindingLink` entry linked to an individual
:model:`reporting.Report`.
"""
model = Report
def __init__(self):
self.severity = Severity.objects.order_by("weight").last()
self.finding_type = FindingType.objects.all().first()
super().__init__()
def get(self, *args, **kwargs):
self.object = self.get_object()
try:
report_link = ReportFindingLink(
title="Blank Template",
severity=self.severity,
finding_type=self.finding_type,
report=self.object,
assigned_to=self.request.user,
position=get_position(self.object.id, self.severity),
)
report_link.save()
logger.info(
"Added a blank finding to %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.success(
self.request,
"Successfully added a blank finding to the report",
extra_tags="alert-success",
)
except Exception as exception: # pragma: no cover
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
log_message = template.format(type(exception).__name__, exception.args)
logger.error(log_message)
messages.error(
self.request,
"Encountered an error while trying to add a blank finding to your report: {}".format(
exception.args
),
extra_tags="alert-error",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", args=(self.object.id,))
)
class ConvertFinding(LoginRequiredMixin, SingleObjectMixin, View):
"""
Create a copy of an individual :model:`reporting.ReportFindingLink` and prepare
it to be saved as a new :model:`reporting.Finding`.
**Template**
:template:`reporting/finding_form.html`
"""
model = ReportFindingLink
def get(self, *args, **kwargs):
        self.object = self.get_object()
        # Fall back to an empty form if populating the initial data fails below
        form = FindingForm()
try:
finding_instance = self.object
form = FindingForm(
initial={
"title": finding_instance.title,
"description": finding_instance.description,
"impact": finding_instance.impact,
"mitigation": finding_instance.mitigation,
"replication_steps": finding_instance.replication_steps,
"host_detection_techniques": finding_instance.host_detection_techniques,
"network_detection_techniques": finding_instance.network_detection_techniques,
"references": finding_instance.references,
"severity": finding_instance.severity,
"finding_type": finding_instance.finding_type,
}
)
except Exception as exception: # pragma: no cover
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
log_message = template.format(type(exception).__name__, exception.args)
logger.error(log_message)
messages.error(
self.request,
"Encountered an error while trying to convert your finding: {}".format(
exception.args
),
extra_tags="alert-error",
)
return render(self.request, "reporting/finding_form.html", {"form": form})
def post(self, *args, **kwargs):
form = FindingForm(self.request.POST)
if form.is_valid():
new_finding = form.save()
new_finding_pk = new_finding.pk
return HttpResponseRedirect(
reverse("reporting:finding_detail", kwargs={"pk": new_finding_pk})
)
logger.warning(form.errors.as_data())
return render(self.request, "reporting/finding_form.html", {"form": form})
##################
# View Functions #
##################
@login_required
def index(request):
"""
Display the main homepage.
"""
return HttpResponseRedirect(reverse("home:dashboard"))
@login_required
def findings_list(request):
"""
Display a list of all :model:`reporting.Finding`.
**Context**
``filter``
Instance of :filter:`reporting.FindingFilter`
**Template**
:template:`reporting/finding_list.html`
"""
# Check if a search parameter is in the request
try:
search_term = request.GET.get("finding_search")
except Exception:
search_term = ""
if search_term:
messages.success(
request,
"Displaying search results for: {}".format(search_term),
extra_tags="alert-success",
)
findings = (
Finding.objects.select_related("severity", "finding_type")
.filter(
Q(title__icontains=search_term) | Q(description__icontains=search_term)
)
.order_by("severity__weight", "finding_type", "title")
)
else:
findings = (
Finding.objects.select_related("severity", "finding_type")
.all()
.order_by("severity__weight", "finding_type", "title")
)
findings_filter = FindingFilter(request.GET, queryset=findings)
return render(request, "reporting/finding_list.html", {"filter": findings_filter})
@login_required
def reports_list(request):
"""
Display a list of all :model:`reporting.Report`.
**Template**
:template:`reporting/report_list.html`
"""
reports = (
Report.objects.select_related("created_by").all().order_by("complete", "title")
)
reports_filter = ReportFilter(request.GET, queryset=reports)
return render(request, "reporting/report_list.html", {"filter": reports_filter})
@login_required
def archive_list(request):
"""
Display a list of all :model:`reporting.Report` marked as archived.
**Context**
``filter``
Instance of :filter:`reporting.ArchiveFilter`
**Template**
:template:`reporting/archives.html`
"""
archives = (
Archive.objects.select_related("project__client")
.all()
.order_by("project__client")
)
archive_filter = ArchiveFilter(request.GET, queryset=archives)
return render(request, "reporting/archives.html", {"filter": archive_filter})
@login_required
def upload_evidence_modal_success(request):
"""
Display message following the successful creation of an individual
:model:`reporting.Evidence` using a TinyMCE URLDialog.
**Template**
:template:`reporting/evidence_modal_success.html`
"""
return render(request, "reporting/evidence_modal_success.html")
def generate_report_name(report_instance):
"""
Generate a filename for a report based on the current time and attributes of an
individual :model:`reporting.Report`. All periods and commas are removed to keep
the filename browser-friendly.
"""
def replace_chars(report_name):
return report_name.replace(".", "").replace(",", "")
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
client_name = report_instance.project.client
assessment_type = report_instance.project.project_type
report_name = replace_chars(f"{timestamp}_{client_name}_{assessment_type}")
return report_name
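# Illustrative example (hypothetical client and project type): for a client named
# "Acme, Inc." and a project type of "Red Team", a report generated at 14:45:30
# on 31 January 2024 would be named
#
#   20240131_144530_Acme Inc_Red Team
#
# because replace_chars() strips periods and commas but leaves other characters,
# including spaces, untouched.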
def zip_directory(path, zip_handler):
"""
Compress the target directory as a Zip file for archiving.
"""
# Walk the target directory
abs_src = os.path.abspath(path)
for root, dirs, files in os.walk(path):
# Add each file to the zip file handler
for file in files:
absname = os.path.abspath(os.path.join(root, file))
arcname = absname[len(abs_src) + 1 :]
zip_handler.write(os.path.join(root, file), "evidence/" + arcname)
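# Minimal usage sketch for zip_directory (hypothetical paths, kept as a comment
# so nothing runs at import time):
#
#   import io, zipfile
#   zip_buffer = io.BytesIO()
#   with zipfile.ZipFile(zip_buffer, "a") as zf:
#       zip_directory("/tmp/evidence/42", zf)
#
# Every file under the target directory is stored in the archive under an
# "evidence/" prefix with its path relative to that directory, which is how the
# archive() view below bundles a report's evidence files.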
@login_required
def archive(request, pk):
"""
Generate all report types for an individual :model:`reporting.Report`, collect all
related :model:`reporting.Evidence` and related files, and compress the files into a
single Zip file for archiving.
"""
try:
report_instance = Report.objects.select_related("project", "project__client").get(
pk=pk
)
# output_path = os.path.join(settings.MEDIA_ROOT, report_instance.title)
# evidence_path = os.path.join(settings.MEDIA_ROOT)
archive_loc = os.path.join(settings.MEDIA_ROOT, "archives/")
evidence_loc = os.path.join(settings.MEDIA_ROOT, "evidence", str(pk))
report_name = generate_report_name(report_instance)
# Get the templates for Word and PowerPoint
if report_instance.docx_template:
docx_template = report_instance.docx_template.document.path
else:
docx_template = ReportTemplate.objects.get(
default=True, doc_type__doc_type="docx"
).document.path
if report_instance.pptx_template:
pptx_template = report_instance.pptx_template.document.path
else:
pptx_template = ReportTemplate.objects.get(
default=True, doc_type__doc_type="pptx"
).document.path
engine = reportwriter.Reportwriter(report_instance, template_loc=None)
json_doc, word_doc, excel_doc, ppt_doc = engine.generate_all_reports(
docx_template, pptx_template
)
# Convert the dict to pretty JSON output for the file
pretty_json = json.dumps(json_doc, indent=4)
# Create a zip file in memory and add the reports to it
zip_buffer = io.BytesIO()
with zipfile.ZipFile(zip_buffer, "a") as zf:
zf.writestr("report.json", pretty_json)
zf.writestr("report.docx", word_doc.getvalue())
zf.writestr("report.xlsx", excel_doc.getvalue())
zf.writestr("report.pptx", ppt_doc.getvalue())
zip_directory(evidence_loc, zf)
zip_buffer.seek(0)
        # Wrap the in-memory zip in a ContentFile so the FileField can store it
        archive_file = ContentFile(zip_buffer.read(), name=report_name + ".zip")
        new_archive = Archive(
            project=report_instance.project,
            report_archive=File(archive_file),
        )
        new_archive.save()
messages.success(
request,
"Successfully archived {}".format(report_instance.title),
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("reporting:archived_reports"))
except Report.DoesNotExist:
messages.error(
request,
"The target report does not exist",
extra_tags="alert-danger",
)
except ReportTemplate.DoesNotExist:
messages.error(
request,
"You do not have templates selected for Word and PowerPoint and have not selected default templates",
extra_tags="alert-danger",
)
except Exception:
logger.exception("Error archiving report")
messages.error(
request,
"Failed to generate one or more documents for the archive",
extra_tags="alert-danger",
)
return HttpResponseRedirect(reverse("reporting:report_detail", kwargs={"pk": pk}))
@login_required
def download_archive(request, pk):
"""
Return the target :model:`reporting.Report` archive file for download.
"""
archive_instance = Archive.objects.get(pk=pk)
file_path = os.path.join(settings.MEDIA_ROOT, archive_instance.report_archive.path)
if os.path.exists(file_path):
with open(file_path, "rb") as archive_file:
response = HttpResponse(
archive_file.read(), content_type="application/x-zip-compressed"
)
response["Content-Disposition"] = "attachment; filename=" + os.path.basename(
file_path
)
return response
raise Http404
@login_required
def export_findings_to_csv(request):
"""
Export all :model:`reporting.Finding` to a csv file for download.
"""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
finding_resource = FindingResource()
dataset = finding_resource.export()
response = HttpResponse(dataset.csv, content_type="text/csv")
response["Content-Disposition"] = f'attachment; filename="{timestamp}_findings.csv"'
return response
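# Illustrative example (hypothetical date): a CSV export requested at 14:45:30 on
# 31 January 2024 is served as an attachment named
#
#   20240131_144530_findings.csv
#
# using the same timestamp format as generate_report_name() above.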
################
# View Classes #
################
# CBVs related to :model:`reporting.Finding`
class FindingDetailView(LoginRequiredMixin, DetailView):
"""
Display an individual :model:`reporting.Finding`.
**Template**
:template:`reporting/finding_detail.html`
"""
model = Finding
class FindingCreate(LoginRequiredMixin, CreateView):
"""
Create an individual instance of :model:`reporting.Finding`.
**Context**
``cancel_link``
        Link for the form's Cancel button to return to findings list page
**Template**
:template:`reporting/finding_form.html`
"""
model = Finding
form_class = FindingForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse("reporting:findings")
return ctx
def get_success_url(self):
messages.success(
self.request,
"Successfully added {} to the findings library".format(self.object.title),
extra_tags="alert-success",
)
return reverse("reporting:finding_detail", kwargs={"pk": self.object.pk})
class FindingUpdate(LoginRequiredMixin, UpdateView):
"""
Update an individual instance of :model:`reporting.Finding`.
**Context**
``cancel_link``
        Link for the form's Cancel button to return to finding's detail page
**Template**
:template:`reporting/finding_form.html`
"""
model = Finding
form_class = FindingForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse(
"reporting:finding_detail", kwargs={"pk": self.object.pk}
)
return ctx
def get_success_url(self):
messages.success(
self.request,
"Master record for {} was successfully updated".format(
self.get_object().title
),
extra_tags="alert-success",
)
return reverse("reporting:finding_detail", kwargs={"pk": self.object.pk})
class FindingDelete(LoginRequiredMixin, DeleteView):
"""
Delete an individual instance of :model:`reporting.Finding`.
**Context**
``object_type``
String describing what is to be deleted
``object_to_be_deleted``
To-be-deleted instance of :model:`reporting.Finding`
``cancel_link``
Link for the form's Cancel button to return to finding list page
**Template**
:template:`confirm_delete.html`
"""
model = Finding
template_name = "confirm_delete.html"
def get_success_url(self):
messages.warning(
self.request,
"Master record for {} was successfully deleted".format(
self.get_object().title
),
extra_tags="alert-warning",
)
return reverse_lazy("reporting:findings")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
queryset = kwargs["object"]
ctx["object_type"] = "finding master record"
ctx["object_to_be_deleted"] = queryset.title
ctx["cancel_link"] = reverse("reporting:findings")
return ctx
# CBVs related to :model:`reporting.Report`
class ReportDetailView(LoginRequiredMixin, DetailView):
"""
Display an individual :model:`reporting.Report`.
**Template**
:template:`reporting/report_detail.html`
"""
model = Report
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
form = SelectReportTemplateForm(instance=self.object)
form.fields["docx_template"].queryset = ReportTemplate.objects.filter(
Q(doc_type__doc_type="docx") & Q(client=self.object.project.client)
| Q(doc_type__doc_type="docx") & Q(client__isnull=True)
).select_related(
"doc_type",
"client",
)
form.fields["pptx_template"].queryset = ReportTemplate.objects.filter(
Q(doc_type__doc_type="pptx") & Q(client=self.object.project.client)
| Q(doc_type__doc_type="pptx") & Q(client__isnull=True)
).select_related(
"doc_type",
"client",
)
ctx["form"] = form
return ctx
class ReportCreate(LoginRequiredMixin, CreateView):
"""
Create an individual instance of :model:`reporting.Report`.
**Context**
``project``
Instance of :model:`rolodex.Project` associated with this report
``cancel_link``
Link for the form's Cancel button to return to report list or details page
**Template**
:template:`reporting/report_form.html`
"""
model = Report
form_class = ReportForm
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
# Check if this request is for a specific project or not
self.project = ""
# Determine if ``pk`` is in the kwargs
if "pk" in self.kwargs:
pk = self.kwargs.get("pk")
# Try to get the project from :model:`rolodex.Project`
if pk:
try:
self.project = get_object_or_404(Project, pk=self.kwargs.get("pk"))
except Project.DoesNotExist:
logger.info(
"Received report create request for Project ID %s, but that Project does not exist",
pk,
)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"project": self.project})
return kwargs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["project"] = self.project
if self.project:
ctx["cancel_link"] = reverse(
"rolodex:project_detail", kwargs={"pk": self.project.pk}
)
else:
ctx["cancel_link"] = reverse("reporting:reports")
return ctx
def get_form(self, form_class=None):
form = super().get_form(form_class)
if not form.fields["project"].queryset:
messages.error(
self.request,
"There are no active projects for a new report",
extra_tags="alert-error",
)
return form
def form_valid(self, form):
form.instance.created_by = self.request.user
self.request.session["active_report"] = {}
self.request.session["active_report"]["title"] = form.instance.title
return super().form_valid(form)
def get_initial(self):
if self.project:
title = "{} {} ({}) Report".format(
self.project.client, self.project.project_type, self.project.start_date
)
return {"title": title, "project": self.project.id}
return super().get_initial()
def get_success_url(self):
self.request.session["active_report"]["id"] = self.object.pk
self.request.session.modified = True
messages.success(
self.request,
"Successfully created new report and set it as your active report",
extra_tags="alert-success",
)
return reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
class ReportUpdate(LoginRequiredMixin, UpdateView):
"""
Update an individual instance of :model:`reporting.Report`.
**Context**
``cancel_link``
Link for the form's Cancel button to return to report's detail page
**Template**
:template:`reporting/report_form.html`
"""
model = Report
form_class = ReportForm
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
# Check if this request is for a specific project or not
self.project = "update"
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"project": self.project})
return kwargs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["project"] = self.object.project
ctx["cancel_link"] = reverse(
"reporting:report_detail", kwargs={"pk": self.object.pk}
)
return ctx
def form_valid(self, form):
self.request.session["active_report"] = {}
self.request.session["active_report"]["id"] = form.instance.id
self.request.session["active_report"]["title"] = form.instance.title
self.request.session.modified = True
return super().form_valid(form)
def get_success_url(self):
messages.success(
self.request, "Successfully updated the report", extra_tags="alert-success"
)
return reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
class ReportDelete(LoginRequiredMixin, DeleteView):
"""
Delete an individual instance of :model:`reporting.Report`.
**Context**
``object_type``
String describing what is to be deleted
``object_to_be_deleted``
To-be-deleted instance of :model:`reporting.Report`
``cancel_link``
Link for the form's Cancel button to return to report's detail page
**Template**
:template:`confirm_delete.html`
"""
model = Report
template_name = "confirm_delete.html"
def get_success_url(self):
# Clear user's session if deleted report is their active report
if self.object.pk == self.request.session["active_report"]["id"]:
self.request.session["active_report"] = {}
self.request.session["active_report"]["id"] = ""
self.request.session["active_report"]["title"] = ""
self.request.session.modified = True
messages.warning(
self.request,
"Successfully deleted the report and associated evidence files",
extra_tags="alert-warning",
)
return "{}#reports".format(
reverse("rolodex:project_detail", kwargs={"pk": self.object.project.id})
)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
queryset = kwargs["object"]
ctx["cancel_link"] = reverse(
"rolodex:project_detail", kwargs={"pk": self.object.project.pk}
)
ctx["object_type"] = "entire report, evidence and all"
ctx["object_to_be_deleted"] = queryset.title
return ctx
class ReportTemplateListView(LoginRequiredMixin, generic.ListView):
"""
Display a list of all :model:`reporting.ReportTemplate`.
**Template**
    :template:`reporting/report_templates_list.html`
"""
model = ReportTemplate
template_name = "reporting/report_templates_list.html"
class ReportTemplateDetailView(LoginRequiredMixin, DetailView):
"""
Display an individual :model:`reporting.ReportTemplate`.
**Template**
    :template:`reporting/report_template_detail.html`
"""
model = ReportTemplate
template_name = "reporting/report_template_detail.html"
class ReportTemplateCreate(LoginRequiredMixin, CreateView):
"""
Create an individual instance of :model:`reporting.ReportTemplate`.
**Context**
``cancel_link``
Link for the form's Cancel button to return to template list page
**Template**
:template:`report_template_form.html`
"""
model = ReportTemplate
form_class = ReportTemplateForm
template_name = "reporting/report_template_form.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse("reporting:templates")
return ctx
def get_initial(self):
date = datetime.now().strftime("%d %B %Y")
initial_upload = f'<p><span class="bold">{date}</span></p><p>Initial upload</p>'
return {"changelog": initial_upload}
def get_success_url(self):
messages.success(
self.request,
"Template successfully uploaded",
extra_tags="alert-success",
)
return reverse("reporting:template_detail", kwargs={"pk": self.object.pk})
def form_valid(self, form, **kwargs):
self.object = form.save(commit=False)
self.object.uploaded_by = self.request.user
self.object.save()
return HttpResponseRedirect(self.get_success_url())
class ReportTemplateUpdate(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
"""
    Update an individual instance of :model:`reporting.ReportTemplate`.
**Context**
``cancel_link``
Link for the form's Cancel button to return to template list page
**Template**
:template:`report_template_form.html`
"""
model = ReportTemplate
form_class = ReportTemplateForm
template_name = "reporting/report_template_form.html"
permission_denied_message = "Only an admin can edit this template"
def has_permission(self):
self.object = self.get_object()
if self.object.protected:
return self.request.user.is_staff
return self.request.user.is_active
def handle_no_permission(self):
self.object = self.get_object()
messages.error(
self.request, "That template is protected – only an admin can edit it"
)
return HttpResponseRedirect(
reverse(
"reporting:template_detail",
args=(self.object.pk,),
)
)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse("reporting:templates")
return ctx
def get_success_url(self):
messages.success(
self.request,
"Template successfully updated",
extra_tags="alert-success",
)
return reverse("reporting:template_detail", kwargs={"pk": self.object.pk})
def form_valid(self, form, **kwargs):
self.object = form.save(commit=False)
self.object.uploaded_by = self.request.user
self.object.save()
return HttpResponseRedirect(self.get_success_url())
class ReportTemplateDelete(LoginRequiredMixin, PermissionRequiredMixin, DeleteView):
"""
Delete an individual instance of :model:`reporting.ReportTemplate`.
**Context**
``object_type``
String describing what is to be deleted
``object_to_be_deleted``
To-be-deleted instance of :model:`reporting.ReportTemplate`
``cancel_link``
Link for the form's Cancel button to return to template's detail page
**Template**
:template:`confirm_delete.html`
"""
model = ReportTemplate
template_name = "confirm_delete.html"
permission_denied_message = "Only an admin can delete this template"
def has_permission(self):
self.object = self.get_object()
if self.object.protected:
return self.request.user.is_staff
return self.request.user.is_active
def handle_no_permission(self):
self.object = self.get_object()
messages.error(
self.request, "That template is protected – only an admin can edit it"
)
return HttpResponseRedirect(
reverse(
"reporting:template_detail",
args=(self.object.pk,),
)
)
def get_success_url(self):
message = "Successfully deleted the template and associated file"
if os.path.isfile(self.object.document.path):
message = "Successfully deleted the template, but could not delete the associated file"
messages.success(
self.request,
message,
extra_tags="alert-success",
)
return reverse("reporting:templates")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
queryset = kwargs["object"]
ctx["cancel_link"] = reverse(
"reporting:template_detail", kwargs={"pk": queryset.pk}
)
ctx["object_type"] = "report template file (and associated file on disk)"
ctx["object_to_be_deleted"] = queryset.filename
return ctx
class ReportTemplateDownload(LoginRequiredMixin, SingleObjectMixin, View):
"""
Return the target :model:`reporting.ReportTemplate` template file for download.
"""
model = ReportTemplate
def get(self, *args, **kwargs):
self.object = self.get_object()
file_path = os.path.join(settings.MEDIA_ROOT, self.object.document.path)
if os.path.exists(file_path):
return FileResponse(
open(file_path, "rb"),
as_attachment=True,
filename=os.path.basename(file_path),
)
raise Http404
class GenerateReportJSON(LoginRequiredMixin, SingleObjectMixin, View):
"""
Generate a JSON report for an individual :model:`reporting.Report`.
"""
model = Report
def get(self, *args, **kwargs):
self.object = self.get_object()
logger.info(
"Generating JSON report for %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
engine = reportwriter.Reportwriter(self.object, template_loc=None)
json_report = engine.generate_json()
return HttpResponse(json_report, "application/json")
class GenerateReportDOCX(LoginRequiredMixin, SingleObjectMixin, View):
"""
Generate a DOCX report for an individual :model:`reporting.Report`.
"""
model = Report
def get(self, *args, **kwargs):
self.object = self.get_object()
logger.info(
"Generating DOCX report for %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
try:
report_name = generate_report_name(self.object)
engine = reportwriter.Reportwriter(self.object, template_loc=None)
# Get the template for this report
if self.object.docx_template:
report_template = self.object.docx_template
else:
report_config = ReportConfiguration.get_solo()
report_template = report_config.default_docx_template
if not report_template:
raise MissingTemplate
template_loc = report_template.document.path
# Check template's linting status
template_status = report_template.get_status()
if template_status in ("error", "failed"):
messages.error(
self.request,
"The selected report template has linting errors and cannot be used to render a DOCX document",
extra_tags="alert-danger",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
)
# Template available and passes linting checks, so proceed with generation
engine = reportwriter.Reportwriter(self.object, template_loc)
docx = engine.generate_word_docx()
response = HttpResponse(
content_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
response["Content-Disposition"] = f'attachment; filename="{report_name}.docx"'
docx.save(response)
# Send WebSocket message to update user's webpage
try:
async_to_sync(channel_layer.group_send)(
"report_{}".format(self.object.pk),
{
"type": "status_update",
"message": {"status": "success"},
},
)
except gaierror:
                # WebSockets are unavailable (unit testing)
pass
return response
except ZeroDivisionError:
logger.error(
"DOCX generation failed for %s %s and user %s because of an attempt to divide by zero in Jinja2",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.info(
self.request,
"Tip: Before performing math, check if the number is greater than zero",
extra_tags="alert-danger",
)
messages.error(
self.request,
"Word document generation failed because the selected template has Jinja2 code that attempts to divide by zero",
extra_tags="alert-danger",
)
except MissingTemplate:
logger.error(
"DOCX generation failed for %s %s and user %s because no template was configured",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"You do not have a Word template selected and have not configured a default template",
extra_tags="alert-danger",
)
except DocxPackageNotFoundError:
logger.exception(
"DOCX generation failed for %s %s and user %s because the template file was missing",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Your selected Word template could not be found on the server – try uploading it again",
extra_tags="alert-danger",
)
except FileNotFoundError as error:
logger.exception(
"DOCX generation failed for %s %s and user %s because an evidence file was missing",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Halted document generation because an evidence file is missing: {}".format(
error
),
extra_tags="alert-danger",
)
except UnrecognizedImageError as error:
logger.exception(
"DOCX generation failed for %s %s and user %s because of an unrecognized or corrupt image",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Encountered an error generating the document: {}".format(error)
.replace('"', "")
.replace("'", "`"),
extra_tags="alert-danger",
)
except Exception as error:
logger.exception(
"DOCX generation failed unexpectedly for %s %s and user %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Encountered an error generating the document: {}".format(error)
.replace('"', "")
.replace("'", "`"),
extra_tags="alert-danger",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
)
class GenerateReportXLSX(LoginRequiredMixin, SingleObjectMixin, View):
"""
Generate an XLSX report for an individual :model:`reporting.Report`.
"""
model = Report
def get(self, *args, **kwargs):
self.object = self.get_object()
logger.info(
"Generating XLSX report for %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
try:
report_name = generate_report_name(self.object)
engine = reportwriter.Reportwriter(self.object, template_loc=None)
output = io.BytesIO()
workbook = Workbook(output, {"in_memory": True})
engine.generate_excel_xlsx(workbook)
output.seek(0)
response = HttpResponse(
output.read(),
content_type="application/application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
)
response["Content-Disposition"] = f'attachment; filename="{report_name}.xlsx"'
output.close()
return response
except Exception as error:
logger.exception(
"XLSX generation failed unexpectedly for %s %s and user %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Encountered an error generating the spreadsheet: {}".format(error),
extra_tags="alert-danger",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
)
class GenerateReportPPTX(LoginRequiredMixin, SingleObjectMixin, View):
"""
Generate a PPTX report for an individual :model:`reporting.Report`.
"""
model = Report
def get(self, *args, **kwargs):
self.object = self.get_object()
logger.info(
"Generating PPTX report for %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
try:
report_name = generate_report_name(self.object)
engine = reportwriter.Reportwriter(self.object, template_loc=None)
# Get the template for this report
if self.object.pptx_template:
report_template = self.object.pptx_template
else:
report_config = ReportConfiguration.get_solo()
report_template = report_config.default_pptx_template
if not report_template:
raise MissingTemplate
template_loc = report_template.document.path
# Check template's linting status
template_status = report_template.get_status()
if template_status in ("error", "failed"):
messages.error(
self.request,
"The selected report template has linting errors and cannot be used to render a PPTX document",
extra_tags="alert-danger",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
)
# Template available and passes linting checks, so proceed with generation
engine = reportwriter.Reportwriter(self.object, template_loc)
pptx = engine.generate_powerpoint_pptx()
response = HttpResponse(
content_type="application/application/vnd.openxmlformats-officedocument.presentationml.presentation"
)
response["Content-Disposition"] = f'attachment; filename="{report_name}.pptx"'
pptx.save(response)
return response
except MissingTemplate:
logger.error(
"PPTX generation failed for %s %s and user %s because no template was configured",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"You do not have a PowerPoint template selected and have not configured a default template",
extra_tags="alert-danger",
)
except ValueError as exception:
logger.exception(
"PPTX generation failed for %s %s and user %s because the template could not be loaded as a PPTX",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
f"Your selected template could not be loaded as a PowerPoint template: {exception}",
extra_tags="alert-danger",
)
except PptxPackageNotFoundError:
logger.exception(
"PPTX generation failed for %s %s and user %s because the template file was missing",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Your selected PowerPoint template could not be found on the server – try uploading it again",
extra_tags="alert-danger",
)
except FileNotFoundError as error:
logger.exception(
"PPTX generation failed for %s %s and user %s because an evidence file was missing",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Halted document generation because an evidence file is missing: {}".format(
error
),
extra_tags="alert-danger",
)
except UnrecognizedImageError as error:
logger.exception(
"PPTX generation failed for %s %s and user %s because of an unrecognized or corrupt image",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Encountered an error generating the document: {}".format(error)
.replace('"', "")
.replace("'", "`"),
extra_tags="alert-danger",
)
except Exception as error:
logger.exception(
"PPTX generation failed unexpectedly for %s %s and user %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
messages.error(
self.request,
"Encountered an error generating the document: {}".format(error)
.replace('"', "")
.replace("'", "`"),
extra_tags="alert-danger",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
)
class GenerateReportAll(LoginRequiredMixin, SingleObjectMixin, View):
"""
Generate all report types for an individual :model:`reporting.Report`.
"""
model = Report
def get(self, *args, **kwargs):
self.object = self.get_object()
logger.info(
"Generating PPTX report for %s %s by request of %s",
self.object.__class__.__name__,
self.object.id,
self.request.user,
)
try:
report_name = generate_report_name(self.object)
engine = reportwriter.Reportwriter(self.object, template_loc=None)
# Get the templates for Word and PowerPoint
if self.object.docx_template:
docx_template = self.object.docx_template
else:
report_config = ReportConfiguration.get_solo()
docx_template = report_config.default_docx_template
if not docx_template:
raise MissingTemplate
docx_template = docx_template.document.path
if self.object.pptx_template:
pptx_template = self.object.pptx_template
else:
report_config = ReportConfiguration.get_solo()
pptx_template = report_config.default_pptx_template
if not pptx_template:
raise MissingTemplate
pptx_template = pptx_template.document.path
# Generate all types of reports
json_doc, docx_doc, xlsx_doc, pptx_doc = engine.generate_all_reports(
docx_template, pptx_template
)
# Convert the dict to pretty JSON output for the file
pretty_json = json.dumps(json_doc, indent=4)
# Create a zip file in memory and add the reports to it
zip_buffer = io.BytesIO()
with zipfile.ZipFile(zip_buffer, "a") as zf:
zf.writestr(f"{report_name}.json", pretty_json)
zf.writestr(f"{report_name}.docx", docx_doc.getvalue())
zf.writestr(f"{report_name}.xlsx", xlsx_doc.getvalue())
zf.writestr(f"{report_name}.pptx", pptx_doc.getvalue())
zip_buffer.seek(0)
# Return the buffer in the HTTP response
response = HttpResponse(content_type="application/x-zip-compressed")
response["Content-Disposition"] = f'attachment; filename="{report_name}.zip"'
response.write(zip_buffer.read())
return response
except MissingTemplate:
messages.error(
self.request,
"You do not have a PowerPoint template selected and have not configured a default template",
extra_tags="alert-danger",
)
except ValueError as exception:
messages.error(
self.request,
f"Your selected template could not be loaded as a PowerPoint template: {exception}",
extra_tags="alert-danger",
)
except DocxPackageNotFoundError:
messages.error(
self.request,
"Your selected Word template could not be found on the server – try uploading it again",
extra_tags="alert-danger",
)
except PptxPackageNotFoundError:
messages.error(
self.request,
"Your selected PowerPoint template could not be found on the server – try uploading it again",
extra_tags="alert-danger",
)
except Exception as error:
messages.error(
self.request,
"Encountered an error generating the document: {}".format(error),
extra_tags="alert-danger",
)
return HttpResponseRedirect(
reverse("reporting:report_detail", kwargs={"pk": self.object.pk})
)
# CBVs related to :model:`reporting.ReportFindingLink`
class ReportFindingLinkUpdate(LoginRequiredMixin, UpdateView):
"""
Update an individual instance of :model:`reporting.ReportFindingLink`.
**Context**
``cancel_link``
Link for the form's Cancel button to return to report's detail page
**Template**
    :template:`reporting/local_edit.html`
"""
model = ReportFindingLink
form_class = ReportFindingLinkUpdateForm
template_name = "reporting/local_edit.html"
success_url = reverse_lazy("reporting:reports")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse(
"reporting:report_detail", kwargs={"pk": self.object.report.pk}
)
return ctx
def form_valid(self, form):
        # Check if the assigned_to field has changed
        if "assigned_to" in form.changed_data:
            # Get the entry's current values (those being changed)
old_entry = ReportFindingLink.objects.get(pk=self.object.pk)
old_assignee = old_entry.assigned_to
# Notify new assignee over WebSockets
if "assigned_to" in form.changed_data:
new_users_assignments = {}
old_users_assignments = {}
# Only notify if the assignee is not the user who made the change
if self.request.user != self.object.assigned_to:
# Count the current user's total assignments
new_users_assignments = (
ReportFindingLink.objects.select_related(
"report", "report__project"
)
.filter(
Q(assigned_to=self.object.assigned_to)
& Q(report__complete=False)
& Q(complete=False)
)
.count()
+ 1
)
old_users_assignments = (
ReportFindingLink.objects.select_related(
"report", "report__project"
)
.filter(
Q(assigned_to=old_assignee)
& Q(report__complete=False)
& Q(complete=False)
)
.count()
- 1
)
try:
# Send a message to the assigned user
async_to_sync(channel_layer.group_send)(
"notify_{}".format(self.object.assigned_to),
{
"type": "task",
"message": {
"message": "You have been assigned to this finding for {}:\n{}".format(
self.object.report, self.object.title
),
"level": "info",
"title": "New Assignment",
},
"assignments": new_users_assignments,
},
)
except gaierror:
                        # WebSockets are unavailable (unit testing)
pass
if self.request.user != old_assignee and old_users_assignments:
try:
# Send a message to the unassigned user
async_to_sync(channel_layer.group_send)(
"notify_{}".format(old_assignee),
{
"type": "task",
"message": {
"message": "You have been unassigned from this finding for {}:\n{}".format(
self.object.report, self.object.title
),
"level": "info",
"title": "Assignment Change",
},
"assignments": old_users_assignments,
},
)
except gaierror:
                        # WebSockets are unavailable (unit testing)
pass
return super().form_valid(form)
def get_form(self, form_class=None):
form = super().get_form(form_class)
user_primary_keys = ProjectAssignment.objects.filter(
project=self.object.report.project
).values_list("operator", flat=True)
form.fields["assigned_to"].queryset = User.objects.filter(
id__in=user_primary_keys
)
return form
def get_success_url(self):
messages.success(
self.request,
"Successfully updated {}".format(self.get_object().title),
extra_tags="alert-success",
)
return reverse("reporting:report_detail", kwargs={"pk": self.object.report.id})
# CBVs related to :model:`reporting.Evidence`
class EvidenceDetailView(LoginRequiredMixin, DetailView):
"""
Display an individual instance of :model:`reporting.Evidence`.
**Template**
:template:`reporting/evidence_detail.html`
"""
model = Evidence
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
file_content = None
if os.path.isfile(self.object.document.path):
            if self.object.document.name.lower().endswith((".txt", ".log", ".md")):
filetype = "text"
file_content = []
temp = self.object.document.read().splitlines()
for line in temp:
try:
file_content.append(line.decode())
except Exception:
file_content.append(line)
            elif self.object.document.name.lower().endswith((".jpg", ".jpeg", ".png")):
filetype = "image"
else:
filetype = "unknown"
else:
filetype = "text"
file_content = []
file_content.append("FILE NOT FOUND")
ctx["filetype"] = filetype
ctx["evidence"] = self.object
ctx["file_content"] = file_content
return ctx
class EvidenceCreate(LoginRequiredMixin, CreateView):
"""
Create an individual :model:`reporting.Evidence` entry linked to an individual
:model:`reporting.ReportFindingLink`.
**Template**
:template:`reporting/evidence_form.html`
"""
model = Evidence
form_class = EvidenceForm
def get_template_names(self):
if "modal" in self.kwargs:
modal = self.kwargs["modal"]
if modal:
return ["reporting/evidence_form_modal.html"]
return ["reporting/evidence_form.html"]
return ["reporting/evidence_form.html"]
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
finding_pk = self.kwargs.get("pk")
self.evidence_queryset = Evidence.objects.filter(finding=finding_pk)
kwargs.update({"evidence_queryset": self.evidence_queryset})
self.finding_instance = get_object_or_404(ReportFindingLink, pk=finding_pk)
if "modal" in self.kwargs:
kwargs.update({"is_modal": True})
return kwargs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse(
"reporting:report_detail", kwargs={"pk": self.finding_instance.report.pk}
)
if "modal" in self.kwargs:
friendly_names = self.evidence_queryset.values_list(
"friendly_name", flat=True
)
            # Convert the queryset into a list to pass to JavaScript later
            used_friendly_names = list(friendly_names)
ctx["used_friendly_names"] = used_friendly_names
return ctx
def form_valid(self, form, **kwargs):
self.object = form.save(commit=False)
self.object.uploaded_by = self.request.user
self.object.finding = self.finding_instance
self.object.save()
if os.path.isfile(self.object.document.path):
messages.success(
self.request,
"Evidence uploaded successfully",
extra_tags="alert-success",
)
else:
messages.error(
self.request,
"Evidence file failed to upload",
extra_tags="alert-danger",
)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
if "modal" in self.kwargs:
return reverse("reporting:upload_evidence_modal_success")
return reverse("reporting:report_detail", args=(self.object.finding.report.pk,))
class EvidenceUpdate(LoginRequiredMixin, UpdateView):
"""
Update an individual instance of :model:`reporting.Evidence`.
**Context**
``cancel_link``
Link for the form's Cancel button to return to evidence's detail page
**Template**
:template:`reporting/evidence_form.html`
"""
model = Evidence
form_class = EvidenceForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
evidence_queryset = Evidence.objects.filter(finding=self.object.finding.pk)
kwargs.update({"evidence_queryset": evidence_queryset})
return kwargs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse(
"reporting:evidence_detail",
kwargs={"pk": self.object.pk},
)
return ctx
def get_success_url(self):
messages.success(
self.request,
"Successfully updated {}".format(self.get_object().friendly_name),
extra_tags="alert-success",
)
return reverse(
"reporting:report_detail", kwargs={"pk": self.object.finding.report.pk}
)
class EvidenceDelete(LoginRequiredMixin, DeleteView):
"""
Delete an individual instance of :model:`reporting.Evidence`.
**Context**
``object_type``
String describing what is to be deleted
``object_to_be_deleted``
To-be-deleted instance of :model:`reporting.Evidence`
``cancel_link``
Link for the form's Cancel button to return to evidence's detail page
**Template**
:template:`confirm_delete.html`
"""
model = Evidence
template_name = "confirm_delete.html"
def get_success_url(self):
message = "Successfully deleted the evidence and associated file"
        if os.path.isfile(self.object.document.path):
message = "Successfully deleted the evidence, but could not delete the associated file"
messages.success(
self.request,
message,
extra_tags="alert-success",
)
return reverse(
"reporting:report_detail", kwargs={"pk": self.object.finding.report.pk}
)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
queryset = kwargs["object"]
ctx["cancel_link"] = reverse(
"reporting:evidence_detail", kwargs={"pk": queryset.pk}
)
ctx["object_type"] = "evidence file (and associated file on disk)"
ctx["object_to_be_deleted"] = queryset.friendly_name
return ctx
# CBVs related to :model:`reporting.Finding`
class FindingNoteCreate(LoginRequiredMixin, CreateView):
"""
Create an individual instance of :model:`reporting.FindingNote`.
**Context**
``cancel_link``
Link for the form's Cancel button to return to finding's detail page
**Template**
:template:`note_form.html`
"""
model = FindingNote
form_class = FindingNoteForm
template_name = "note_form.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
finding_instance = get_object_or_404(Finding, pk=self.kwargs.get("pk"))
ctx["cancel_link"] = reverse(
"reporting:finding_detail", kwargs={"pk": finding_instance.pk}
)
return ctx
def get_success_url(self):
messages.success(
self.request,
"Successfully added your note to this finding",
extra_tags="alert-success",
)
return "{}#notes".format(
reverse("reporting:finding_detail", kwargs={"pk": self.object.finding.id})
)
def form_valid(self, form, **kwargs):
self.object = form.save(commit=False)
self.object.operator = self.request.user
self.object.finding_id = self.kwargs.get("pk")
self.object.save()
return super().form_valid(form)
class FindingNoteUpdate(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
"""
Update an individual instance of :model:`reporting.FindingNote`.
**Context**
``cancel_link``
Link for the form's Cancel button to return to finding's detail page
**Template**
:template:`note_form.html`
"""
model = FindingNote
form_class = FindingNoteForm
template_name = "note_form.html"
def test_func(self):
self.object = self.get_object()
return self.object.operator.id == self.request.user.id
def handle_no_permission(self):
messages.error(self.request, "You do not have permission to access that")
return redirect("home:dashboard")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["cancel_link"] = reverse(
"reporting:finding_detail", kwargs={"pk": self.object.finding.pk}
)
return ctx
def get_success_url(self):
messages.success(
self.request, "Successfully updated the note", extra_tags="alert-success"
)
return reverse("reporting:finding_detail", kwargs={"pk": self.object.finding.pk})
# CBVs related to :model:`reporting.LocalFindingNote`
class LocalFindingNoteCreate(LoginRequiredMixin, CreateView):
"""
Create an individual instance of :model:`reporting.LocalFindingNote`.
**Context**
``cancel_link``
        Link for the form's Cancel button to return to finding's edit page
**Template**
:template:`note_form.html`
"""
model = LocalFindingNote
form_class = LocalFindingNoteForm
template_name = "note_form.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
self.finding_instance = get_object_or_404(
ReportFindingLink, pk=self.kwargs.get("pk")
)
ctx["cancel_link"] = reverse(
"reporting:local_edit", kwargs={"pk": self.finding_instance.pk}
)
return ctx
def get_success_url(self):
messages.success(
self.request,
"Successfully added your note to this finding",
extra_tags="alert-success",
)
return reverse("reporting:local_edit", kwargs={"pk": self.object.finding.pk})
def form_valid(self, form, **kwargs):
self.object = form.save(commit=False)
self.object.operator = self.request.user
self.object.finding_id = self.kwargs.get("pk")
self.object.save()
return super().form_valid(form)
class LocalFindingNoteUpdate(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
"""
Update an individual instance of :model:`reporting.LocalFindingNote`.
**Context**
``cancel_link``
        Link for the form's Cancel button to return to finding's edit page
**Template**
:template:`note_form.html`
"""
model = LocalFindingNote
form_class = LocalFindingNoteForm
template_name = "note_form.html"
def test_func(self):
self.object = self.get_object()
return self.object.operator.id == self.request.user.id
def handle_no_permission(self):
messages.error(self.request, "You do not have permission to access that")
return redirect("home:dashboard")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
note_instance = get_object_or_404(LocalFindingNote, pk=self.kwargs.get("pk"))
ctx["cancel_link"] = reverse(
"reporting:local_edit", kwargs={"pk": note_instance.finding.id}
)
return ctx
def get_success_url(self):
messages.success(
self.request, "Successfully updated the note", extra_tags="alert-success"
)
return reverse("reporting:local_edit", kwargs={"pk": self.object.finding.pk})
| 35.229863
| 152
| 0.578291
|
b1c1365ed5886f806cb635c2bc0c991980ceb205
| 11,362
|
py
|
Python
|
bnpy/ioutil/ModelReader.py
|
co2meal/-bnpy-dev
|
74f69afde6c9dac8de4c074842df53ae87a15ac1
|
[
"BSD-3-Clause"
] | null | null | null |
bnpy/ioutil/ModelReader.py
|
co2meal/-bnpy-dev
|
74f69afde6c9dac8de4c074842df53ae87a15ac1
|
[
"BSD-3-Clause"
] | null | null | null |
bnpy/ioutil/ModelReader.py
|
co2meal/-bnpy-dev
|
74f69afde6c9dac8de4c074842df53ae87a15ac1
|
[
"BSD-3-Clause"
] | null | null | null |
'''
ModelReader.py
Load bnpy models from disk
See Also
-------
ModelWriter.py : save bnpy models to disk.
'''
import numpy as np
import scipy.io
import os
import glob
from ModelWriter import makePrefixForLap
from bnpy.allocmodel import AllocModelConstructorsByName
from bnpy.obsmodel import ObsModelConstructorsByName
from bnpy.util import toCArray, as1D
def getPrefixForLapQuery(taskpath, lapQuery):
''' Search among checkpoint laps for one nearest to query.
Returns
--------
prefix : str
For lap 1, prefix = 'Lap0001.000'.
For lap 5.5, prefix = 'Lap0005.500'.
lap : int
        lap checkpoint for saved params closest to lapQuery
'''
try:
saveLaps = np.loadtxt(os.path.join(taskpath, 'laps-saved-params.txt'))
except IOError:
fileList = glob.glob(os.path.join(taskpath, 'Lap*TopicModel.mat'))
if len(fileList) == 0:
fileList = glob.glob(os.path.join(taskpath, 'Lap*.log_prob_w'))
assert len(fileList) > 0
saveLaps = list()
for fpath in sorted(fileList):
basename = fpath.split(os.path.sep)[-1]
lapstr = basename[3:11]
saveLaps.append(float(lapstr))
saveLaps = np.sort(np.asarray(saveLaps))
if lapQuery is None:
bestLap = saveLaps[-1] # take final saved value
else:
distances = np.abs(lapQuery - saveLaps)
bestLap = saveLaps[np.argmin(distances)]
return makePrefixForLap(bestLap), bestLap
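# Minimal usage sketch (not part of the original module); the task path and the
# set of saved laps are hypothetical, and the expected output follows the
# docstring above.
#
#   prefix, lap = getPrefixForLapQuery('/results/MyTask/1', lapQuery=4)
#   # with checkpoints saved at laps 1, 5 and 10 this returns
#   # lap == 5 and prefix == 'Lap0005.000'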
def loadModelForLap(matfilepath, lapQuery):
''' Loads saved model with lap closest to provided lapQuery.
Returns
-------
model : bnpy.HModel
        Model object saved at checkpoint lap=bestLap.
bestLap : int
        lap checkpoint for saved model closest to lapQuery
'''
prefix, bestLap = getPrefixForLapQuery(matfilepath, lapQuery)
model = load_model(matfilepath, prefix=prefix)
return model, bestLap
def load_model(matfilepath, prefix='Best', lap=None):
''' Load model stored to disk by ModelWriter
Returns
------
model : bnpy.HModel
        Model object saved at the checkpoint indicated by prefix or lap.
'''
# Avoids circular import
import bnpy.HModel as HModel
if lap is not None:
prefix, _ = getPrefixForLapQuery(matfilepath, lap)
try:
obsModel = load_obs_model(matfilepath, prefix)
allocModel = load_alloc_model(matfilepath, prefix)
model = HModel(allocModel, obsModel)
except IOError as e:
if prefix == 'Best':
matList = glob.glob(os.path.join(matfilepath, '*TopicModel.mat'))
lpwList = glob.glob(os.path.join(matfilepath, '*.log_prob_w'))
if len(matList) > 0:
matList.sort() # ascending order, so most recent is last
prefix = matList[-1].split(os.path.sep)[-1][:11]
model = loadTopicModel(matfilepath, prefix=prefix)
elif len(lpwList) > 0:
                lpwList.sort()  # ascending order, so most recent is last
prefix = lpwList[-1].split(os.path.sep)[-1][:7]
else:
raise e
try:
model = loadTopicModel(matfilepath, prefix=prefix)
except IOError as e:
model = loadTopicModelFromMEDLDA(matfilepath, prefix=prefix)
return model
def load_alloc_model(matfilepath, prefix):
""" Load allocmodel stored to disk in bnpy .mat format.
Parameters
------
matfilepath : str
String file system path to folder where .mat files are stored.
        Usually this path is a "taskoutpath", such as where bnpy.run
saves its output.
prefix : str
Indicates which stored checkpoint to use.
Can look like 'Lap0005.000'.
Returns
------
allocModel : bnpy.allocmodel object
        This object has a valid set of global parameters
and valid hyperparameters that define its prior.
"""
apriorpath = os.path.join(matfilepath, 'AllocPrior.mat')
amodelpath = os.path.join(matfilepath, prefix + 'AllocModel.mat')
APDict = loadDictFromMatfile(apriorpath)
ADict = loadDictFromMatfile(amodelpath)
AllocConstr = AllocModelConstructorsByName[ADict['name']]
amodel = AllocConstr(ADict['inferType'], APDict)
amodel.from_dict(ADict)
return amodel
def load_obs_model(matfilepath, prefix):
""" Load observation model object stored to disk in bnpy mat format.
Parameters
------
matfilepath : str
String file system path to folder where .mat files are stored.
        Usually this path is a "taskoutpath", such as where bnpy.run
saves its output.
prefix : str
Indicates which stored checkpoint to use.
Can look like 'Lap0005.000'.
Returns
------
    obsModel : bnpy.obsmodel object
        This object has a valid set of global parameters
and valid hyperparameters that define its prior.
"""
obspriormatfile = os.path.join(matfilepath, 'ObsPrior.mat')
PriorDict = loadDictFromMatfile(obspriormatfile)
ObsConstr = ObsModelConstructorsByName[PriorDict['name']]
obsModel = ObsConstr(**PriorDict)
obsmodelpath = os.path.join(matfilepath, prefix + 'ObsModel.mat')
ParamDict = loadDictFromMatfile(obsmodelpath)
if obsModel.inferType == 'EM':
obsModel.setEstParams(**ParamDict)
else:
obsModel.setPostFactors(**ParamDict)
return obsModel
def loadDictFromMatfile(matfilepath):
''' Load dict of numpy arrays from a .mat-format file on disk.
This is a wrapper around scipy.io.loadmat,
    which puts the returned numpy arrays into a standard, C-aligned format.
Returns
--------
D : dict
Each key/value pair is a parameter name and a numpy array
loaded from the provided mat file.
We ensure before returning that each array has properties:
* C alignment
* Original 2D shape has been squeezed as much as possible
* (1,1) becomes a size=1 1D array
* (1,N) or (N,1) become 1D arrays
* flags.aligned is True
* flags.owndata is True
* dtype.byteorder is '='
Examples
-------
>>> import scipy.io
>>> Dorig = dict(scalar=5, scalar1DN1=np.asarray([3.14,]))
>>> Dorig['arr1DN3'] = np.asarray([1,2,3])
>>> scipy.io.savemat('Dorig.mat', Dorig, oned_as='row')
>>> D = loadDictFromMatfile('Dorig.mat')
>>> D['scalar']
array(5)
>>> D['scalar1DN1']
array(3.14)
>>> D['arr1DN3']
array([1, 2, 3])
'''
Dtmp = scipy.io.loadmat(matfilepath)
D = dict([x for x in Dtmp.items() if not x[0].startswith('__')])
for key in D:
if not isinstance(D[key], np.ndarray):
continue
x = D[key]
if x.size == 1 and isinstance(x[0], np.unicode_):
D[key] = str(x[0])
continue
if x.ndim == 2:
x = np.squeeze(x)
if str(x.dtype).count('int'):
arr = toCArray(x, dtype=np.int32)
else:
arr = toCArray(x, dtype=np.float64)
assert arr.dtype.byteorder == '='
assert arr.flags.aligned is True
assert arr.flags.owndata is True
D[key] = arr
return D
def loadWordCountMatrixForLap(matfilepath, lapQuery, toDense=True):
''' Load word counts
'''
prefix, bestLap = getPrefixForLapQuery(matfilepath, lapQuery)
_, WordCounts = loadTopicModel(matfilepath, prefix, returnWordCounts=1)
return WordCounts
def loadTopicModelFromMEDLDA(filepath,
prefix=None,
returnTPA=0):
''' Load topic model saved in medlda format.
'''
# Avoid circular import
import bnpy.HModel as HModel
assert prefix is not None
alphafilepath = os.path.join(filepath, prefix + '.alpha')
etafilepath = os.path.join(filepath, prefix + '.eta')
topicfilepath = os.path.join(filepath, prefix + '.log_prob_w')
alpha = float(np.loadtxt(alphafilepath))
eta = np.loadtxt(etafilepath)
logtopics = np.loadtxt(topicfilepath)
topics = np.exp(logtopics)
topics += 1e-9
topics /= topics.sum(axis=1)[:, np.newaxis]
assert np.all(np.isfinite(topics))
if returnTPA:
K = topics.shape[0]
probs = 1.0 / K * np.ones(K)
return topics, probs, alpha, eta
infAlg = 'VB'
aPriorDict = dict(alpha=alpha)
amodel = AllocModelConstructorsByName[
'FiniteTopicModel'](infAlg, aPriorDict)
omodel = ObsModelConstructorsByName['Mult'](infAlg,
lam=0.001, D=topics.shape[1])
hmodel = HModel(amodel, omodel)
hmodel.obsModel.set_global_params(topics=topics, nTotalTokens=1000)
return hmodel
def loadTopicModel(matfilepath, prefix=None,
returnWordCounts=0, returnTPA=0):
''' Load saved topic model
'''
# avoids circular import
from bnpy.HModel import HModel
if len(glob.glob(os.path.join(matfilepath, "*.log_prob_w"))) > 0:
return loadTopicModelFromMEDLDA(matfilepath, prefix,
returnTPA=returnTPA)
if prefix is not None:
matfilepath = os.path.join(matfilepath, prefix + 'TopicModel.mat')
Mdict = loadDictFromMatfile(matfilepath)
if 'SparseWordCount_data' in Mdict:
data = np.asarray(Mdict['SparseWordCount_data'], dtype=np.float64)
K = int(Mdict['K'])
vocab_size = int(Mdict['vocab_size'])
try:
indices = Mdict['SparseWordCount_indices']
indptr = Mdict['SparseWordCount_indptr']
WordCounts = scipy.sparse.csr_matrix((data, indices, indptr),
shape=(K, vocab_size))
except KeyError:
rowIDs = Mdict['SparseWordCount_i'] - 1
colIDs = Mdict['SparseWordCount_j'] - 1
WordCounts = scipy.sparse.csr_matrix((data, (rowIDs, colIDs)),
shape=(K, vocab_size))
Mdict['WordCounts'] = WordCounts.toarray()
if returnTPA:
if 'WordCounts' in Mdict:
topics = Mdict['WordCounts'] + Mdict['lam']
else:
topics = Mdict['topics']
K = topics.shape[0]
try:
probs = Mdict['probs']
except KeyError:
probs = (1.0 / K) * np.ones(K)
try:
alpha = float(Mdict['alpha'])
except KeyError:
if 'alpha' in os.environ:
alpha = float(os.environ['alpha'])
else:
raise ValueError('Unknown parameter alpha')
if 'eta' in Mdict:
return topics, probs, alpha, as1D(toCArray(Mdict['eta']))
return topics, probs, alpha
infAlg = 'VB'
if 'gamma' in Mdict:
aPriorDict = dict(alpha=Mdict['alpha'], gamma=Mdict['gamma'])
HDPTopicModel = AllocModelConstructorsByName['HDPTopicModel']
amodel = HDPTopicModel(infAlg, aPriorDict)
else:
FiniteTopicModel = AllocModelConstructorsByName['FiniteTopicModel']
amodel = FiniteTopicModel(infAlg, dict(alpha=Mdict['alpha']))
omodel = ObsModelConstructorsByName['Mult'](infAlg, **Mdict)
hmodel = HModel(amodel, omodel)
hmodel.set_global_params(**Mdict)
if returnWordCounts:
return hmodel, Mdict['WordCounts']
return hmodel
| 34.017964
| 78
| 0.617321
|
fb24c6fb6c0180add67d9e0b3bd1850752dafa7d
| 18,370
|
py
|
Python
|
old/determined-xd/model_grid.py
|
rtu715/NAS-Bench-360
|
d075006848c664371855c34082b0a00cda62be67
|
[
"MIT"
] | 10
|
2021-06-15T17:48:34.000Z
|
2022-02-23T18:34:28.000Z
|
old/determined-xd/model_grid.py
|
rtu715/NAS-Bench-360
|
d075006848c664371855c34082b0a00cda62be67
|
[
"MIT"
] | 1
|
2021-11-12T15:12:38.000Z
|
2021-11-12T19:38:00.000Z
|
old/determined-xd/model_grid.py
|
rtu715/NAS-Bench-360
|
d075006848c664371855c34082b0a00cda62be67
|
[
"MIT"
] | 1
|
2021-11-15T04:07:17.000Z
|
2021-11-15T04:07:17.000Z
|
import tempfile
from typing import Any, Dict, Sequence, Tuple, Union, cast
from functools import partial, reduce
import operator
import boto3
import os
import json
import numpy as np
import torch
import torchvision
from torch import nn
from torchvision import transforms
import torch.nn.functional as F
from determined.pytorch import DataLoader, PyTorchTrial, PyTorchTrialContext, LRScheduler
#from backbone_grid_pde import Backbone_Grid
from backbone_grid_unet import Backbone_Grid, Tiny_Backbone_Grid
from backbone_grid_wrn import Backbone
from utils_grid import LpLoss, MatReader, UnitGaussianNormalizer, LogCoshLoss
from utils_grid import create_grid, calculate_mae
from xd.chrysalis import Chrysalis
from xd.darts import Supernet
from xd.nas import MixedOptimizer
from xd.ops import Conv
from data_utils.protein_io import load_list
from data_utils.protein_gen import PDNetDataset
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
class AttrDict(dict):
    '''Auxiliary class for hyperparameters'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class XDTrial(PyTorchTrial):
'''The Main Class'''
def __init__(self, trial_context: PyTorchTrialContext) -> None:
self.context = trial_context
# self.data_config = trial_context.get_data_config()
self.hparams = AttrDict(trial_context.get_hparams())
self.last_epoch = 0
# Create a unique download directory for each rank so they don't overwrite each other.
self.download_directory = self.download_data_from_s3()
        # Define the loss function; the PDE task uses LpLoss
if self.hparams.task == 'pde':
self.grid, self.s = create_grid(self.hparams.sub)
self.criterion = LpLoss(size_average=False)
self.in_channels = 3
elif self.hparams.task == 'protein':
self.criterion = nn.MSELoss(reduction='mean')
#self.criterion = LogCoshLoss()
#error is reported via MAE
self.error = nn.L1Loss(reduction='sum')
self.in_channels = 57
else:
raise NotImplementedError
# Changing our backbone
#self.backbone = Backbone_Grid(12, 32, 5)
#self.backbone = Backbone_Grid(self.in_channels, 32, 1)
self.backbone = Backbone(16, 1, 2, self.in_channels, 0.0)
self.chrysalis, self.original = Chrysalis.metamorphosize(self.backbone), self.backbone
self.patch_modules = [(n,m) for n, m in self.chrysalis.named_modules() if
hasattr(m, 'kernel_size') and type(m.kernel_size) == tuple and type(m) == Conv(len(m.kernel_size)) and m.kernel_size[0]!=1]
print(self.patch_modules)
'''
arch_kwargs = {'kmatrix_depth':self.hparams.kmatrix_depth,
'max_kernel_size': self.hparams.max_kernel_size,
'base': 2,
'global_biasing': False,
'channel_gating': False,
'warm_start': True}
'''
arch_kwargs = {
'kmatrix_depth': self.hparams.kmatrix_depth,
'max_kernel_size': self.hparams.max_kernel_size,
'global_biasing': False,
'channel_gating': False,
'base': 2,
'fixed': (False, False, False),
}
#X = torch.zeros([self.context.get_per_slot_batch_size(), self.s, self.s, 3])
#named_modules = []
#for name, layer in self.chrysalis.named_modules():
#if isinstance(layer, torch.nn.Conv2d):
#named_modules.append((name, layer))
if self.hparams.patch:
#self.chrysalis.patch_conv(X[:1], **arch_kwargs)
X, _ = next(iter(self.build_training_data_loader()))
self.chrysalis.patch_conv(X[:1], named_modules=self.patch_modules, **arch_kwargs)
else:
self.hparams.arch_lr = 0.0
self.model = self.context.wrap_model(self.chrysalis)
total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)/ 1e6
print('Parameter size in MB: ', total_params)
total_params = sum(p.numel() for p in self.backbone.parameters() if p.requires_grad)/ 1e6
print('Parameter size in MB: ', total_params)
'''
Definition of optimizers, no Adam implementation
'''
if self.hparams.momentum:
momentum = partial(torch.optim.SGD, momentum=self.hparams.momentum, nesterov=True)
else:
momentum = partial(torch.optim.SGD)
opts = [
momentum(self.model.model_weights(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay)]
if self.hparams.arch_lr:
arch_opt = torch.optim.Adam if self.hparams.arch_adam else momentum
opts.append(arch_opt(self.model.arch_params(), lr=self.hparams.arch_lr, weight_decay=0.0 if self.hparams.arch_adam else self.hparams.weight_decay))
optimizer = MixedOptimizer(opts)
self.opt = self.context.wrap_optimizer(optimizer)
sched_groups = [self.weight_sched if g['params'][0] in set(self.model.model_weights()) else self.arch_sched for g in
optimizer.param_groups]
self.lr_scheduler = self.context.wrap_lr_scheduler(
lr_scheduler=torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=sched_groups,
last_epoch=self.hparams.start_epoch-1
),
step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
)
def weight_sched(self, epoch) -> Any:
# deleted scheduling for different architectures
if self.hparams.epochs != 200:
return 0.2 ** (epoch >= int(0.3 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.6 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.8 * self.hparams.epochs))
print('using original weight schedule')
return 0.2 ** (epoch >= 60) * 0.2 ** (epoch >= 120) * 0.2 ** (epoch >=160)
def arch_sched(self, epoch) -> Any:
return 0.0 if epoch < self.hparams.warmup_epochs or epoch > self.hparams.epochs-self.hparams.cooldown_epochs else self.weight_sched(epoch)
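    # Worked example (added for illustration, not part of the original trial):
    # with the default 200-epoch schedule, weight_sched steps the multiplier down as
    #   epoch   0-59  -> 1.0
    #   epoch  60-119 -> 0.2
    #   epoch 120-159 -> 0.04
    #   epoch 160+    -> 0.008
    # while arch_sched returns 0.0 during the warmup and cooldown epochs and
    # otherwise follows the same schedule.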
def download_data_from_s3(self):
'''Download pde data/protein data from s3 to store in temp directory'''
s3_bucket = self.context.get_data_config()["bucket"]
download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
os.makedirs(download_directory, exist_ok=True)
if self.hparams.task == 'pde':
data_files = ["piececonst_r421_N1024_smooth1.mat", "piececonst_r421_N1024_smooth2.mat"]
s3_path = None
elif self.hparams.task == 'protein':
data_files = ['protein.zip']
data_dir = download_directory
self.all_feat_paths = [data_dir + '/deepcov/features/',
data_dir + '/psicov/features/', data_dir + '/cameo/features/']
self.all_dist_paths = [data_dir + '/deepcov/distance/',
data_dir + '/psicov/distance/', data_dir + '/cameo/distance/']
s3_path = None
else:
raise NotImplementedError
s3 = boto3.client("s3")
for data_file in data_files:
filepath = os.path.join(download_directory, data_file)
s3_loc = os.path.join(s3_path, data_file) if s3_path is not None else data_file
if not os.path.exists(filepath):
s3.download_file(s3_bucket, s3_loc, filepath)
return download_directory
def build_training_data_loader(self) -> DataLoader:
if self.hparams.task == 'pde':
TRAIN_PATH = os.path.join(self.download_directory, 'piececonst_r421_N1024_smooth1.mat')
self.reader = MatReader(TRAIN_PATH)
s = self.s
r = self.hparams["sub"]
ntrain = 1000
ntest = 100
if self.hparams.train:
x_train = self.reader.read_field('coeff')[:ntrain - ntest, ::r, ::r][:, :s, :s]
y_train = self.reader.read_field('sol')[:ntrain - ntest, ::r, ::r][:, :s, :s]
self.x_normalizer = UnitGaussianNormalizer(x_train)
x_train = self.x_normalizer.encode(x_train)
self.y_normalizer = UnitGaussianNormalizer(y_train)
y_train = self.y_normalizer.encode(y_train)
ntrain = ntrain - ntest
x_train = torch.cat([x_train.reshape(ntrain, s, s, 1), self.grid.repeat(ntrain, 1, 1, 1)], dim=3)
train_data = torch.utils.data.TensorDataset(x_train, y_train)
else:
x_train = self.reader.read_field('coeff')[:ntrain, ::r, ::r][:, :s, :s]
y_train = self.reader.read_field('sol')[:ntrain, ::r, ::r][:, :s, :s]
self.x_normalizer = UnitGaussianNormalizer(x_train)
x_train = self.x_normalizer.encode(x_train)
self.y_normalizer = UnitGaussianNormalizer(y_train)
y_train = self.y_normalizer.encode(y_train)
x_train = torch.cat([x_train.reshape(ntrain, s, s, 1), self.grid.repeat(ntrain, 1, 1, 1)], dim=3)
train_data = torch.utils.data.TensorDataset(x_train, y_train)
elif self.hparams.task == 'protein':
os.chdir(self.download_directory)
import zipfile
with zipfile.ZipFile('protein.zip', 'r') as zip_ref:
zip_ref.extractall()
self.deepcov_list = load_list('deepcov.lst', -1)
self.length_dict = {}
for pdb in self.deepcov_list:
(ly, seqy, cb_map) = np.load(
'deepcov/distance/' + pdb + '-cb.npy',
allow_pickle=True)
self.length_dict[pdb] = ly
if self.hparams.train:
train_pdbs = self.deepcov_list[100:]
train_data = PDNetDataset(train_pdbs, self.all_feat_paths, self.all_dist_paths,
128, 10, self.context.get_per_slot_batch_size(), 57,
label_engineering = '16.0')
else:
train_pdbs = self.deepcov_list[:]
train_data = PDNetDataset(train_pdbs, self.all_feat_paths, self.all_dist_paths,
128, 10, self.context.get_per_slot_batch_size(), 57,
label_engineering = '16.0')
else:
print('no such dataset')
raise NotImplementedError
train_queue = DataLoader(
train_data,
batch_size=self.context.get_per_slot_batch_size(),
shuffle=True,
num_workers=2,
)
return train_queue
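    # Shape sketch for the PDE branch above (illustration only, not original code):
    # with sub-sampling rate r the coefficient field is (ntrain, s, s); appending the
    # spatial grid (assumed here to contribute 2 coordinate channels) gives a network
    # input of shape (ntrain, s, s, 3), matching self.in_channels = 3 from __init__.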
def build_validation_data_loader(self) -> DataLoader:
if self.hparams.task == 'pde':
ntrain = 1000
ntest = 100
s = self.s
r = self.hparams["sub"]
if self.hparams.train:
x_test = self.reader.read_field('coeff')[ntrain - ntest:ntrain, ::r, ::r][:, :s, :s]
y_test = self.reader.read_field('sol')[ntrain - ntest:ntrain, ::r, ::r][:, :s, :s]
x_test = self.x_normalizer.encode(x_test)
x_test = torch.cat([x_test.reshape(ntest, s, s, 1), self.grid.repeat(ntest, 1, 1, 1)], dim=3)
else:
TEST_PATH = os.path.join(self.download_directory, 'piececonst_r421_N1024_smooth2.mat')
reader = MatReader(TEST_PATH)
x_test = reader.read_field('coeff')[:ntest, ::r, ::r][:, :s, :s]
y_test = reader.read_field('sol')[:ntest, ::r, ::r][:, :s, :s]
x_test = self.x_normalizer.encode(x_test)
x_test = torch.cat([x_test.reshape(ntest, s, s, 1), self.grid.repeat(ntest, 1, 1, 1)], dim=3)
valid_queue = DataLoader(torch.utils.data.TensorDataset(x_test, y_test),
batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2,)
elif self.hparams.task == 'protein':
if self.hparams.train:
valid_pdbs = self.deepcov_list[:100]
valid_data = PDNetDataset(valid_pdbs, self.all_feat_paths, self.all_dist_paths,
128, 10, self.context.get_per_slot_batch_size(), 57,
label_engineering = '16.0')
valid_queue = DataLoader(valid_data, batch_size=self.hparams.eval_batch_size,
shuffle=True, num_workers=2)
else:
psicov_list = load_list('psicov.lst')
psicov_length_dict = {}
for pdb in psicov_list:
(ly, seqy, cb_map) = np.load('psicov/distance/' + pdb + '-cb.npy',
allow_pickle=True)
psicov_length_dict[pdb] = ly
self.my_list = psicov_list
self.length_dict = psicov_length_dict
                # note: when testing, the batch size should be different
test_data = PDNetDataset(self.my_list, self.all_feat_paths, self.all_dist_paths,
512, 10, 1, 57, label_engineering = None)
valid_queue = DataLoader(test_data, batch_size=2, shuffle=True, num_workers=0)
else:
print('no such dataset')
raise NotImplementedError
return valid_queue
'''
Train and Evaluate Methods
'''
def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
x_train, y_train = batch
self.model.train()
logits = self.model(x_train)
if self.hparams.task == 'pde':
self.y_normalizer.cuda()
target = self.y_normalizer.decode(y_train)
logits = self.y_normalizer.decode(logits.squeeze())
loss = self.criterion(logits.view(logits.size(0), -1), target.view(logits.size(0), -1))
mae = 0.0
elif self.hparams.task == 'protein':
loss = self.criterion(logits.squeeze(), y_train.squeeze())
mae = F.l1_loss(logits.squeeze(), y_train.squeeze(), reduction='mean').item()
self.context.backward(loss)
self.context.step_optimizer(self.opt)
return {
'loss': loss,
'MAE': mae,
}
def evaluate_full_dataset(
self, data_loader: torch.utils.data.DataLoader
) -> Dict[str, Any]:
        # evaluate on the held-out test proteins, not the validation procedure
if self.hparams.task == 'protein' and not self.hparams.train:
return self.evaluate_test_protein(data_loader)
loss_sum = 0
error_sum = 0
num_batches = 0
with torch.no_grad():
for batch in data_loader:
batch = self.context.to_device(batch)
input, target = batch
num_batches += 1
logits = self.model(input)
if self.hparams.task == 'pde':
self.y_normalizer.cuda()
logits = self.y_normalizer.decode(logits.squeeze())
loss = self.criterion(logits.view(logits.size(0), -1), target.view(target.size(0), -1)).item()
loss = loss / logits.size(0)
error = 0
elif self.hparams.task == 'protein':
logits = logits.squeeze()
target = target.squeeze()
loss = self.criterion(logits, target)
mae = F.l1_loss(logits, target, reduction='mean')
error_sum += mae.item()
#target, logits, num = filter_MAE(target, logits, 8.0)
#error = self.error(logits, target)
#error = error / num
loss_sum += loss
results = {
"validation_loss": loss_sum / num_batches,
"MAE": error_sum / num_batches,
}
return results
def evaluate_test_protein(
self, data_loader: torch.utils.data.DataLoader
) -> Dict[str, Any]:
'''performs evaluation on protein'''
LMAX = 512 #psicov constant
pad_size = 10
self.model.cuda()
with torch.no_grad():
P = []
targets = []
for batch in data_loader:
batch = self.context.to_device(batch)
data, target = batch
for i in range(data.size(0)):
targets.append(
np.expand_dims(
target.cpu().numpy()[i].transpose(1,2,0), axis=0))
out = self.model.forward_window(data, 128)
P.append(out.cpu().numpy().transpose(0,2,3,1))
# Combine P, convert to numpy
P = np.concatenate(P, axis=0)
Y = np.full((len(targets), LMAX, LMAX, 1), np.nan)
for i, xy in enumerate(targets):
Y[i, :, :, 0] = xy[0, :, :, 0]
# Average the predictions from both triangles
for j in range(0, len(P[0, :, 0, 0])):
for k in range(j, len(P[0, :, 0, 0])):
P[:, j, k, :] = (P[:, k, j, :] + P[:, j, k, :]) / 2.0
P[P < 0.01] = 0.01
# Remove padding, i.e. shift up and left by int(pad_size/2)
P[:, :LMAX - pad_size, :LMAX - pad_size, :] = P[:, int(pad_size / 2): LMAX - int(pad_size / 2),
int(pad_size / 2): LMAX - int(pad_size / 2), :]
Y[:, :LMAX - pad_size, :LMAX - pad_size, :] = Y[:, int(pad_size / 2): LMAX - int(pad_size / 2),
int(pad_size / 2): LMAX - int(pad_size / 2), :]
print('')
print('Evaluating distances..')
lr8, mlr8, lr12, mlr12 = calculate_mae(P, Y, self.my_list, self.length_dict)
return {
'mae': lr8,
'mlr8': mlr8,
'mae12': lr12,
'mlr12': mlr12,
}
| 39.085106
| 167
| 0.566304
|
dcc43b5406c4cf234cc14dcbc3b846b2ca1fa52f
| 59,609
|
py
|
Python
|
src/qibo/abstractions/gates.py
|
daxkoh/qibo
|
5b98a7442cd314f095adf6217fef03308fb13ece
|
[
"Apache-2.0"
] | null | null | null |
src/qibo/abstractions/gates.py
|
daxkoh/qibo
|
5b98a7442cd314f095adf6217fef03308fb13ece
|
[
"Apache-2.0"
] | null | null | null |
src/qibo/abstractions/gates.py
|
daxkoh/qibo
|
5b98a7442cd314f095adf6217fef03308fb13ece
|
[
"Apache-2.0"
] | 1
|
2022-03-28T17:52:46.000Z
|
2022-03-28T17:52:46.000Z
|
# -*- coding: utf-8 -*-
# @authors: S. Carrazza and A. Garcia
import math
from abc import abstractmethod
from qibo.config import raise_error, EINSUM_CHARS
from typing import Dict, List, Optional, Tuple
from qibo.abstractions.abstract_gates import Gate, Channel, SpecialGate, ParametrizedGate
QASM_GATES = {"h": "H", "x": "X", "y": "Y", "z": "Z",
"rx": "RX", "ry": "RY", "rz": "RZ",
"u1": "U1", "u2": "U2", "u3": "U3",
"cx": "CNOT", "swap": "SWAP", "fswap": "FSWAP", "cz": "CZ",
"crx": "CRX", "cry": "CRY", "crz": "CRZ",
"cu1": "CU1", "cu3": "CU3",
"ccx": "TOFFOLI", "id": "I", "s": "S",
"sdg": "SDG", "t": "T", "tdg": "TDG"}
PARAMETRIZED_GATES = {"rx", "ry", "rz", "u1", "u2", "u3",
"crx", "cry", "crz", "cu1", "cu3"}
class H(Gate):
"""The Hadamard gate.
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super(H, self).__init__()
self.name = "h"
self.target_qubits = (q,)
self.init_args = [q]
class X(Gate):
"""The Pauli X gate.
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super(X, self).__init__()
self.name = "x"
self.target_qubits = (q,)
self.init_args = [q]
@Gate.check_controls
def controlled_by(self, *q):
"""Fall back to CNOT and Toffoli if there is one or two controls."""
if len(q) == 1:
gate = getattr(self.module, "CNOT")(q[0], self.target_qubits[0])
elif len(q) == 2:
gate = getattr(self.module, "TOFFOLI")(q[0], q[1], self.target_qubits[0])
else:
gate = super(X, self).controlled_by(*q)
return gate
def decompose(self, *free: int, use_toffolis: bool = True) -> List[Gate]:
"""Decomposes multi-control ``X`` gate to one-qubit, ``CNOT`` and ``TOFFOLI`` gates.
Args:
free: Ids of free qubits to use for the gate decomposition.
use_toffolis: If ``True`` the decomposition contains only ``TOFFOLI`` gates.
If ``False`` a congruent representation is used for ``TOFFOLI`` gates.
See :class:`qibo.abstractions.gates.TOFFOLI` for more details on this representation.
Returns:
List with one-qubit, ``CNOT`` and ``TOFFOLI`` gates that have the
same effect as applying the original multi-control gate.
"""
if set(free) & set(self.qubits):
raise_error(ValueError, "Cannot decompose multi-control X gate if free "
"qubits coincide with target or controls.")
if self._nqubits is not None:
for q in free:
if q >= self.nqubits:
raise_error(ValueError, "Gate acts on {} qubits but {} was given "
"as free qubit.".format(self.nqubits, q))
controls = self.control_qubits
target = self.target_qubits[0]
m = len(controls)
if m < 3:
return [self.__class__(target).controlled_by(*controls)]
decomp_gates = []
n = m + 1 + len(free)
TOFFOLI = self.module.TOFFOLI
if (n >= 2 * m - 1) and (m >= 3):
gates1 = [TOFFOLI(controls[m - 2 - i],
free[m - 4 - i],
free[m - 3 - i]
).congruent(use_toffolis=use_toffolis)
for i in range(m - 3)]
gates2 = TOFFOLI(controls[0], controls[1], free[0]
).congruent(use_toffolis=use_toffolis)
first_toffoli = TOFFOLI(controls[m - 1], free[m - 3], target)
decomp_gates.append(first_toffoli)
for gates in gates1:
decomp_gates.extend(gates)
decomp_gates.extend(gates2)
for gates in gates1[::-1]:
decomp_gates.extend(gates)
elif len(free) >= 1:
m1 = n // 2
free1 = controls[m1:] + (target,) + tuple(free[1:])
x1 = self.__class__(free[0]).controlled_by(*controls[:m1])
part1 = x1.decompose(*free1, use_toffolis=use_toffolis)
free2 = controls[:m1] + tuple(free[1:])
controls2 = controls[m1:] + (free[0],)
x2 = self.__class__(target).controlled_by(*controls2)
part2 = x2.decompose(*free2, use_toffolis=use_toffolis)
decomp_gates = [*part1, *part2]
else: # pragma: no cover
# impractical case
raise_error(NotImplementedError, "X decomposition not implemented "
"for zero free qubits.")
decomp_gates.extend(decomp_gates)
return decomp_gates
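# Usage sketch for the decomposition above (added for illustration; the qubit ids
# and the free work qubit are hypothetical):
#
#   mcx = X(0).controlled_by(3, 4, 5, 6)
#   gate_list = mcx.decompose(7)   # a list of one-qubit, CNOT and TOFFOLI gates
#   # applying gate_list in order reproduces the effect of the 4-control X gate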
class Y(Gate):
"""The Pauli Y gate.
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super(Y, self).__init__()
self.name = "y"
self.target_qubits = (q,)
self.init_args = [q]
class Z(Gate):
"""The Pauli Z gate.
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super(Z, self).__init__()
self.name = "z"
self.target_qubits = (q,)
self.init_args = [q]
@Gate.check_controls
def controlled_by(self, *q):
"""Fall back to CZ if there is only one control."""
if len(q) == 1:
gate = getattr(self.module, "CZ")(q[0], self.target_qubits[0])
else:
gate = super(Z, self).controlled_by(*q)
return gate
class S(Gate):
"""The S gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 \\\\
0 & i \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super().__init__()
self.name = "s"
self.target_qubits = (q,)
self.init_args = [q]
class SDG(Gate):
"""The conjugate transpose of the S gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 \\\\
0 & -i \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super().__init__()
self.name = "sdg"
self.target_qubits = (q,)
self.init_args = [q]
class T(Gate):
"""The T gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 \\\\
0 & e^{i \\pi / 4} \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super().__init__()
self.name = "t"
self.target_qubits = (q,)
self.init_args = [q]
class TDG(Gate):
"""The conjugate transpose of the T gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 \\\\
0 & e^{-i \\pi / 4} \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
"""
def __init__(self, q):
super().__init__()
self.name = "tdg"
self.target_qubits = (q,)
self.init_args = [q]
class I(Gate):
"""The identity gate.
Args:
*q (int): the qubit id numbers.
"""
def __init__(self, *q):
super(I, self).__init__()
self.name = "id"
self.target_qubits = tuple(q)
self.init_args = q
class Align(Gate):
def __init__(self, *q):
super(Align, self).__init__()
self.name = "align"
self.target_qubits = tuple(q)
self.init_args = q
class M(Gate):
"""The Measure Z gate.
Args:
*q (int): id numbers of the qubits to measure.
It is possible to measure multiple qubits using ``gates.M(0, 1, 2, ...)``.
            If the qubits to measure are held in an iterable (e.g. a list) the ``*``
operator can be used, for example ``gates.M(*[0, 1, 4])`` or
``gates.M(*range(5))``.
register_name (str): Optional name of the register to distinguish it
from other registers when used in circuits.
collapse (bool): Collapse the state vector after the measurement is
performed. Can be used only for single shot measurements.
If ``True`` the collapsed state vector is returned. If ``False``
the measurement result is returned.
p0 (dict): Optional bitflip probability map. Can be:
A dictionary that maps each measured qubit to the probability
that it is flipped, a list or tuple that has the same length
as the tuple of measured qubits or a single float number.
If a single float is given the same probability will be used
for all qubits.
p1 (dict): Optional bitflip probability map for asymmetric bitflips.
Same as ``p0`` but controls the 1->0 bitflip probability.
If ``p1`` is ``None`` then ``p0`` will be used both for 0->1 and
1->0 bitflips.
"""
def __init__(self, *q, register_name: Optional[str] = None,
collapse: bool = False,
p0: Optional["ProbsType"] = None,
p1: Optional["ProbsType"] = None):
super(M, self).__init__()
self.name = "measure"
self.target_qubits = q
self.register_name = register_name
self.collapse = collapse
self.result = None
self._symbol = None
self.init_args = q
self.init_kwargs = {"register_name": register_name,
"collapse": collapse,
"p0": p0, "p1": p1}
if collapse and (p0 is not None or p1 is not None):
raise_error(NotImplementedError, "Bitflip measurement noise is not "
"available when collapsing.")
if p1 is None: p1 = p0
if p0 is None: p0 = p1
self.bitflip_map = (self._get_bitflip_map(p0),
self._get_bitflip_map(p1))
@staticmethod
def _get_bitflip_tuple(qubits: Tuple[int], probs: "ProbsType"
) -> Tuple[float]:
if isinstance(probs, float):
if probs < 0 or probs > 1:
raise_error(ValueError, "Invalid bitflip probability {}."
"".format(probs))
return len(qubits) * (probs,)
if isinstance(probs, (tuple, list)):
if len(probs) != len(qubits):
raise_error(ValueError, "{} qubits were measured but the given "
"bitflip probability list contains {} "
"values.".format(
len(qubits), len(probs)))
return tuple(probs)
if isinstance(probs, dict):
diff = set(probs.keys()) - set(qubits)
if diff:
raise_error(KeyError, "Bitflip map contains {} qubits that are "
"not measured.".format(diff))
return tuple(probs[q] if q in probs else 0.0 for q in qubits)
raise_error(TypeError, "Invalid type {} of bitflip map.".format(probs))
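    # Behaviour sketch for the helper above (illustrative values only):
    #   _get_bitflip_tuple((0, 1, 2), 0.1)      -> (0.1, 0.1, 0.1)
    #   _get_bitflip_tuple((0, 1), [0.1, 0.3])  -> (0.1, 0.3)
    #   _get_bitflip_tuple((0, 1), {0: 0.2})    -> (0.2, 0.0)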
@staticmethod
def einsum_string(qubits, nqubits, measuring=False):
"""Generates einsum string for partial trace of density matrices.
Args:
qubits (list): Set of qubit ids that are traced out.
nqubits (int): Total number of qubits in the state.
measuring (bool): If True non-traced-out indices are multiplied and
the output has shape (nqubits - len(qubits),).
If False the output has shape 2 * (nqubits - len(qubits),).
Returns:
            String to use in einsum for performing a partial trace of a
            density matrix.
"""
if (2 - int(measuring)) * nqubits > len(EINSUM_CHARS): # pragma: no cover
# case not tested because it requires large instance
raise_error(NotImplementedError, "Not enough einsum characters.")
left_in, right_in, left_out, right_out = [], [], [], []
for i in range(nqubits):
left_in.append(EINSUM_CHARS[i])
if i in qubits:
right_in.append(EINSUM_CHARS[i])
else:
left_out.append(EINSUM_CHARS[i])
if measuring:
right_in.append(EINSUM_CHARS[i])
else:
right_in.append(EINSUM_CHARS[i + nqubits])
right_out.append(EINSUM_CHARS[i + nqubits])
left_in, left_out = "".join(left_in), "".join(left_out)
right_in, right_out = "".join(right_in), "".join(right_out)
return f"{left_in}{right_in}->{left_out}{right_out}"
def _get_bitflip_map(self, p: Optional["ProbsType"] = None
) -> Dict[int, float]:
"""Creates dictionary with bitflip probabilities."""
if p is None:
return {q: 0 for q in self.qubits}
pt = self._get_bitflip_tuple(self.qubits, p)
return {q: p for q, p in zip(self.qubits, pt)}
def symbol(self):
"""Returns symbol containing measurement outcomes for ``collapse=True`` gates."""
return self._symbol
def add(self, gate: "M"):
"""Adds target qubits to a measurement gate.
This method is only used for creating the global measurement gate used
by the `models.Circuit`.
The user is not supposed to use this method and a `ValueError` is
raised if he does so.
Args:
gate: Measurement gate to add its qubits in the current gate.
"""
assert isinstance(gate, self.__class__)
self.target_qubits += gate.target_qubits
self.bitflip_map[0].update(gate.bitflip_map[0])
self.bitflip_map[1].update(gate.bitflip_map[1])
def controlled_by(self, *q):
""""""
raise_error(NotImplementedError, "Measurement gates cannot be controlled.")
class _Rn_(ParametrizedGate):
"""Abstract class for defining the RX, RY and RZ rotations.
Args:
q (int): the qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "n"
def __init__(self, q, theta, trainable=True):
super(_Rn_, self).__init__(trainable)
self.name = "r{}".format(self.axis)
self.target_qubits = (q,)
self.parameters = theta
self.init_args = [q]
self.init_kwargs = {"theta": theta, "trainable": trainable}
def _dagger(self) -> "Gate":
""""""
return self.__class__(self.target_qubits[0], -self.parameters) # pylint: disable=E1130
@Gate.check_controls
def controlled_by(self, *q):
"""Fall back to CRn if there is only one control."""
if len(q) == 1:
gate = getattr(self.module, "CR{}".format(self.axis.capitalize()))(
q[0], self.target_qubits[0], **self.init_kwargs)
else:
gate = super(_Rn_, self).controlled_by(*q)
return gate
class RX(_Rn_):
"""Rotation around the X-axis of the Bloch sphere.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
\\cos \\frac{\\theta }{2} &
-i\\sin \\frac{\\theta }{2} \\\\
-i\\sin \\frac{\\theta }{2} &
\\cos \\frac{\\theta }{2} \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "x"
class RY(_Rn_):
"""Rotation around the Y-axis of the Bloch sphere.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
\\cos \\frac{\\theta }{2} &
-\\sin \\frac{\\theta }{2} \\\\
\\sin \\frac{\\theta }{2} &
\\cos \\frac{\\theta }{2} \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "y"
class RZ(_Rn_):
"""Rotation around the Z-axis of the Bloch sphere.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
e^{-i \\theta / 2} & 0 \\\\
0 & e^{i \\theta / 2} \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "z"
class _Un_(ParametrizedGate):
"""Abstract class for defining the U1, U2 and U3 gates.
Args:
q (int): the qubit id number.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 0
def __init__(self, q, trainable=True):
super(_Un_, self).__init__(trainable)
self.name = "u{}".format(self.order)
self.nparams = self.order
self.target_qubits = (q,)
self.init_args = [q]
self.init_kwargs = {"trainable": trainable}
@Gate.check_controls
def controlled_by(self, *q):
"""Fall back to CUn if there is only one control."""
if len(q) == 1:
gate = getattr(self.module, "CU{}".format(self.order))(
q[0], self.target_qubits[0], **self.init_kwargs)
else:
gate = super(_Un_, self).controlled_by(*q)
return gate
class U1(_Un_):
"""First general unitary gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 \\\\
0 & e^{i \\theta} \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 1
def __init__(self, q, theta, trainable=True):
super(U1, self).__init__(q, trainable=trainable)
self.parameters = theta
self.init_kwargs = {"theta": theta, "trainable": trainable}
def _dagger(self) -> "Gate":
""""""
return self.__class__(self.target_qubits[0], -self.parameters) # pylint: disable=E1130
class U2(_Un_):
"""Second general unitary gate.
Corresponds to the following unitary matrix
.. math::
\\frac{1}{\\sqrt{2}}
\\begin{pmatrix}
e^{-i(\\phi + \\lambda )/2} & -e^{-i(\\phi - \\lambda )/2} \\\\
e^{i(\\phi - \\lambda )/2} & e^{i (\\phi + \\lambda )/2} \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
phi (float): first rotation angle.
        lam (float): second rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 2
def __init__(self, q, phi, lam, trainable=True):
super(U2, self).__init__(q, trainable=trainable)
self._phi, self._lam = None, None
self.init_kwargs = {"phi": phi, "lam": lam, "trainable": trainable}
self.parameter_names = ["phi", "lam"]
self.parameters = phi, lam
def _dagger(self) -> "Gate":
""""""
phi, lam = self.parameters
phi, lam = math.pi - lam, - math.pi - phi
return self.__class__(self.target_qubits[0], phi, lam)
class U3(_Un_):
"""Third general unitary gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
e^{-i(\\phi + \\lambda )/2}\\cos\\left (\\frac{\\theta }{2}\\right ) & -e^{-i(\\phi - \\lambda )/2}\\sin\\left (\\frac{\\theta }{2}\\right ) \\\\
e^{i(\\phi - \\lambda )/2}\\sin\\left (\\frac{\\theta }{2}\\right ) & e^{i (\\phi + \\lambda )/2}\\cos\\left (\\frac{\\theta }{2}\\right ) \\\\
\\end{pmatrix}
Args:
q (int): the qubit id number.
theta (float): first rotation angle.
phi (float): second rotation angle.
        lam (float): third rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 3
def __init__(self, q, theta, phi, lam, trainable=True):
super(U3, self).__init__(q, trainable=trainable)
self._theta, self._phi, self._lam = None, None, None
self.init_kwargs = {"theta": theta, "phi": phi, "lam": lam,
"trainable": trainable}
self.parameter_names = ["theta", "phi", "lam"]
self.parameters = theta, phi, lam
def _dagger(self) -> "Gate":
""""""
theta, lam, phi = tuple(-x for x in self.parameters)
return self.__class__(self.target_qubits[0], theta, phi, lam)
class CNOT(Gate):
"""The Controlled-NOT gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & 0 & 1 \\\\
0 & 0 & 1 & 0 \\\\
\\end{pmatrix}
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
"""
def __init__(self, q0, q1):
super(CNOT, self).__init__()
self.name = "cx"
self.control_qubits = (q0,)
self.target_qubits = (q1,)
self.init_args = [q0, q1]
def decompose(self, *free, use_toffolis: bool = True) -> List[Gate]:
q0, q1 = self.control_qubits[0], self.target_qubits[0]
return [self.__class__(q0, q1)]
class CZ(Gate):
"""The Controlled-Phase gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & 1 & 0 \\\\
0 & 0 & 0 & -1 \\\\
\\end{pmatrix}
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
"""
def __init__(self, q0, q1):
super(CZ, self).__init__()
self.name = "cz"
self.control_qubits = (q0,)
self.target_qubits = (q1,)
self.init_args = [q0, q1]
class _CRn_(ParametrizedGate):
"""Abstract method for defining the CRX, CRY and CRZ gates.
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "n"
def __init__(self, q0, q1, theta, trainable=True):
super(_CRn_, self).__init__(trainable)
self.name = "cr{}".format(self.axis)
self.control_qubits = (q0,)
self.target_qubits = (q1,)
self.parameters = theta
self.init_args = [q0, q1]
self.init_kwargs = {"theta": theta, "trainable": trainable}
def _dagger(self) -> "Gate":
""""""
q0 = self.control_qubits[0]
q1 = self.target_qubits[0]
return self.__class__(q0, q1, -self.parameters) # pylint: disable=E1130
class CRX(_CRn_):
"""Controlled rotation around the X-axis for the Bloch sphere.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & \\cos \\frac{\\theta }{2} & -i\\sin \\frac{\\theta }{2} \\\\
0 & 0 & -i\\sin \\frac{\\theta }{2} & \\cos \\frac{\\theta }{2} \\\\
\\end{pmatrix}
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "x"
class CRY(_CRn_):
"""Controlled rotation around the Y-axis for the Bloch sphere.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & \\cos \\frac{\\theta }{2} & -\\sin \\frac{\\theta }{2} \\\\
0 & 0 & \\sin \\frac{\\theta }{2} & \\cos \\frac{\\theta }{2} \\\\
\\end{pmatrix}
Note that this differs from the :class:`qibo.abstractions.gates.RZ` gate.
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "y"
class CRZ(_CRn_):
"""Controlled rotation around the Z-axis for the Bloch sphere.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & e^{-i \\theta / 2} & 0 \\\\
0 & 0 & 0 & e^{i \\theta / 2} \\\\
\\end{pmatrix}
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
axis = "z"
class _CUn_(ParametrizedGate):
"""Abstract method for defining the CU1, CU2 and CU3 gates.
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 0
def __init__(self, q0, q1, trainable=True):
super(_CUn_, self).__init__(trainable)
self.name = "cu{}".format(self.order)
self.nparams = self.order
self.control_qubits = (q0,)
self.target_qubits = (q1,)
self.init_args = [q0, q1]
self.init_kwargs = {"trainable": trainable}
class CU1(_CUn_):
"""Controlled first general unitary gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & 1 & 0 \\\\
0 & 0 & 0 & e^{i \\theta } \\\\
\\end{pmatrix}
Note that this differs from the :class:`qibo.abstractions.gates.CRZ` gate.
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
theta (float): the rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 1
def __init__(self, q0, q1, theta, trainable=True):
super(CU1, self).__init__(q0, q1, trainable=trainable)
self.parameters = theta
self.init_kwargs = {"theta": theta, "trainable": trainable}
def _dagger(self) -> "Gate":
""""""
q0 = self.control_qubits[0]
q1 = self.target_qubits[0]
return self.__class__(q0, q1, -self.parameters) # pylint: disable=E1130
class CU2(_CUn_):
"""Controlled second general unitary gate.
Corresponds to the following unitary matrix
.. math::
\\frac{1}{\\sqrt{2}}
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & e^{-i(\\phi + \\lambda )/2} & -e^{-i(\\phi - \\lambda )/2} \\\\
0 & 0 & e^{i(\\phi - \\lambda )/2} & e^{i (\\phi + \\lambda )/2} \\\\
\\end{pmatrix}
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
phi (float): first rotation angle.
        lam (float): second rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 2
def __init__(self, q0, q1, phi, lam, trainable=True):
super(CU2, self).__init__(q0, q1, trainable=trainable)
self.init_kwargs = {"phi": phi, "lam": lam, "trainable": trainable}
self.parameter_names = ["phi", "lam"]
self.parameters = phi, lam
def _dagger(self) -> "Gate":
""""""
q0 = self.control_qubits[0]
q1 = self.target_qubits[0]
phi, lam = self.parameters
phi, lam = math.pi - lam, - math.pi - phi
return self.__class__(q0, q1, phi, lam)
class CU3(_CUn_):
"""Controlled third general unitary gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & e^{-i(\\phi + \\lambda )/2}\\cos\\left (\\frac{\\theta }{2}\\right ) & -e^{-i(\\phi - \\lambda )/2}\\sin\\left (\\frac{\\theta }{2}\\right ) \\\\
0 & 0 & e^{i(\\phi - \\lambda )/2}\\sin\\left (\\frac{\\theta }{2}\\right ) & e^{i (\\phi + \\lambda )/2}\\cos\\left (\\frac{\\theta }{2}\\right ) \\\\
\\end{pmatrix}
Args:
q0 (int): the control qubit id number.
q1 (int): the target qubit id number.
theta (float): first rotation angle.
phi (float): second rotation angle.
        lam (float): third rotation angle.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
order = 3
def __init__(self, q0, q1, theta, phi, lam, trainable=True):
super(CU3, self).__init__(q0, q1, trainable=trainable)
self._theta, self._phi, self._lam = None, None, None
self.init_kwargs = {"theta": theta, "phi": phi, "lam": lam,
"trainable": trainable}
self.parameter_names = ["theta", "phi", "lam"]
self.parameters = theta, phi, lam
def _dagger(self) -> "Gate":
""""""
q0 = self.control_qubits[0]
q1 = self.target_qubits[0]
theta, lam, phi = tuple(-x for x in self.parameters)
return self.__class__(q0, q1, theta, phi, lam)
class SWAP(Gate):
"""The swap gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 0 & 1 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & 0 & 1 \\\\
\\end{pmatrix}
Args:
q0 (int): the first qubit to be swapped id number.
q1 (int): the second qubit to be swapped id number.
"""
def __init__(self, q0, q1):
super(SWAP, self).__init__()
self.name = "swap"
self.target_qubits = (q0, q1)
self.init_args = [q0, q1]
class FSWAP(Gate):
"""The fermionic swap gate.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 0 & 1 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & 0 & -1 \\\\
\\end{pmatrix}
Args:
q0 (int): the first qubit to be f-swapped id number.
q1 (int): the second qubit to be f-swapped id number.
"""
def __init__(self, q0, q1):
super(FSWAP, self).__init__()
self.name = "fswap"
self.target_qubits = (q0, q1)
self.init_args = [q0, q1]
class fSim(ParametrizedGate):
"""The fSim gate defined in `arXiv:2001.08343 <https://arxiv.org/abs/2001.08343>`_.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & \\cos \\theta & -i\\sin \\theta & 0 \\\\
0 & -i\\sin \\theta & \\cos \\theta & 0 \\\\
0 & 0 & 0 & e^{-i \\phi } \\\\
\\end{pmatrix}
Args:
q0 (int): the first qubit to be swapped id number.
q1 (int): the second qubit to be swapped id number.
theta (float): Angle for the one-qubit rotation.
phi (float): Angle for the ``|11>`` phase.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
# TODO: Check how this works with QASM.
def __init__(self, q0, q1, theta, phi, trainable=True):
super(fSim, self).__init__(trainable)
self.name = "fsim"
self.target_qubits = (q0, q1)
self.parameter_names = ["theta", "phi"]
self.parameters = theta, phi
self.nparams = 2
self.init_args = [q0, q1]
self.init_kwargs = {"theta": theta, "phi": phi, "trainable": trainable}
def _dagger(self) -> "Gate":
""""""
q0, q1 = self.target_qubits
return self.__class__(q0, q1, *(-x for x in self.parameters))
class GeneralizedfSim(ParametrizedGate):
"""The fSim gate with a general rotation.
Corresponds to the following unitary matrix
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & R_{00} & R_{01} & 0 \\\\
0 & R_{10} & R_{11} & 0 \\\\
0 & 0 & 0 & e^{-i \\phi } \\\\
\\end{pmatrix}
Args:
q0 (int): the first qubit to be swapped id number.
q1 (int): the second qubit to be swapped id number.
unitary (np.ndarray): Unitary that corresponds to the one-qubit rotation.
phi (float): Angle for the ``|11>`` phase.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
"""
def __init__(self, q0, q1, unitary, phi, trainable=True):
super(GeneralizedfSim, self).__init__(trainable)
self.name = "generalizedfsim"
self.target_qubits = (q0, q1)
self.parameter_names = ["u", "phi"]
self.parameters = unitary, phi
self.nparams = 5
self.init_args = [q0, q1]
self.init_kwargs = {"unitary": unitary, "phi": phi,
"trainable": trainable}
@abstractmethod
def _dagger(self) -> "Gate": # pragma: no cover
""""""
raise_error(NotImplementedError)
@ParametrizedGate.parameters.setter
def parameters(self, x):
shape = tuple(x[0].shape)
if shape != (2, 2):
raise_error(ValueError, "Invalid rotation shape {} for generalized "
"fSim gate".format(shape))
ParametrizedGate.parameters.fset(self, x) # pylint: disable=no-member
class TOFFOLI(Gate):
"""The Toffoli gate.
Args:
q0 (int): the first control qubit id number.
q1 (int): the second control qubit id number.
q2 (int): the target qubit id number.
"""
def __init__(self, q0, q1, q2):
super(TOFFOLI, self).__init__()
self.name = "ccx"
self.control_qubits = (q0, q1)
self.target_qubits = (q2,)
self.init_args = [q0, q1, q2]
def decompose(self, *free, use_toffolis: bool = True) -> List[Gate]:
c0, c1 = self.control_qubits
t = self.target_qubits[0]
return [self.__class__(c0, c1, t)]
def congruent(self, use_toffolis: bool = True) -> List[Gate]:
"""Congruent representation of ``TOFFOLI`` gate.
This is a helper method for the decomposition of multi-control ``X`` gates.
The congruent representation is based on Sec. 6.2 of
`arXiv:9503016 <https://arxiv.org/abs/quant-ph/9503016>`_.
The sequence of the gates produced here has the same effect as ``TOFFOLI``
with the phase of the ``|101>`` state reversed.
Args:
use_toffolis: If ``True`` a single ``TOFFOLI`` gate is returned.
If ``False`` the congruent representation is returned.
Returns:
List with ``RY`` and ``CNOT`` gates that have the same effect as
applying the original ``TOFFOLI`` gate.
"""
if use_toffolis:
return self.decompose()
import importlib
control0, control1 = self.control_qubits
target = self.target_qubits[0]
RY = self.module.RY
CNOT = self.module.CNOT
return [RY(target, -math.pi / 4), CNOT(control1, target),
RY(target, -math.pi / 4), CNOT(control0, target),
RY(target, math.pi / 4), CNOT(control1, target),
RY(target, math.pi / 4)]
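# Quick illustration of the congruent representation above (not part of the
# original source):
#
#   TOFFOLI(0, 1, 2).congruent(use_toffolis=True)    # -> [TOFFOLI(0, 1, 2)]
#   TOFFOLI(0, 1, 2).congruent(use_toffolis=False)   # -> 7 gates: 4 RY and 3 CNOT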
class Unitary(ParametrizedGate):
"""Arbitrary unitary gate.
Args:
unitary: Unitary matrix as a tensor supported by the backend.
Note that there is no check that the matrix passed is actually
unitary. This allows the user to create non-unitary gates.
*q (int): Qubit id numbers that the gate acts on.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
name (str): Optional name for the gate.
"""
def __init__(self, unitary, *q, trainable=True, name=None):
super(Unitary, self).__init__(trainable)
self.name = "Unitary" if name is None else name
self.target_qubits = tuple(q)
self.parameter_names = "u"
self.parameters = unitary
self.nparams = 4 ** len(self.target_qubits)
self.init_args = [unitary] + list(q)
self.init_kwargs = {"name": name, "trainable": trainable}
@property
def rank(self) -> int:
return len(self.target_qubits)
def _on_qubits(self, *q) -> "Gate":
args = [self.init_args[0]]
args.extend((q[i] for i in self.target_qubits))
gate = self.__class__(*args, **self.init_kwargs)
if self.is_controlled_by:
controls = (q[i] for i in self.control_qubits)
gate = gate.controlled_by(*controls)
return gate
@abstractmethod
def _dagger(self) -> "Gate": # pragma: no cover
""""""
raise_error(NotImplementedError)
class VariationalLayer(ParametrizedGate):
"""Layer of one-qubit parametrized gates followed by two-qubit entangling gates.
Performance is optimized by fusing the variational one-qubit gates with the
two-qubit entangling gates that follow them and applying a single layer of
two-qubit gates as 4x4 matrices.
Args:
qubits (list): List of one-qubit gate target qubit IDs.
pairs (list): List of pairs of qubit IDs on which the two qubit gate act.
one_qubit_gate: Type of one qubit gate to use as the variational gate.
two_qubit_gate: Type of two qubit gate to use as entangling gate.
params (list): Variational parameters of one-qubit gates as a list that
has the same length as ``qubits``. These gates act before the layer
of entangling gates.
params2 (list): Variational parameters of one-qubit gates as a list that
has the same length as ``qubits``. These gates act after the layer
of entangling gates.
trainable (bool): whether gate parameters can be updated using
:meth:`qibo.abstractions.circuit.AbstractCircuit.set_parameters`
(default is ``True``).
name (str): Optional name for the gate.
If ``None`` the name ``"VariationalLayer"`` will be used.
Example:
.. testcode::
import numpy as np
from qibo.models import Circuit
from qibo import gates
# generate an array of variational parameters for 8 qubits
theta = 2 * np.pi * np.random.random(8)
# define qubit pairs that two qubit gates will act
pairs = [(i, i + 1) for i in range(0, 7, 2)]
# define a circuit of 8 qubits and add the variational layer
c = Circuit(8)
c.add(gates.VariationalLayer(range(8), pairs, gates.RY, gates.CZ, theta))
# this will create an optimized version of the following circuit
c2 = Circuit(8)
            c2.add((gates.RY(i, th) for i, th in enumerate(theta)))
            c2.add((gates.CZ(i, i + 1) for i in range(7)))
"""
def __init__(self, qubits: List[int], pairs: List[Tuple[int, int]],
one_qubit_gate, two_qubit_gate,
params: List[float], params2: Optional[List[float]] = None,
trainable: bool = True,
name: Optional[str] = None):
super(VariationalLayer, self).__init__(trainable)
self.init_args = [qubits, pairs, one_qubit_gate, two_qubit_gate]
self.init_kwargs = {"params": params, "params2": params2,
"trainable": trainable, "name": name}
self.name = "VariationalLayer" if name is None else name
self.unitaries = []
self.additional_unitary = None
self.target_qubits = tuple(qubits)
self.parameter_names = [f"theta{i}" for i, _ in enumerate(params)]
parameter_values = list(params)
self.params = self._create_params_dict(params)
self.params2 = {}
if params2 is not None:
self.params2 = self._create_params_dict(params2)
n = len(self.parameter_names)
self.parameter_names.extend([f"theta{i + n}" for i, _ in enumerate(params2)])
parameter_values.extend(params2)
self.parameters = parameter_values
self.nparams = len(parameter_values)
self.pairs = pairs
targets = set(self.target_qubits)
two_qubit_targets = set(q for p in pairs for q in p)
additional_targets = targets - two_qubit_targets
if not additional_targets:
self.additional_target = None
elif len(additional_targets) == 1:
self.additional_target = additional_targets.pop()
else:
raise_error(ValueError, "Variational layer can have at most one "
"additional target for one qubit gates but "
" has {}.".format(additional_targets))
self.one_qubit_gate = one_qubit_gate
self.two_qubit_gate = two_qubit_gate
def _create_params_dict(self, params: List[float]) -> Dict[int, float]:
if len(self.target_qubits) != len(params):
raise_error(ValueError, "VariationalLayer has {} target qubits but "
"{} parameters were given."
"".format(len(self.target_qubits), len(params)))
return {q: p for q, p in zip(self.target_qubits, params)}
@ParametrizedGate.parameters.setter
def parameters(self, x):
if self.params2:
n = len(x) // 2
self.params = self._create_params_dict(x[:n])
self.params2 = self._create_params_dict(x[n:])
else:
self.params = self._create_params_dict(x)
ParametrizedGate.parameters.fset(self, x) # pylint: disable=no-member
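# Illustrative note (not part of the original code): when ``params2`` is given,
# the ``parameters`` setter above splits a flat list in half, e.g. for two
# target qubits (q0, q1):
#
#     layer.parameters = [0.1, 0.2, 0.3, 0.4]
#     # params  -> {q0: 0.1, q1: 0.2}   (gates before the entangling layer)
#     # params2 -> {q0: 0.3, q1: 0.4}   (gates after the entangling layer)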
class Flatten(SpecialGate):
"""Passes an arbitrary state vector in the circuit.
Args:
coefficients (list): list of the target state vector components.
This can also be a tensor supported by the backend.
"""
def __init__(self, coefficients):
super(Flatten, self).__init__()
self.name = "Flatten"
self.coefficients = coefficients
self.init_args = [coefficients]
class CallbackGate(SpecialGate):
"""Calculates a :class:`qibo.core.callbacks.Callback` at a specific point in the circuit.
    This gate performs the callback calculation without affecting the state vector.
Args:
callback (:class:`qibo.core.callbacks.Callback`): Callback object to calculate.
"""
def __init__(self, callback: "Callback"):
super(CallbackGate, self).__init__()
self.name = callback.__class__.__name__
self.callback = callback
self.init_args = [callback]
@Gate.nqubits.setter
def nqubits(self, n: int):
Gate.nqubits.fset(self, n) # pylint: disable=no-member
self.callback.nqubits = n
class PartialTrace(Gate):
"""Collapses a density matrix by tracing out selected qubits.
Works only with density matrices (not state vectors) and implements the
following transformation:
.. math::
\\mathcal{E}(\\rho ) = (|0\\rangle \\langle 0|) _A \\otimes \\mathrm{Tr} _A (\\rho )
where A denotes the subsystem of qubits that are traced out.
Args:
q (int): Qubit ids that will be traced-out and collapsed to the zero
            state. More than one qubit can be given.
"""
def __init__(self, *q):
super().__init__()
self.name = "PartialTrace"
self.target_qubits = tuple(q)
self.init_args = q
self.init_kwargs = {}
class KrausChannel(Channel):
"""General channel defined by arbitrary Kraus operators.
Implements the following transformation:
.. math::
\\mathcal{E}(\\rho ) = \\sum _k A_k \\rho A_k^\\dagger
    where A_k are arbitrary Kraus operators given by the user. Note that the
    Kraus operator set should be trace preserving; however, this is not checked.
Simulation of this gate requires the use of density matrices.
For more information on channels and Kraus operators please check
`J. Preskill's notes <http://theory.caltech.edu/~preskill/ph219/chap3_15.pdf>`_.
Args:
ops (list): List of Kraus operators as pairs ``(qubits, Ak)`` where
            ``qubits`` refers to the qubit ids that ``Ak`` acts on and ``Ak`` is
the corresponding matrix as a ``np.ndarray`` or ``tf.Tensor``.
Example:
.. testcode::
import numpy as np
from qibo.models import Circuit
from qibo import gates
# initialize circuit with 3 qubits
c = Circuit(3, density_matrix=True)
# define a sqrt(0.4) * X gate
a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
# define a sqrt(0.6) * CNOT gate
a2 = np.sqrt(0.6) * np.array([[1, 0, 0, 0], [0, 1, 0, 0],
[0, 0, 0, 1], [0, 0, 1, 0]])
# define the channel rho -> 0.4 X{1} rho X{1} + 0.6 CNOT{0, 2} rho CNOT{0, 2}
channel = gates.KrausChannel([((1,), a1), ((0, 2), a2)])
# add the channel to the circuit
c.add(channel)
"""
def __init__(self, ops):
super(KrausChannel, self).__init__()
self.name = "KrausChannel"
self.density_matrix = True
if isinstance(ops[0], Gate):
self.gates = tuple(ops)
self.target_qubits = tuple(sorted(set(
q for gate in ops for q in gate.target_qubits)))
else:
self.gates, self.target_qubits = self._from_matrices(ops)
self.init_args = [self.gates]
def _from_matrices(self, matrices):
"""Creates gates from qubits and matrices list."""
gatelist, qubitset = [], set()
for qubits, matrix in matrices:
# Check that given operators have the proper shape.
rank = 2 ** len(qubits)
shape = tuple(matrix.shape)
if shape != (rank, rank):
raise_error(ValueError, "Invalid Kraus operator shape {} for "
"acting on {} qubits."
"".format(shape, len(qubits)))
qubitset.update(qubits)
gatelist.append(self.module.Unitary(matrix, *list(qubits)))
gatelist[-1].density_matrix = True
return tuple(gatelist), tuple(sorted(qubitset))
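# Illustrative note (not part of the original code): ``_from_matrices`` checks
# that an operator acting on ``len(qubits)`` qubits is a square matrix of size
# ``2 ** len(qubits)``. For example:
#
#     ((1,), a1)     requires a1.shape == (2, 2)
#     ((0, 2), a2)   requires a2.shape == (4, 4)
#
# matching the sqrt(0.4) * X and sqrt(0.6) * CNOT operators in the docstring
# example above.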
class UnitaryChannel(KrausChannel):
"""Channel that is a probabilistic sum of unitary operations.
Implements the following transformation:
.. math::
\\mathcal{E}(\\rho ) = \\left (1 - \\sum _k p_k \\right )\\rho +
\\sum _k p_k U_k \\rho U_k^\\dagger
    where U_k are arbitrary unitary operators and p_k are floats between 0 and 1.
Note that unlike :class:`qibo.abstractions.gates.KrausChannel` which requires
density matrices, it is possible to simulate the unitary channel using
state vectors and probabilistic sampling. For more information on this
approach we refer to :ref:`Using repeated execution <repeatedexec-example>`.
Args:
p (list): List of floats that correspond to the probability that each
unitary Uk is applied.
ops (list): List of operators as pairs ``(qubits, Uk)`` where
            ``qubits`` refers to the qubit ids that ``Uk`` acts on and ``Uk`` is
the corresponding matrix as a ``np.ndarray``/``tf.Tensor``.
Must have the same length as the given probabilities ``p``.
seed (int): Optional seed for the random number generator when sampling
instead of density matrices is used to simulate this gate.
"""
def __init__(self, p, ops, seed=None):
if len(p) != len(ops):
raise_error(ValueError, "Probabilities list has length {} while "
"{} gates were given."
"".format(len(p), len(ops)))
for pp in p:
if pp < 0 or pp > 1:
raise_error(ValueError, "Probabilities should be between 0 "
"and 1 but {} was given.".format(pp))
super(UnitaryChannel, self).__init__(ops)
self.name = "UnitaryChannel"
self.probs = p
self.psum = sum(p)
self.seed = seed
self.density_matrix = False
self.init_args = [p, self.gates]
self.init_kwargs = {"seed": seed}
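# Illustrative sketch (not from the original source; uses the (qubits, matrix)
# form documented above): a channel applying X on qubit 0 with probability 0.1
# and Z on qubit 1 with probability 0.2, leaving the state unchanged with
# probability 0.7 = 1 - sum(p).
#
#     import numpy as np
#     x = np.array([[0, 1], [1, 0]])
#     z = np.array([[1, 0], [0, -1]])
#     channel = UnitaryChannel([0.1, 0.2], [((0,), x), ((1,), z)])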
class PauliNoiseChannel(UnitaryChannel):
"""Noise channel that applies Pauli operators with given probabilities.
Implements the following transformation:
.. math::
\\mathcal{E}(\\rho ) = (1 - p_x - p_y - p_z) \\rho + p_x X\\rho X + p_y Y\\rho Y + p_z Z\\rho Z
which can be used to simulate phase flip and bit flip errors.
This channel can be simulated using either density matrices or state vectors
and sampling with repeated execution.
See :ref:`How to perform noisy simulation? <noisy-example>` for more
information.
Args:
q (int): Qubit id that the noise acts on.
px (float): Bit flip (X) error probability.
py (float): Y-error probability.
pz (float): Phase flip (Z) error probability.
seed (int): Optional seed for the random number generator when sampling
instead of density matrices is used to simulate this gate.
"""
def __init__(self, q, px=0, py=0, pz=0, seed=None):
probs, gates = [], []
for p, gate in [(px, "X"), (py, "Y"), (pz, "Z")]:
if p > 0:
probs.append(p)
gates.append(getattr(self.module, gate)(q))
super(PauliNoiseChannel, self).__init__(probs, gates, seed=seed)
self.name = "PauliNoiseChannel"
assert self.target_qubits == (q,)
self.init_args = [q]
self.init_kwargs = {"px": px, "py": py, "pz": pz, "seed": seed}
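# Illustrative note (not part of the original code): only Pauli terms with a
# non-zero probability are kept by the loop above, so for example
# ``PauliNoiseChannel(0, px=0.1, pz=0.05)`` builds a two-term channel
# (X with p=0.1 and Z with p=0.05) on qubit 0 and leaves the state untouched
# with probability 0.85.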
class ResetChannel(UnitaryChannel):
"""Single-qubit reset channel.
Implements the following transformation:
.. math::
\\mathcal{E}(\\rho ) = (1 - p_0 - p_1) \\rho
+ p_0 (|0\\rangle \\langle 0| \\otimes \\tilde{\\rho })
        + p_1 (|1\\rangle \\langle 1| \\otimes \\tilde{\\rho })
with
.. math::
        \\tilde{\\rho } = \\frac{\\langle 0|\\rho |0\\rangle }{\\mathrm{Tr}\\langle 0|\\rho |0\\rangle }
Args:
q (int): Qubit id that the channel acts on.
p0 (float): Probability to reset to 0.
p1 (float): Probability to reset to 1.
seed (int): Optional seed for the random number generator when sampling
instead of density matrices is used to simulate this gate.
"""
def __init__(self, q, p0=0.0, p1=0.0, seed=None):
probs = [p0, p1]
gates = [self.module.M(q, collapse=True), self.module.X(q)]
super(ResetChannel, self).__init__(probs, gates, seed=seed)
self.name = "ResetChannel"
assert self.target_qubits == (q,)
self.init_args = [q]
self.init_kwargs = {"p0": p0, "p1": p1, "seed": seed}
class ThermalRelaxationChannel:
"""Single-qubit thermal relaxation error channel.
Implements the following transformation:
If :math:`T_1 \\geq T_2`:
.. math::
\\mathcal{E} (\\rho ) = (1 - p_z - p_0 - p_1)\\rho + p_zZ\\rho Z
+ p_0 (|0\\rangle \\langle 0| \\otimes \\tilde{\\rho })
        + p_1 (|1\\rangle \\langle 1| \\otimes \\tilde{\\rho })
with
.. math::
        \\tilde{\\rho } = \\frac{\\langle 0|\\rho |0\\rangle }{\\mathrm{Tr}\\langle 0|\\rho |0\\rangle }
while if :math:`T_1 < T_2`:
.. math::
\\mathcal{E}(\\rho ) = \\mathrm{Tr} _\\mathcal{X}\\left [\\Lambda _{\\mathcal{X}\\mathcal{Y}}(\\rho _\\mathcal{X} ^T \\otimes \\mathbb{I}_\\mathcal{Y})\\right ]
with
.. math::
\\Lambda = \\begin{pmatrix}
1 - p_1 & 0 & 0 & e^{-t / T_2} \\\\
0 & p_1 & 0 & 0 \\\\
0 & 0 & p_0 & 0 \\\\
e^{-t / T_2} & 0 & 0 & 1 - p_0
\\end{pmatrix}
    where :math:`p_0 = (1 - e^{-t / T_1})(1 - \\eta )`, :math:`p_1 = (1 - e^{-t / T_1})\\eta`
and :math:`p_z = 1 - e^{-t / T_1} + e^{-t / T_2} - e^{t / T_1 - t / T_2}`.
Here :math:`\\eta` is the ``excited_population``
and :math:`t` is the ``time``, both controlled by the user.
This gate is based on
`Qiskit's thermal relaxation error channel <https://qiskit.org/documentation/stubs/qiskit.providers.aer.noise.thermal_relaxation_error.html#qiskit.providers.aer.noise.thermal_relaxation_error>`_.
Args:
q (int): Qubit id that the noise channel acts on.
t1 (float): T1 relaxation time. Should satisfy ``t1 > 0``.
t2 (float): T2 dephasing time.
            Should satisfy ``t2 > 0`` and ``t2 < 2 * t1``.
time (float): the gate time for relaxation error.
excited_population (float): the population of the excited state at
equilibrium. Default is 0.
seed (int): Optional seed for the random number generator when sampling
instead of density matrices is used to simulate this gate.
"""
def __init__(self, q, t1, t2, time, excited_population=0, seed=None):
self.name = "ThermalRelaxationChannel"
self.init_args = [q, t1, t2, time]
self.init_kwargs = {"excited_population": excited_population,
"seed": seed}
def calculate_probabilities(self, t1, t2, time, excited_population):
if excited_population < 0 or excited_population > 1:
raise_error(ValueError, "Invalid excited state population {}."
"".format(excited_population))
if time < 0:
raise_error(ValueError, "Invalid gate_time ({} < 0)".format(time))
if t1 <= 0:
raise_error(ValueError, "Invalid T_1 relaxation time parameter: "
"T_1 <= 0.")
if t2 <= 0:
raise_error(ValueError, "Invalid T_2 relaxation time parameter: "
"T_2 <= 0.")
if t2 > 2 * t1:
raise_error(ValueError, "Invalid T_2 relaxation time parameter: "
"T_2 greater than 2 * T_1.")
class _ThermalRelaxationChannelA(UnitaryChannel):
"""Implements thermal relaxation when T1 >= T2."""
def calculate_probabilities(self, t1, t2, time, excited_population): # pragma: no cover
# function not tested because it is redefined in `qibo.core.cgates._ThermalRelaxationChannelA`
return ThermalRelaxationChannel.calculate_probabilities(
self, t1, t2, time, excited_population)
def __init__(self, q, t1, t2, time, excited_population=0, seed=None):
probs = self.calculate_probabilities(t1, t2, time, excited_population)
gates = [self.module.Z(q), self.module.M(q, collapse=True),
self.module.X(q)]
super(_ThermalRelaxationChannelA, self).__init__(
probs, gates, seed=seed)
ThermalRelaxationChannel.__init__(
self, q, t1, t2, time, excited_population=excited_population,
seed=seed)
assert self.target_qubits == (q,)
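# Illustrative worked example (numbers are assumptions, not from the source):
# with t = 0.1, T1 = 1.0 and excited_population eta = 0.1, the formulas quoted
# in the ThermalRelaxationChannel docstring give
#     1 - exp(-t / T1) = 1 - exp(-0.1) ~ 0.0952
#     p_0 ~ 0.0952 * (1 - 0.1) ~ 0.0856
#     p_1 ~ 0.0952 * 0.1       ~ 0.0095
# The base-class method only validates the parameter ranges; the probabilities
# themselves are computed in the backend-specific subclasses.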
class _ThermalRelaxationChannelB(Gate):
"""Implements thermal relaxation when T1 < T2."""
def calculate_probabilities(self, t1, t2, time, excited_population): # pragma: no cover
# function not tested because it is redefined in `qibo.core.cgates._ThermalRelaxationChannelB`
return ThermalRelaxationChannel.calculate_probabilities(
self, t1, t2, time, excited_population)
def __init__(self, q, t1, t2, time, excited_population=0, seed=None):
probs = self.calculate_probabilities(t1, t2, time, excited_population)
self.exp_t2, self.preset0, self.preset1 = probs # pylint: disable=E0633
super(_ThermalRelaxationChannelB, self).__init__()
self.target_qubits = (q,)
ThermalRelaxationChannel.__init__(
self, q, t1, t2, time, excited_population=excited_population,
seed=seed)
# this case can only be applied to density matrices
self.density_matrix = True
class FusedGate(Gate):
"""Collection of gates that will be fused and applied as single gate during simulation.
This gate is constructed automatically by :meth:`qibo.core.circuit.Circuit.fuse`
    and should not be used directly by the user.
    :class:`qibo.abstractions.gates.FusedGate` works with an arbitrary number of
    target qubits; however, the backend implementation
:class:`qibo.core.gates.FusedGate` assumes two target qubits.
"""
def __init__(self, *q):
super().__init__()
self.name = "fused"
self.target_qubits = tuple(q)
self.init_args = list(q)
self.qubit_set = set(q)
self.gates = []
def add(self, gate):
if not set(gate.qubits).issubset(self.qubit_set):
raise_error(ValueError, "Cannot add gate that targets {} "
"in fused gate acting on {}."
"".format(gate.qubits, self.qubits))
if isinstance(gate, self.__class__):
self.gates.extend(gate.gates)
else:
self.gates.append(gate)
def __iter__(self):
return iter(self.gates)
def _dagger(self):
dagger = self.__class__(*self.init_args)
for gate in self.gates[::-1]:
dagger.add(gate.dagger())
return dagger
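# Illustrative sketch (not from the original file; assumes backend H/CZ gates):
#
#     fused = FusedGate(0, 1)
#     fused.add(H(0))
#     fused.add(CZ(0, 1))
#     # iteration yields the gates in insertion order: H(0), CZ(0, 1)
#     # fused.add(X(2)) would raise ValueError (qubit 2 is outside {0, 1})
#     # fused.dagger() contains the daggered gates in reverse order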
| 35.084756
| 199
| 0.567834
|
08bf7ad6d99fbc2e56b9562e402bbe75f20d499f
| 4,634
|
py
|
Python
|
pos_orders_history_return/models/pos_order.py
|
ShaheenHossain/itpp-labs_pos-addons
|
8c5047af10447eb3d137c84111127fae1a8970b6
|
[
"MIT"
] | null | null | null |
pos_orders_history_return/models/pos_order.py
|
ShaheenHossain/itpp-labs_pos-addons
|
8c5047af10447eb3d137c84111127fae1a8970b6
|
[
"MIT"
] | null | null | null |
pos_orders_history_return/models/pos_order.py
|
ShaheenHossain/itpp-labs_pos-addons
|
8c5047af10447eb3d137c84111127fae1a8970b6
|
[
"MIT"
] | 4
|
2020-08-25T01:49:14.000Z
|
2021-04-04T10:29:04.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 Dinar Gabbasov <https://it-projects.info/team/GabbasovDinar>
# License MIT (https://opensource.org/licenses/MIT).
import logging
import psycopg2
from odoo import _, api, fields, models, tools
from odoo.tools import float_is_zero
_logger = logging.getLogger(__name__)
class PosOrder(models.Model):
_inherit = "pos.order"
returned_order = fields.Boolean("Returned Order", default=False)
@api.model
def create_from_ui(self, orders):
# Keep return orders
submitted_references = [o["data"]["name"] for o in orders]
pos_order = self.search([("pos_reference", "in", submitted_references)])
existing_orders = pos_order.read(["pos_reference"])
existing_references = {o["pos_reference"] for o in existing_orders}
orders_to_save = [o for o in orders if o["data"]["name"] in existing_references]
pos_retuned_orders = [
o
for o in orders_to_save
if o["data"].get("mode") and o["data"].get("mode") == "return"
]
self.return_from_ui(pos_retuned_orders)
return super(PosOrder, self).create_from_ui(orders)
@api.multi
def return_from_ui(self, orders):
for tmp_order in orders:
            # avoid processing the same return order more than once
returned_order = self.search(
[
("pos_reference", "=", tmp_order["data"]["name"]),
("date_order", "=", tmp_order["data"]["creation_date"]),
("returned_order", "=", True),
]
)
if not returned_order:
to_invoice = tmp_order["to_invoice"]
order = tmp_order["data"]
if to_invoice:
self._match_payment_to_invoice(order)
order["returned_order"] = True
pos_order = self._process_order(order)
try:
pos_order.action_pos_order_paid()
except psycopg2.OperationalError:
raise
except Exception as e:
_logger.error(
"Could not fully process the POS Order: %s", tools.ustr(e)
)
if to_invoice:
pos_order.action_pos_order_invoice()
pos_order.invoice_id.sudo().action_invoice_open()
pos_order.account_move = pos_order.invoice_id.move_id
@api.model
def _process_order(self, pos_order):
if pos_order.get("returned_order"):
prec_acc = self.env["decimal.precision"].precision_get("Account")
pos_session = self.env["pos.session"].browse(pos_order["pos_session_id"])
if pos_session.state == "closing_control" or pos_session.state == "closed":
pos_order["pos_session_id"] = self._get_valid_session(pos_order).id
order = self.create(self._order_fields(pos_order))
order.write({"returned_order": True})
journal_ids = set()
for payments in pos_order["statement_ids"]:
if not float_is_zero(payments[2]["amount"], precision_digits=prec_acc):
order.add_payment(self._payment_fields(payments[2]))
journal_ids.add(payments[2]["journal_id"])
if pos_session.sequence_number <= pos_order["sequence_number"]:
pos_session.write({"sequence_number": pos_order["sequence_number"] + 1})
pos_session.refresh()
if not float_is_zero(pos_order["amount_return"], prec_acc):
cash_journal_id = pos_session.cash_journal_id.id
if not cash_journal_id:
cash_journal = self.env["account.journal"].search(
[("id", "in", list(journal_ids))], limit=1
)
if not cash_journal:
cash_journal = [
statement.journal_id
for statement in pos_session.statement_ids
]
cash_journal_id = cash_journal[0].id
order.add_payment(
{
"amount": -pos_order["amount_return"],
"payment_date": fields.Datetime.now(),
"payment_name": _("return"),
"journal": cash_journal_id,
}
)
return order
else:
return super(PosOrder, self)._process_order(pos_order)
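# Plain-Python illustration of the filtering done in ``create_from_ui`` above
# (names and data are hypothetical, not from the source):
#
#     orders = [{"data": {"name": "Order 0001", "mode": "return"}},
#               {"data": {"name": "Order 0002"}}]
#     existing_references = {"Order 0001"}
#     returns = [o for o in orders
#                if o["data"]["name"] in existing_references
#                and o["data"].get("mode") == "return"]
#     # only "Order 0001" would be routed to return_from_ui()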
| 41.375
| 88
| 0.554596
|
dbd14afeeaac7a4701ee03615e03bd69f9454a73
| 3,735
|
py
|
Python
|
timm/data/lmdb_loader.py
|
lusinlu/pytorch-image-models
|
7c85407bda63dd29217ee36948a0e16d20927f48
|
[
"Apache-2.0"
] | 1
|
2020-06-24T07:56:21.000Z
|
2020-06-24T07:56:21.000Z
|
timm/data/lmdb_loader.py
|
lusinlu/pytorch-image-models
|
7c85407bda63dd29217ee36948a0e16d20927f48
|
[
"Apache-2.0"
] | null | null | null |
timm/data/lmdb_loader.py
|
lusinlu/pytorch-image-models
|
7c85407bda63dd29217ee36948a0e16d20927f48
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import six
import string
import argparse
import lmdb
import pickle
import msgpack
import tqdm
from PIL import Image
import torch
import torch.utils.data as data
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from torchvision.datasets import ImageFolder
from torchvision import transforms, datasets
# This segfaults when imported before torch: https://github.com/apache/arrow/issues/2637
import pyarrow as pa
class ImageFolderLMDB(data.Dataset):
def __init__(self, db_path, transform=None, target_transform=None):
self.db_path = db_path
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
# self.length = txn.stat()['entries'] - 1
self.length = pa.deserialize(txn.get(b'__len__'))
self.keys = pa.deserialize(txn.get(b'__keys__'))
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img, target = None, None
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(self.keys[index])
unpacked = pa.deserialize(byteflow)
# load image
imgbuf = unpacked[0]
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert('RGB')
# load label
target = unpacked[1]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
def raw_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
return bin_data
def dumps_pyarrow(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object
"""
return pa.serialize(obj).to_buffer()
def folder2lmdb(path, outpath, write_frequency=5000):
directory = os.path.expanduser(path)
print("Loading dataset from %s" % directory)
dataset = ImageFolder(directory, loader=raw_reader)
data_loader = DataLoader(dataset, num_workers=16, collate_fn=lambda x: x)
lmdb_path = os.path.expanduser(outpath)
isdir = os.path.isdir(lmdb_path)
print("Generate LMDB to %s" % lmdb_path)
db = lmdb.open(lmdb_path, subdir=isdir,
map_size=1099511627776 * 2, readonly=False,
meminit=False, map_async=True)
txn = db.begin(write=True)
for idx, data in enumerate(data_loader):
image, label = data[0]
txn.put(u'{}'.format(idx).encode('ascii'), dumps_pyarrow((image, label)))
if idx % write_frequency == 0:
print("[%d/%d]" % (idx, len(data_loader)))
txn.commit()
txn = db.begin(write=True)
# finish iterating through dataset
txn.commit()
keys = [u'{}'.format(k).encode('ascii') for k in range(idx + 1)]
with db.begin(write=True) as txn:
txn.put(b'__keys__', dumps_pyarrow(keys))
txn.put(b'__len__', dumps_pyarrow(len(keys)))
print("Flushing database ...")
db.sync()
db.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", help="Path to original image dataset folder")
parser.add_argument("-o", "--outpath", help="Path to output LMDB file")
args = parser.parse_args()
folder2lmdb(args.dataset, args.outpath)
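# Usage sketch (illustrative; paths are hypothetical, not from the source):
#
#     python lmdb_loader.py -d /data/imagenet/train -o /data/imagenet/train.lmdb
#
# and later, loading the converted dataset for training:
#
#     dataset = ImageFolderLMDB('/data/imagenet/train.lmdb',
#                               transform=transforms.ToTensor())
#     loader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)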
| 29.88
| 88
| 0.636948
|
9cc3d2f74e6fc16d20c9c59555224db3e06d7468
| 4,692
|
py
|
Python
|
Classification_emotion.py
|
brendaspears/Intelligent-System-Final-Project
|
897f098e2dc6ebbf0b60de20d37092444d52c579
|
[
"MIT"
] | null | null | null |
Classification_emotion.py
|
brendaspears/Intelligent-System-Final-Project
|
897f098e2dc6ebbf0b60de20d37092444d52c579
|
[
"MIT"
] | null | null | null |
Classification_emotion.py
|
brendaspears/Intelligent-System-Final-Project
|
897f098e2dc6ebbf0b60de20d37092444d52c579
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense,Dropout,Activation,Flatten,BatchNormalization
from keras.layers import Conv2D,MaxPooling2D
import os
num_classes = 5
img_rows,img_cols = 48,48
batch_size = 32
train_data_dir = 'C:/Users/Jennifer I/Desktop/Semester 4/Intelligent System/EmotionTest/Emotion Detector/data/train'
validation_data_dir = 'C:/Users/Jennifer I/Desktop/Semester 4/Intelligent System/EmotionTest/Emotion Detector/data/val'
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=30,
shear_range=0.3,
zoom_range=0.3,
width_shift_range=0.4,
height_shift_range=0.4,
horizontal_flip=True,
fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
color_mode='grayscale',
target_size=(img_rows,img_cols),
batch_size=batch_size,
class_mode='categorical',
shuffle=True)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
color_mode='grayscale',
target_size=(img_rows,img_cols),
batch_size=batch_size,
class_mode='categorical',
shuffle=True)
model = Sequential()
# Block-1
model.add(Conv2D(32,(3,3),padding='same',kernel_initializer='he_normal',input_shape=(img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(32,(3,3),padding='same',kernel_initializer='he_normal',input_shape=(img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
# Block-2
model.add(Conv2D(64,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
# Block-3
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
# Block-4
model.add(Conv2D(256,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(256,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
# Block-5
model.add(Flatten())
model.add(Dense(64,kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Block-6
model.add(Dense(64,kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Block-7
model.add(Dense(num_classes,kernel_initializer='he_normal'))
model.add(Activation('softmax'))
print(model.summary())
from keras.optimizers import RMSprop,SGD,Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
checkpoint = ModelCheckpoint('Emotions_vgg.h5',
monitor='val_loss',
mode='min',
save_best_only=True,
verbose=1)
earlystop = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=3,
verbose=1,
restore_best_weights=True
)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.2,
patience=3,
verbose=1,
min_delta=0.0001)
callbacks = [earlystop,checkpoint,reduce_lr]
model.compile(loss='categorical_crossentropy',
optimizer = Adam(lr=0.001),
metrics=['accuracy'])
nb_train_samples = 24176
nb_validation_samples = 3006
epochs=25
history=model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples//batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=validation_generator,
validation_steps=nb_validation_samples//batch_size)
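# Inference sketch (illustrative; assumes the 'Emotions_vgg.h5' checkpoint saved
# by the ModelCheckpoint callback above and a 48x48 grayscale, rescaled input):
#
#     from keras.models import load_model
#     import numpy as np
#     model = load_model('Emotions_vgg.h5')
#     face = np.random.rand(1, 48, 48, 1).astype('float32')  # placeholder input
#     probs = model.predict(face)               # shape (1, 5), one score per class
#     predicted_class = int(np.argmax(probs))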
| 22.666667
| 119
| 0.680733
|
294c66a9f83a6444f3a7cfdc87e65ff75f718c20
| 3,341
|
py
|
Python
|
main.py
|
yE-os/lenovo_auto_signin
|
10b26a29cc43c56a9cec6ad8ecb6a730769acf9c
|
[
"Apache-2.0"
] | 2
|
2021-04-04T05:31:20.000Z
|
2021-04-05T23:36:01.000Z
|
main.py
|
yE-os/lenovo_auto_signin
|
10b26a29cc43c56a9cec6ad8ecb6a730769acf9c
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
yE-os/lenovo_auto_signin
|
10b26a29cc43c56a9cec6ad8ecb6a730769acf9c
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
from bs4 import BeautifulSoup
import json
USERNAME = '17607096003'
PASSWORD = 'yyl19980316'
WID = 'wwbbcdca597778242d'
SECRET = 'sqYysdzza56QTzdJBTTRhRlOUjdrGThLT8mCMqom5lU'
ID = '1000002'
HEADER_GET = {
"user-agent": "Mozilla/5.0 (Linux; Android 11; Mi 10 Build/RKQ1.200826.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/86.0.4240.185 Mobile Safari/537.36/lenovoofficialapp/16112154380982287_10181446134/newversion/versioncode-124/"
}
HEADER_COUNT = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36",
}
def login():
url = "https://reg.lenovo.com.cn/auth/v3/dologin"
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36",
"Host": "reg.lenovo.com.cn",
"Referer": "https://www.lenovo.com.cn/",
'Cookie': 'LA_F_T_10000001=1614393605462; LA_C_Id=_ck21022710400514675618549440548; LA_M_W_10000001=_ck21022710400514675618549440548%7C10000001%7C%7C%7C; LA_C_C_Id=_sk202102271040090.05206000.3687; _ga=GA1.3.1245350653.1614393605; leid=1.VljlpE1LZ7I; LA_F_T_10000231=1614395016398; LA_R_T_10000231=1614395016398; LA_V_T_10000231=1614395016398; LA_M_W_10000231=_ck21022710400514675618549440548%7C10000231%7C%7C%7C; LA_R_C_10000001=1; LA_R_T_10000001=1614593722192; LA_V_T_10000001=1614593722192; _gid=GA1.3.1974081891.1614593723; _gat=1; ar=1'
}
data = {"account": USERNAME, "password": PASSWORD, "ticket": "e40e7004-4c8a-4963-8564-31271a8337d8"}
session = requests.Session()
r = session.post(url, headers=header, data=data)
    if r.text.find("cerpreg-passport") == -1:  # return None if the expected login cookie is missing
return None
return session
def signin(session):
signin = session.get("https://i.lenovo.com.cn/signIn/add.jhtml?sts=e40e7004-4c8a-4963-8564-31271a8337d8",headers=HEADER_GET)
check = str(signin.text)
if "true" in check:
        if "乐豆" in check:  # "乐豆" (reward points) appears only on a fresh sign-in
            print("Sign-in successful")
        else:
            print("Already signed in, please do not sign in again")
    else:
        print("Sign-in failed, please retry")
def getContinuousDays(session):
url = "https://club.lenovo.com.cn/signlist/"
c = session.get(url,headers=HEADER_COUNT)
soup = BeautifulSoup(c.text,"html.parser")
day = soup.select("body > div.signInMiddleWrapper > div > div.signInTimeInfo > div.signInTimeInfoMiddle > p.signInTimeMiddleBtn")
day = day[0].get_text()
return day
def getkey():
url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=%s&corpsecret=%s'%(WID, SECRET)
getkey = requests.get(url)
return getkey.text
def push(token,message):
url = "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=%s&debug=1"%token
json = {
"touser": "@all",
"msgtype": "textcard",
"agentid": ID,
"textcard": {
"title": "联想商城签到情况",
"description": "%s"%message,
"url": "https://www.locjj.com"
},
"safe": "1"
}
push = requests.post(url,json=json)
return push.text
if __name__ == '__main__':
token = json.loads(getkey())['access_token']
s = login()
if not s:
push(token,"登录失败,请检查账号密码")
else:
signin(s)
day = getContinuousDays(s)
print(push(token,day))
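# Note (illustrative): getkey() calls the WeChat Work "gettoken" endpoint, which
# returns JSON roughly of the form
#     {"errcode": 0, "errmsg": "ok", "access_token": "...", "expires_in": 7200}
# so json.loads(getkey())['access_token'] extracts the token that push() appends
# to the message/send URL.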
| 39.77381
| 550
| 0.676743
|
9c9210d6548dc4680abd7c7085a692c5373c4c46
| 4,617
|
py
|
Python
|
src/compas_igs/ui/Rhino/IGS/dev/IGS_unified_diagram_cmd.py
|
BlockResearchGroup/compas-IGS
|
b40698466b91c867600b94ae2530b19d336ad1b0
|
[
"MIT"
] | 1
|
2021-11-03T23:22:37.000Z
|
2021-11-03T23:22:37.000Z
|
src/compas_igs/ui/Rhino/IGS/dev/IGS_unified_diagram_cmd.py
|
BlockResearchGroup/compas-IGS
|
b40698466b91c867600b94ae2530b19d336ad1b0
|
[
"MIT"
] | 1
|
2021-11-10T03:27:58.000Z
|
2021-11-17T13:51:17.000Z
|
src/compas_igs/ui/Rhino/IGS/dev/IGS_unified_diagram_cmd.py
|
BlockResearchGroup/compas-IGS
|
b40698466b91c867600b94ae2530b19d336ad1b0
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas
from compas.geometry import centroid_points
from compas.geometry import subtract_vectors
from compas.geometry import scale_vector
from compas.geometry import distance_point_point_xy
import scriptcontext as sc
import compas_rhino
from compas_igs.rhino import mesh_ud
try:
import Rhino
import rhinoscriptsyntax as rs
except ImportError:
compas.raise_if_ironpython()
__commandname__ = "IGS_unified_diagram"
def RunCommand(is_interactive):
if 'IGS' not in sc.sticky:
compas_rhino.display_message('IGS has not been initialised yet.')
return
scene = sc.sticky['IGS']['scene']
objects = scene.find_by_name('Form')
if not objects:
compas_rhino.display_message("There is no FormDiagram in the scene.")
return
form = objects[0]
objects = scene.find_by_name('Force')
if not objects:
compas_rhino.display_message("There is no ForceDiagram in the scene.")
return
force = objects[0]
# translation
form_center = centroid_points(form.vertex_xyz.values())
force_center = centroid_points(force.vertex_xyz.values())
translation = subtract_vectors(force_center, form_center)
# get scale
go = Rhino.Input.Custom.GetOption()
go.SetCommandPrompt("Enter scale for unified diagram (press ESC to exit)")
go.AcceptNothing(True)
scale_opt = Rhino.Input.Custom.OptionDouble(0.50, 0.01, 0.99)
go.AddOptionDouble("Alpha", scale_opt)
    # helper that redraws the unified diagram at a given scale
def _draw_ud(form, force, translation=translation, scale=0.5):
compas_rhino.clear_layer(force.layer)
# 2. compute unified diagram geometries
geometry = mesh_ud(form, force, translation=translation, scale=scale)
if not geometry:
return
faces, bars = geometry
# 3. draw polygons
for face, face_xyz in faces.items():
count = len(face_xyz)
filtered_xyz = []
for i in range(-1, count - 1):
if distance_point_point_xy(face_xyz[i], face_xyz[i + 1]) < 0.01:
continue
filtered_xyz.append(face_xyz[i])
if len(filtered_xyz) == 2:
line = {'start': filtered_xyz[0], 'end': filtered_xyz[1], 'layer': force.layer}
compas_rhino.draw_lines([line])
continue
compas_rhino.draw_mesh(filtered_xyz, [range(len(filtered_xyz))], layer=force.layer, name=str(face), redraw=False)
# 4. draw bars
bar_colors = {}
for edge in force.diagram.edges_where_dual({'is_external': False}):
if force.diagram.dual_edge_force(edge) > + force.settings['tol.forces']:
bar_colors[edge] = force.settings['color.tension']
elif force.diagram.dual_edge_force(edge) < - force.settings['tol.forces']:
bar_colors[edge] = force.settings['color.compression']
for bar, bar_xyz in bars.items():
count = len(bar_xyz)
filtered_xyz = []
for i in range(-1, count - 1):
if distance_point_point_xy(bar_xyz[i], bar_xyz[i + 1]) < 0.01:
continue
filtered_xyz.append(bar_xyz[i])
if len(filtered_xyz) == 2:
line = {'start': filtered_xyz[0], 'end': filtered_xyz[1], 'layer': force.layer}
compas_rhino.draw_lines([line])
continue
compas_rhino.draw_mesh(filtered_xyz, [range(len(filtered_xyz))], layer=force.layer, name=str(bar), color=bar_colors[bar], redraw=False)
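    # Illustrative note (not part of the original command): the two loops above
    # drop a point when it lies within 0.01 of the next point in the XY plane,
    # e.g. face_xyz = [(0, 0, 0), (0.005, 0, 0), (1, 0, 0), (1, 1, 0)] keeps
    # three vertices because (0, 0, 0) is too close to its successor. If only
    # two points survive, a line is drawn instead of a mesh face.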
# unified diagram
while True:
rs.EnableRedraw(True)
opt = go.Get()
scale = scale_opt.CurrentValue
if not opt:
print("The scale for unified diagram needs to be between 0.01 and 0.99!")
if opt == Rhino.Input.GetResult.Cancel: # esc
keep = rs.GetBoolean("Keep unified diagram? (press ESC to exit)", [("Copy", "No", "Yes")], (False))
scene.clear_layers()
if keep and keep[0]:
_draw_ud(form, force, translation=scale_vector(translation, 2.5), scale=scale)
scene.update()
scene.save()
return
_draw_ud(form, force, translation=translation, scale=scale)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
RunCommand(True)
| 33.948529
| 147
| 0.610136
|
d4c4ef0f4b28aaf8b854c1a983be675041943305
| 1,444
|
py
|
Python
|
setup.py
|
stweil/sbb_ner
|
319c29fc96667937f85d2cba111902386c95ba23
|
[
"Apache-2.0"
] | 9
|
2019-08-27T16:13:17.000Z
|
2021-06-18T06:58:25.000Z
|
setup.py
|
stweil/sbb_ner
|
319c29fc96667937f85d2cba111902386c95ba23
|
[
"Apache-2.0"
] | 2
|
2020-01-13T12:50:37.000Z
|
2022-01-28T10:51:06.000Z
|
setup.py
|
stweil/sbb_ner
|
319c29fc96667937f85d2cba111902386c95ba23
|
[
"Apache-2.0"
] | 1
|
2019-09-07T20:40:09.000Z
|
2019-09-07T20:40:09.000Z
|
from io import open
from setuptools import find_packages, setup
with open('requirements.txt') as fp:
install_requires = fp.read()
setup(
name="qurator-sbb-ner",
version="0.0.1",
author="The Qurator Team",
author_email="qurator@sbb.spk-berlin.de",
description="Qurator",
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords='qurator',
license='Apache',
url="https://qurator.ai",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=install_requires,
entry_points={
'console_scripts': [
"compile_europeana_historic=qurator.sbb_ner.ground_truth.europeana_historic:main",
"compile_germ_eval=qurator.sbb_ner.ground_truth.germeval:main",
"compile_conll=qurator.sbb_ner.ground_truth.conll:main",
"compile_wikiner=qurator.sbb_ner.ground_truth.wikiner:main",
"join-gt=qurator.sbb_ner.ground_truth.join_gt:main",
"bert-ner=qurator.sbb_ner.models.bert:main"
]
},
python_requires='>=3.6.0',
tests_require=['pytest'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| 37.025641
| 90
| 0.653047
|
c92f6741082316ef813f51e11ee2273268685838
| 8,559
|
py
|
Python
|
ros/src/waypoint_updater/waypoint_updater.py
|
anonymint/udacity-self-driving-p13-capstone
|
16ae005bd985dd7ef9dbd1e35047f91f0c3bdaef
|
[
"MIT"
] | null | null | null |
ros/src/waypoint_updater/waypoint_updater.py
|
anonymint/udacity-self-driving-p13-capstone
|
16ae005bd985dd7ef9dbd1e35047f91f0c3bdaef
|
[
"MIT"
] | 3
|
2018-08-20T03:42:38.000Z
|
2018-08-21T14:11:41.000Z
|
ros/src/waypoint_updater/waypoint_updater.py
|
anonymint/udacity-self-driving-p13-capstone
|
16ae005bd985dd7ef9dbd1e35047f91f0c3bdaef
|
[
"MIT"
] | 3
|
2018-08-15T11:58:43.000Z
|
2018-08-24T04:32:58.000Z
|
#!/usr/bin/env python
"""
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
"""
import math
import numpy as np
import rospy
from std_msgs.msg import Int32
from scipy.spatial import KDTree
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
# Number of waypoints we will publish.
_NUM_WAYPOINTS_AHEAD = 200
# Spin frequency in hertz.
_SPIN_FREQUENCY = 50
# Waypoint cushion from targeted stopline before traffic light or obstacle.
_STOP_CUSHION = 3
# Maximum deceleration
_MAX_DECEL = 0.5
class WaypointUpdater(object):
"""
This node publishes waypoints from the car's current position to some distance ahead.
"""
def __init__(self):
rospy.init_node('waypoint_updater')
# Subscribers.
rospy.Subscriber('/current_pose', PoseStamped, self.pose_callback)
rospy.Subscriber('/base_waypoints', Lane, self.base_waypoints_callback)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_callback)
rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_callback)
# Publishers.
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# Member variables.
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoints_tree = None
self.traffic_light_wp_idx = None
self.obstacle_wp_idx = None
def spin(self, freq):
"""
Spins this ROS node based on the given frequency.
:param freq: frequency in hertz.
"""
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints:
# Get the closest waypoint and publish it.
self.publish_waypoints(self.get_closest_waypoint_idx())
rate.sleep()
def get_closest_waypoint_idx(self):
"""
Gets the index of the closest waypoint.
:return: index of the closest waypoint.
"""
x = self.pose.pose.position.x
y = self.pose.pose.position.y
# The first 1 is for closest. The second 1 is for the index element.
closest_idx = self.waypoints_tree.query([x, y], 1)[1]
# Check if the closest waypoint is ahead or behind the ego car.
closest_2d = self.waypoints_2d[closest_idx]
prev_2d = self.waypoints_2d[closest_idx - 1]
closest_vect = np.array(closest_2d)
prev_vector = np.array(prev_2d)
curr_vector = np.array([x, y])
if np.dot(closest_vect - prev_vector, curr_vector - closest_vect) > 0:
# The closest waypoint is behind. Pick the next index.
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self, index):
"""
Publishes the waypoints to ROS.
:param index of the first waypoint.
"""
self.final_waypoints_pub.publish(self.get_final_lane(index))
def get_final_lane(self, closest_wp_idx):
"""
Updates final lane's waypoints based on traffic light or obstacle waypoint index.
:return: lane with waypoints updated with decelerating linear velocity.
"""
lane = Lane()
lane.header = self.base_waypoints.header
farthest_wp_idx = closest_wp_idx + _NUM_WAYPOINTS_AHEAD
sliced_base_waypoints = self.base_waypoints.waypoints[closest_wp_idx:farthest_wp_idx]
# Determine if vehicle is clear from traffic light and obstacle.
traffic_light_clear = (self.traffic_light_wp_idx is None or
self.traffic_light_wp_idx == -1 or
self.traffic_light_wp_idx >= farthest_wp_idx)
obstacle_clear = (self.obstacle_wp_idx is None or
self.obstacle_wp_idx == -1 or
self.obstacle_wp_idx >= farthest_wp_idx)
if traffic_light_clear and obstacle_clear:
# No traffic light or obstacle detected.
lane.waypoints = sliced_base_waypoints
else:
if not traffic_light_clear and obstacle_clear:
# Only traffic light is detected.
target_wp_idx = self.traffic_light_wp_idx
elif traffic_light_clear and not obstacle_clear:
# Only obstacle is detected.
target_wp_idx = self.obstacle_wp_idx
else:
# Both traffic light and obstacle are detected.
target_wp_idx = min(self.traffic_light_wp_idx, self.obstacle_wp_idx)
lane.waypoints = self.decelerate_waypoints(sliced_base_waypoints, target_wp_idx - closest_wp_idx)
return lane
@staticmethod
def decelerate_waypoints(sliced_base_waypoints, stop_idx):
"""
Loops through base waypoints to update the linear velocity base on deceleration with
respect to the targeting stop waypoint.
:return: list of waypoints with updated linear velocity.
"""
decel_wp = []
stop_idx = max(stop_idx - _STOP_CUSHION, 0)
# Loop through each base_waypoint to adjust its linear velocity x.
for i, wp in enumerate(sliced_base_waypoints):
p = Waypoint()
# Position of waypoint won't change.
p.pose = wp.pose
# To decelerate from speed v to 0 in a distance of s:
            # s = v^2 / (2 * a)  =>  v = sqrt(2 * a * s)
dist = WaypointUpdater.waypoint_distance(sliced_base_waypoints, i, stop_idx)
vel = math.sqrt(2 * _MAX_DECEL * dist)
if vel < 1.:
vel = 0.
WaypointUpdater.set_waypoint_velocity(p, min(vel, WaypointUpdater.get_waypoint_velocity(wp)))
decel_wp.append(p)
return decel_wp
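    # Worked example for the comment above (illustrative numbers): with
    # _MAX_DECEL = 0.5, a waypoint 16 m before the stop index gets
    # v = sqrt(2 * 0.5 * 16) = 4.0 m/s, while at 0.5 m the formula gives
    # sqrt(0.5) ~ 0.71 m/s, which the "vel < 1" check clamps to 0. The result
    # is additionally capped at the waypoint's original target velocity.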
def pose_callback(self, pose):
"""
Pose subscriber callback function.
"""
self.pose = pose
def base_waypoints_callback(self, base_waypoints):
"""
Base waypoints subscriber callback function.
The publisher has latch set to True, which means this message will be received only once.
"""
# Get the waypoints in X, Y plane and set up the KDTree for efficient comparison.
self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y]
for w in base_waypoints.waypoints]
self.waypoints_tree = KDTree(self.waypoints_2d)
self.base_waypoints = base_waypoints
rospy.loginfo('base_waypoints initialized')
def traffic_callback(self, data):
"""
Traffic waypoints subscriber callback function.
"""
self.traffic_light_wp_idx = data.data
def obstacle_callback(self, data):
"""
Obstacle waypoints subscriber callback function.
"""
        self.obstacle_wp_idx = data.data
@staticmethod
def get_waypoint_velocity(waypoint):
"""
Get the longitudinal velocity from a waypoint.
"""
return waypoint.twist.twist.linear.x
@staticmethod
def set_waypoint_velocity(waypoint, velocity):
"""
Sets the longitudinal velocity on a waypoint.
"""
waypoint.twist.twist.linear.x = velocity
@staticmethod
def waypoint_distance(waypoints, wp1, wp2):
"""
Gets piece-wise sum of the distances between adjacent waypoints.
:param waypoints: waypoint list
:param wp1: start index
:param wp2: end index
"""
dist = 0
dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)
for i in range(wp1, wp2 - 1):
dist += dl(waypoints[i].pose.pose.position, waypoints[i + 1].pose.pose.position)
return dist
if __name__ == '__main__':
try:
WaypointUpdater().spin(_SPIN_FREQUENCY)
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| 36.576923
| 109
| 0.644234
|
63af11c7d7b62ff2ce77e12daf355833f85f46e9
| 7,649
|
py
|
Python
|
tests/modules/test_processing.py
|
geomatikzh/openadms-node
|
aedfdc0b20e5e2cd668090f97a6bbb0b9c59d658
|
[
"BSD-2-Clause"
] | null | null | null |
tests/modules/test_processing.py
|
geomatikzh/openadms-node
|
aedfdc0b20e5e2cd668090f97a6bbb0b9c59d658
|
[
"BSD-2-Clause"
] | null | null | null |
tests/modules/test_processing.py
|
geomatikzh/openadms-node
|
aedfdc0b20e5e2cd668090f97a6bbb0b9c59d658
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3.6
"""Tests the classes of the processing modules."""
__author__ = 'Philipp Engel'
__copyright__ = 'Copyright (c) 2017 Hochschule Neubrandenburg'
__license__ = 'BSD-2-Clause'
from typing import List
import pytest
from testfixtures import LogCapture
from core.observation import Observation
from modules.processing import (PreProcessor, ResponseValueInspector,
ReturnCodeInspector, UnitConverter)
@pytest.fixture(scope='module')
def pre_processor(manager) -> PreProcessor:
"""Returns a PreProcessor object.
Args:
manager (Manager): Instance of ``core.Manager``.
Returns:
An instance of class ``module.processing.PreProcessor``.
"""
return PreProcessor('preProcessor',
'modules.processing.PreProcessor',
manager)
@pytest.fixture(scope='module')
def rv_inspector(manager) -> ResponseValueInspector:
"""Returns a ResponseValueInspector object.
Args:
manager (Manager): Instance of ``core.Manager``.
Returns:
An instance of class ``module.processing.ResponseValueInspector``.
"""
return ResponseValueInspector('responseValueInspector',
'modules.processing.ResponseValueInspector',
manager)
@pytest.fixture(scope='module')
def rc_inspector(manager) -> ReturnCodeInspector:
"""Returns a ReturnCodeInspector object.
Args:
manager (Manager): Instance of ``core.Manager``.
Returns:
An instance of class ``module.processing.ReturnCodeInspector``.
"""
return ReturnCodeInspector('returnCodeInspector',
'modules.processing.ReturnCodeInspector',
manager)
@pytest.fixture(scope='module')
def unit_converter(manager) -> UnitConverter:
"""Returns a UnitConverter object.
Args:
manager (Manager): Instance of ``core.Manager``.
Returns:
An instance of class ``module.processing.UnitConverter``.
"""
return UnitConverter('unitConverter',
'modules.processing.UnitConverter',
manager)
class TestPreProcessor:
"""
Test for the ``module.processing.PreProcessor`` class.
"""
def test_process_observation(self,
pre_processor: PreProcessor,
observations: List[Observation]) -> None:
"""Tests the processing of observations."""
obs_in = observations[0]
obs_out = pre_processor.process_observation(obs_in)
assert obs_out.get_response_value('temperature') == 23.1
assert obs_out.get_response_value('pressure') == 1011.3
def test_is_float(self, pre_processor: PreProcessor) -> None:
assert pre_processor.is_float('10.5') is True
assert pre_processor.is_float('foo') is False
def test_is_int(self, pre_processor: PreProcessor) -> None:
        assert pre_processor.is_int('10') is True
        assert pre_processor.is_int('10.5') is False
        assert pre_processor.is_int('foo') is False
def test_sanitize(self, pre_processor: PreProcessor) -> None:
assert pre_processor.sanitize('\n\r\t') == '\\n\\r\\t'
def test_to_float(self, pre_processor: PreProcessor) -> None:
assert pre_processor.to_float('10,5') == 10.5
assert pre_processor.to_float('0.9995') == 0.9995
assert pre_processor.to_float('foo') is None
def test_to_int(self, pre_processor: PreProcessor) -> None:
assert pre_processor.to_int('10') == 10
assert pre_processor.to_int('10.5') is None
assert pre_processor.to_int('foo') is None
class TestResponseValueInspector:
"""
Test for the ``module.processing.ResponseValueInspector`` class.
"""
def test_process_observation(self,
rv_inspector: ResponseValueInspector,
observations: List[Observation]) -> None:
"""Check whether valid log messages are created."""
obs = observations[1]
obs_name = obs.get('name')
obs_target = obs.get('target')
response_name = 'slopeDist'
min_val = 10.0
max_val = 100.0
valid_val = 25.0
lt_min_val = 0.0
gt_max_val = 200.0
with LogCapture() as log_capture:
# Test 1 (observation undefined).
obs.data['name'] = 'test'
rv_inspector.process_observation(obs)
# Test 2 (invalid response type).
obs.data['name'] = obs_name
obs.data['responseSets']['slopeDist']['value'] = 'test'
rv_inspector.process_observation(obs)
# Test 3 (success).
obs.data['responseSets']['slopeDist']['value'] = valid_val
rv_inspector.process_observation(obs)
# Test 4 (response value less than minimum).
obs.data['responseSets']['slopeDist']['value'] = lt_min_val
rv_inspector.process_observation(obs)
# Test 5 (response value greater than maximum).
obs.data['responseSets']['slopeDist']['value'] = gt_max_val
rv_inspector.process_observation(obs)
# Capture log messages.
log_capture.check(
(rv_inspector.name,
'WARNING',
f'Undefined observation "test" of target "{obs_target}"'),
(rv_inspector.name,
'WARNING',
f'Response value "{response_name}" in observation '
f'"{obs_name}" of target "{obs_target}" is not a number'),
(rv_inspector.name,
'DEBUG',
f'Response value "{response_name}" in observation '
f'"{obs_name}" of target "{obs_target}" is within limits'),
(rv_inspector.name,
'CRITICAL',
f'Response value "{response_name}" in observation '
f'"{obs_name}" of target "{obs_target}" is less than '
f'minimum ({lt_min_val} < {min_val})'),
(rv_inspector.name,
'CRITICAL',
f'Response value "{response_name}" in observation '
f'"{obs_name}" of target "{obs_target}" is greater than '
f'maximum ({gt_max_val} > {max_val})')
)
def test_is_number(self,
rv_inspector: ResponseValueInspector) -> None:
assert rv_inspector.is_number('10') is True
assert rv_inspector.is_number('10.5') is True
assert rv_inspector.is_number('foo') is False
class TestReturnCodeInspector:
"""
Test for the ``module.processing.ReturnCodeInspector`` class.
"""
def test_process_observation(self,
rc_inspector: ReturnCodeInspector,
observations: List[Observation]) -> None:
obs = rc_inspector.process_observation(observations[1])
assert obs.data['corrupted'] is True
obs.data['responseSets']['returnCode']['value'] = 0
obs = rc_inspector.process_observation(obs)
assert obs.data['corrupted'] is False
assert obs.data['nextReceiver'] == 1
class TestUnitConverter:
def test_process_observation(self,
unit_converter: UnitConverter,
observations: List[Observation]) -> None:
pass
def test_scale(self, unit_converter: UnitConverter) -> None:
assert unit_converter.scale(10, 10) == 100
| 34.768182
| 78
| 0.599425
|
f4c01d21c61a514047eaff9d29806222f40f5052
| 1,587
|
py
|
Python
|
lib/rotaryencoder.py
|
jacoblb64/pico_rgb_keypad_hid
|
3251ca6a98ef86d9f98c54f639c4d61810601a0b
|
[
"MIT"
] | 47
|
2021-02-15T23:02:36.000Z
|
2022-03-04T21:30:03.000Z
|
lib/rotaryencoder.py
|
jacoblb64/pico_rgb_keypad_hid
|
3251ca6a98ef86d9f98c54f639c4d61810601a0b
|
[
"MIT"
] | 7
|
2021-02-19T20:00:08.000Z
|
2022-01-14T10:51:12.000Z
|
lib/rotaryencoder.py
|
jacoblb64/pico_rgb_keypad_hid
|
3251ca6a98ef86d9f98c54f639c4d61810601a0b
|
[
"MIT"
] | 14
|
2021-02-20T17:40:56.000Z
|
2022-01-01T19:53:38.000Z
|
import time
import board
from digitalio import DigitalInOut, Direction, Pull
ROTARY_NO_MOTION = 0
ROTARY_CCW = 1
ROTARY_CW = 2
class RotaryEncoder:
def timeInMillis(self):
return int(time.monotonic() * 1000)
def __init__(self, aPin=board.GP12, bPin=board.GP10, bluePin=board.GP14):
self.encoderAPin = DigitalInOut(aPin)
self.encoderAPin.direction = Direction.INPUT
self.encoderAPin.pull = Pull.UP
self.encoderBPin = DigitalInOut(bPin)
self.encoderBPin.direction = Direction.INPUT
self.encoderBPin.pull = Pull.UP
self.loopTime = self.timeInMillis()
self.encoderA_prev = 0
# https://www.hobbytronics.co.uk/arduino-tutorial6-rotary-encoder
def read(self):
event = ROTARY_NO_MOTION
# get the current elapsed time
currentTime = self.timeInMillis()
if currentTime >= (self.loopTime + 5):
# 5ms since last check of encoder = 200Hz
encoderA = self.encoderAPin.value
encoderB = self.encoderBPin.value
if (not encoderA) and (self.encoderA_prev):
# encoder A has gone from high to low
# CW and CCW determined
if encoderB:
                    # A fell while B is high, so report clockwise
event = ROTARY_CW
else:
                    # A fell while B is low, so report counter-clockwise
event = ROTARY_CCW
self.encoderA_prev = encoderA # Store value of A for next time
self.loopTime = currentTime
return event
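# Usage sketch (illustrative, not from the original file; uses the default pins
# declared above):
#
#     encoder = RotaryEncoder()
#     while True:
#         turn = encoder.read()      # internally rate-limited to one check per 5 ms
#         if turn == ROTARY_CW:
#             print("clockwise step")
#         elif turn == ROTARY_CCW:
#             print("counter-clockwise step")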
| 33.765957
| 77
| 0.604915
|
9f58c1ef62d38d358f3768f3c4eaccaaf713ee28
| 636
|
py
|
Python
|
{{cookiecutter.profile_name}}/lsf_status.py
|
iromeo/generic-enhanced
|
c4b6d3972346197551228030352f554e521fa82b
|
[
"MIT"
] | null | null | null |
{{cookiecutter.profile_name}}/lsf_status.py
|
iromeo/generic-enhanced
|
c4b6d3972346197551228030352f554e521fa82b
|
[
"MIT"
] | null | null | null |
{{cookiecutter.profile_name}}/lsf_status.py
|
iromeo/generic-enhanced
|
c4b6d3972346197551228030352f554e521fa82b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import subprocess
jobid = sys.argv[1]
# print("Checking status for Job ID <" + jobid + ">...", file=sys.stderr)
out = subprocess.run(
# fix output format using -o because user's columns order could be custom
['bjobs', '-noheader', '-o', 'stat:', jobid],
stdout=subprocess.PIPE
).stdout.decode('utf-8')
state = out.strip()
map_state = {
"PEND": 'running',
"RUN": 'running',
"PROV": "running",
"WAIT": 'running',
"DONE": 'success',
"": 'success'
}
# print("Job ID <" + jobid + "> state is <" + state + ">", file=sys.stderr)
print(map_state.get(state, 'failed'))
| 22.714286
| 77
| 0.602201
|
72b9db5ea31fa1e37a4b9f8139106e1f4c7110cf
| 61,370
|
py
|
Python
|
test/test_site_api.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
test/test_site_api.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
test/test_site_api.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the client during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with a link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
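As a hedged illustration of the Basic Authorization and `Token` headers described above, together with the collection `POST` behavior, the sketch below talks to the REST endpoints with the third-party `requests` package; the host, credentials, and the `sites` payload are placeholder assumptions rather than values prescribed by this API.

```python
# Sketch only: assumes the 'requests' package and a console reachable at
# nexpose.example.com:3780; credentials and payload values are placeholders.
import requests

BASE = 'https://nexpose.example.com:3780/api/3'
AUTH = ('api-user', 'api-password')    # sent as HTTP Basic Authorization
HEADERS = {'Token': '123456'}          # only required when 2FA is enabled

# POST to a collection resource: the Security Console generates the identifier
# and answers 201 CREATED with a Location header pointing at the new resource.
payload = {'name': 'Example site', 'description': 'Created through the REST API'}
resp = requests.post(f'{BASE}/sites', json=payload, auth=AUTH, headers=HEADERS, verify=False)

if resp.status_code == 201:
    print('created:', resp.headers.get('Location'))
elif resp.status_code == 202:
    print('accepted, job resource at:', resp.headers.get('Location'))
else:
    print('error:', resp.status_code, resp.json().get('message'))
```

The `Location` header is the canonical way to reach the newly created (or queued) resource; `verify=False` merely stands in for pointing `requests` at the console's actual TLS certificate chain.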
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
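As a small standard-library sketch (the variable names are illustrative only, not part of this API), producing values in the accepted shapes from Python `datetime` objects might look like this:

```python
# Sketch: building ISO 8601 date/time strings in the forms this API accepts.
from datetime import datetime, timezone

date_only = datetime(2017, 12, 3).strftime('%Y-%m-%d')                                               # 2017-12-03
utc_instant = datetime(2017, 12, 3, 10, 15, 30, tzinfo=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')  # 2017-12-03T10:15:30Z
with_offset = datetime(2017, 12, 3, 10, 15, 30).astimezone().isoformat(timespec='seconds')           # e.g. 2017-12-03T10:15:30-08:00
print(date_only, utc_instant, with_offset)
```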
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate, link objects may also contain additional properties beyond `rel` and `href`, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
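Tying the paging, sorting, and hypermedia conventions together, the following hedged sketch (same placeholder host and credentials as above, with `sites` standing in for any pageable collection) walks every page by following the `next` relation advertised in `links`:

```python
# Sketch: iterate a paginated collection by following the 'next' hypermedia link.
import requests

BASE = 'https://nexpose.example.com:3780/api/3'   # placeholder console address
AUTH = ('api-user', 'api-password')               # placeholder credentials

def iter_collection(resource, size=100, sort='id,ASC'):
    url = f'{BASE}/{resource}'
    params = {'page': 0, 'size': size, 'sort': sort}
    while url:
        body = requests.get(url, params=params, auth=AUTH, verify=False).json()
        for item in body.get('resources', []):
            yield item
        # The 'next' link carries every parameter needed to reach the following
        # page, so subsequent requests pass no explicit query parameters.
        url = next((link['href'] for link in body.get('links', []) if link.get('rel') == 'next'), None)
        params = None

for site in iter_collection('sites'):
    print(site.get('id'), site.get('name'))
```

When the last page has been consumed no `next` relation is present and iteration stops; explicitly requesting a page beyond the end would instead return `404 NOT FOUND`, as noted above.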
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.site_api import SiteApi # noqa: E501
from swagger_client.rest import ApiException
class TestSiteApi(unittest.TestCase):
"""SiteApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.site_api.SiteApi() # noqa: E501
def tearDown(self):
pass
def test_add_site_tag(self):
"""Test case for add_site_tag
Site Tag # noqa: E501
"""
pass
def test_add_site_user(self):
"""Test case for add_site_user
Site Users Access # noqa: E501
"""
pass
def test_create_site(self):
"""Test case for create_site
Sites # noqa: E501
"""
pass
def test_create_site_credential(self):
"""Test case for create_site_credential
Site Scan Credentials # noqa: E501
"""
pass
def test_create_site_scan_schedule(self):
"""Test case for create_site_scan_schedule
Site Scan Schedules # noqa: E501
"""
pass
def test_create_site_smtp_alert(self):
"""Test case for create_site_smtp_alert
Site SMTP Alerts # noqa: E501
"""
pass
def test_create_site_snmp_alert(self):
"""Test case for create_site_snmp_alert
Site SNMP Alerts # noqa: E501
"""
pass
def test_create_site_syslog_alert(self):
"""Test case for create_site_syslog_alert
Site Syslog Alerts # noqa: E501
"""
pass
def test_delete_all_site_alerts(self):
"""Test case for delete_all_site_alerts
Site Alerts # noqa: E501
"""
pass
def test_delete_all_site_credentials(self):
"""Test case for delete_all_site_credentials
Site Scan Credentials # noqa: E501
"""
pass
def test_delete_all_site_scan_schedules(self):
"""Test case for delete_all_site_scan_schedules
Site Scan Schedules # noqa: E501
"""
pass
def test_delete_all_site_smtp_alerts(self):
"""Test case for delete_all_site_smtp_alerts
Site SMTP Alerts # noqa: E501
"""
pass
def test_delete_all_site_snmp_alerts(self):
"""Test case for delete_all_site_snmp_alerts
Site SNMP Alerts # noqa: E501
"""
pass
def test_delete_all_site_syslog_alerts(self):
"""Test case for delete_all_site_syslog_alerts
Site Syslog Alerts # noqa: E501
"""
pass
def test_delete_site(self):
"""Test case for delete_site
Site # noqa: E501
"""
pass
def test_delete_site_credential(self):
"""Test case for delete_site_credential
Site Scan Credential # noqa: E501
"""
pass
def test_delete_site_scan_schedule(self):
"""Test case for delete_site_scan_schedule
Site Scan Schedule # noqa: E501
"""
pass
def test_delete_site_smtp_alert(self):
"""Test case for delete_site_smtp_alert
Site SMTP Alert # noqa: E501
"""
pass
def test_delete_site_snmp_alert(self):
"""Test case for delete_site_snmp_alert
Site SNMP Alert # noqa: E501
"""
pass
def test_delete_site_syslog_alert(self):
"""Test case for delete_site_syslog_alert
Site Syslog Alert # noqa: E501
"""
pass
def test_enable_shared_credential_on_site(self):
"""Test case for enable_shared_credential_on_site
Assigned Shared Credential Enablement # noqa: E501
"""
pass
def test_enable_site_credential(self):
"""Test case for enable_site_credential
Site Credential Enablement # noqa: E501
"""
pass
def test_get_excluded_asset_groups(self):
"""Test case for get_excluded_asset_groups
Site Excluded Asset Groups # noqa: E501
"""
pass
def test_get_excluded_targets(self):
"""Test case for get_excluded_targets
Site Excluded Targets # noqa: E501
"""
pass
def test_get_included_asset_groups(self):
"""Test case for get_included_asset_groups
Site Included Asset Groups # noqa: E501
"""
pass
def test_get_included_targets(self):
"""Test case for get_included_targets
Site Included Targets # noqa: E501
"""
pass
def test_get_site(self):
"""Test case for get_site
Site # noqa: E501
"""
pass
def test_get_site_alerts(self):
"""Test case for get_site_alerts
Site Alerts # noqa: E501
"""
pass
def test_get_site_assets(self):
"""Test case for get_site_assets
Site Assets # noqa: E501
"""
pass
def test_get_site_credential(self):
"""Test case for get_site_credential
Site Scan Credential # noqa: E501
"""
pass
def test_get_site_credentials(self):
"""Test case for get_site_credentials
Site Scan Credentials # noqa: E501
"""
pass
def test_get_site_discovery_connection(self):
"""Test case for get_site_discovery_connection
Site Discovery Connection # noqa: E501
"""
pass
def test_get_site_discovery_search_criteria(self):
"""Test case for get_site_discovery_search_criteria
Site Discovery Search Criteria # noqa: E501
"""
pass
def test_get_site_organization(self):
"""Test case for get_site_organization
Site Organization Information # noqa: E501
"""
pass
def test_get_site_scan_engine(self):
"""Test case for get_site_scan_engine
Site Scan Engine # noqa: E501
"""
pass
def test_get_site_scan_schedule(self):
"""Test case for get_site_scan_schedule
Site Scan Schedule # noqa: E501
"""
pass
def test_get_site_scan_schedules(self):
"""Test case for get_site_scan_schedules
Site Scan Schedules # noqa: E501
"""
pass
def test_get_site_scan_template(self):
"""Test case for get_site_scan_template
Site Scan Template # noqa: E501
"""
pass
def test_get_site_shared_credentials(self):
"""Test case for get_site_shared_credentials
Assigned Shared Credentials # noqa: E501
"""
pass
def test_get_site_smtp_alert(self):
"""Test case for get_site_smtp_alert
Site SMTP Alert # noqa: E501
"""
pass
def test_get_site_smtp_alerts(self):
"""Test case for get_site_smtp_alerts
Site SMTP Alerts # noqa: E501
"""
pass
def test_get_site_snmp_alert(self):
"""Test case for get_site_snmp_alert
Site SNMP Alert # noqa: E501
"""
pass
def test_get_site_snmp_alerts(self):
"""Test case for get_site_snmp_alerts
Site SNMP Alerts # noqa: E501
"""
pass
def test_get_site_syslog_alert(self):
"""Test case for get_site_syslog_alert
Site Syslog Alert # noqa: E501
"""
pass
def test_get_site_syslog_alerts(self):
"""Test case for get_site_syslog_alerts
Site Syslog Alerts # noqa: E501
"""
pass
def test_get_site_tags(self):
"""Test case for get_site_tags
Site Tags # noqa: E501
"""
pass
def test_get_site_users(self):
"""Test case for get_site_users
Site Users Access # noqa: E501
"""
pass
def test_get_sites(self):
"""Test case for get_sites
Sites # noqa: E501
"""
pass
def test_get_web_auth_html_forms(self):
"""Test case for get_web_auth_html_forms
Web Authentication HTML Forms # noqa: E501
"""
pass
def test_get_web_auth_http_headers(self):
"""Test case for get_web_auth_http_headers
Web Authentication HTTP Headers # noqa: E501
"""
pass
def test_remove_all_excluded_asset_groups(self):
"""Test case for remove_all_excluded_asset_groups
Site Excluded Asset Groups # noqa: E501
"""
pass
def test_remove_all_included_asset_groups(self):
"""Test case for remove_all_included_asset_groups
Site Included Asset Groups # noqa: E501
"""
pass
def test_remove_asset_from_site(self):
"""Test case for remove_asset_from_site
Site Asset # noqa: E501
"""
pass
def test_remove_excluded_asset_group(self):
"""Test case for remove_excluded_asset_group
Site Excluded Asset Group # noqa: E501
"""
pass
def test_remove_included_asset_group(self):
"""Test case for remove_included_asset_group
Site Included Asset Group # noqa: E501
"""
pass
def test_remove_site_assets(self):
"""Test case for remove_site_assets
Site Assets # noqa: E501
"""
pass
def test_remove_site_tag(self):
"""Test case for remove_site_tag
Site Tag # noqa: E501
"""
pass
def test_remove_site_user(self):
"""Test case for remove_site_user
Site User Access # noqa: E501
"""
pass
def test_set_site_credentials(self):
"""Test case for set_site_credentials
Site Scan Credentials # noqa: E501
"""
pass
def test_set_site_discovery_connection(self):
"""Test case for set_site_discovery_connection
Site Discovery Connection # noqa: E501
"""
pass
def test_set_site_discovery_search_criteria(self):
"""Test case for set_site_discovery_search_criteria
Site Discovery Search Criteria # noqa: E501
"""
pass
def test_set_site_scan_engine(self):
"""Test case for set_site_scan_engine
Site Scan Engine # noqa: E501
"""
pass
def test_set_site_scan_schedules(self):
"""Test case for set_site_scan_schedules
Site Scan Schedules # noqa: E501
"""
pass
def test_set_site_scan_template(self):
"""Test case for set_site_scan_template
Site Scan Template # noqa: E501
"""
pass
def test_set_site_smtp_alerts(self):
"""Test case for set_site_smtp_alerts
Site SMTP Alerts # noqa: E501
"""
pass
def test_set_site_snmp_alerts(self):
"""Test case for set_site_snmp_alerts
Site SNMP Alerts # noqa: E501
"""
pass
def test_set_site_syslog_alerts(self):
"""Test case for set_site_syslog_alerts
Site Syslog Alerts # noqa: E501
"""
pass
def test_set_site_tags(self):
"""Test case for set_site_tags
Site Tags # noqa: E501
"""
pass
def test_set_site_users(self):
"""Test case for set_site_users
Site Users Access # noqa: E501
"""
pass
def test_update_excluded_asset_groups(self):
"""Test case for update_excluded_asset_groups
Site Excluded Asset Groups # noqa: E501
"""
pass
def test_update_excluded_targets(self):
"""Test case for update_excluded_targets
Site Excluded Targets # noqa: E501
"""
pass
def test_update_included_asset_groups(self):
"""Test case for update_included_asset_groups
Site Included Asset Groups # noqa: E501
"""
pass
def test_update_included_targets(self):
"""Test case for update_included_targets
Site Included Targets # noqa: E501
"""
pass
def test_update_site(self):
"""Test case for update_site
Site # noqa: E501
"""
pass
def test_update_site_credential(self):
"""Test case for update_site_credential
Site Scan Credential # noqa: E501
"""
pass
def test_update_site_organization(self):
"""Test case for update_site_organization
Site Organization Information # noqa: E501
"""
pass
def test_update_site_scan_schedule(self):
"""Test case for update_site_scan_schedule
Site Scan Schedule # noqa: E501
"""
pass
def test_update_site_smtp_alert(self):
"""Test case for update_site_smtp_alert
Site SMTP Alert # noqa: E501
"""
pass
def test_update_site_snmp_alert(self):
"""Test case for update_site_snmp_alert
Site SNMP Alert # noqa: E501
"""
pass
def test_update_site_syslog_alert(self):
"""Test case for update_site_syslog_alert
Site Syslog Alert # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 103.142857
| 48,043
| 0.511748
|
184a031b397e07a4967ebc22e6ecfb412ccd1bbd
| 47,359
|
py
|
Python
|
pirates/leveleditor/worldData/anvil_island_area_barbossa_cave.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/anvil_island_area_barbossa_cave.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/anvil_island_area_barbossa_cave.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.anvil_island_area_barbossa_cave
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1172209006.11sdnaik': {'Type': 'Island Game Area', 'Name': 'anvil_island_area_barbossa_cave', 'File': '', 'Environment': 'Cave', 'Footstep Sound': 'Sand', 'Instanced': True, 'Minimap': False, 'Objects': {'1172209074.56sdnaik': {'Type': 'Locator Node', 'Name': 'portal_interior_1', 'Hpr': VBase3(95.675, 0.0, 0.0), 'Pos': Point3(85.919, -190.083, 24.757), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1172618710.78sdnaik': {'Type': 'Townsperson', 'Category': 'Cast', 'AnimSet': 'cb_apple', 'AuraFX': 'None', 'Boss': False, 'CustomModel': 'models/char/cb_2000', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'HelpID': 'NONE', 'Hpr': VBase3(-111.252, 0.0, 0.0), 'Instanced World': 'None', 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '12.0000', 'Pos': Point3(-21.98, 19.615, 6.041), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PORT_ROYAL_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Zombie': False, 'spawnTimeAlt': '', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1173468367.09kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(65.89, 0.0, 10.222), 'Pos': Point3(-22.972, 16.554, 6.811), 'Scale': VBase3(0.863, 0.863, 0.863), 'Visual': {'Model': 'models/props/treasureChest_open'}}, '1173468423.53kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(81.692, 0.0, 0.0), 'Objects': {'1173471720.95kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(175.579, 56.568, 0.0), 'Pos': Point3(-1.241, 2.011, 0.413), 'Scale': VBase3(1.362, 1.362, 1.362), 'Visual': {'Model': 'models/props/treasure_sconce'}}}, 'Pos': Point3(-21.29, 7.239, 3.606), 'Scale': VBase3(0.734, 0.734, 0.734), 'Visual': {'Model': 'models/props/treasureTrough'}}, '1173468471.78kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-27.286, 0.0, 0.0), 'Objects': {'1173471825.44kmuller': {'Type': 'Furniture - Fancy', 'DisableCollision': False, 'Hpr': VBase3(103.874, -24.165, 0.0), 'Pos': Point3(-0.17, 3.557, 0.148), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_fancy'}}, '1173471860.03kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(27.286, 0.0, 0.0), 'Pos': Point3(-4.353, 2.819, 4.024), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1.0, 0.71, 0.82, 1.0), 'Model': 'models/props/bottle_red'}}}, 'Pos': Point3(-33.657, -18.259, 3.981), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureTrough'}}, '1173468497.0kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-52.269, 1.207, 1.067), 'Objects': {'1173471924.11kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-176.001, 0.143, -1.083), 'Pos': Point3(-1.148, -7.0, 0.554), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureChest_open'}}, '1173471969.92kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(51.827, -19.244, -1.327), 'Pos': Point3(-3.079, 0.042, 0.331), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasure_chandelier'}}, '1173472001.3kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(54.562, 6.976, -16.417), 'Pos': Point3(-1.915, 3.088, 0.404), 'Scale': 
VBase3(1.072, 1.072, 1.072), 'Visual': {'Model': 'models/props/bottle_green'}}, '1173473947.56kmuller': {'Type': 'Trunks', 'DisableCollision': False, 'Hpr': VBase3(-177.109, -1.995, -1.679), 'Pos': Point3(1.736, -1.62, 0.559), 'Scale': VBase3(0.761, 0.761, 0.761), 'Visual': {'Color': (0.7200000286102295, 0.699999988079071, 0.5899999737739563, 1.0), 'Model': 'models/props/Trunk_rounded_2'}}}, 'Pos': Point3(-24.383, -15.746, 3.792), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureTrough'}}, '1173471575.44kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(56.363, 0.0, 0.0), 'Objects': {'1173471627.81kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(-56.363, 4.799, -7.37), 'Pos': Point3(-7.898, -2.057, -0.921), 'Scale': VBase3(0.778, 0.778, 0.778), 'Visual': {'Color': (0.7099999785423279, 0.6700000166893005, 0.6000000238418579, 1.0), 'Model': 'models/props/barrel_worn'}}, '1173471671.51kmuller': {'Type': 'Wall_Hangings', 'DisableCollision': False, 'Hpr': VBase3(-161.949, 16.647, 13.013), 'Pos': Point3(4.633, 5.65, 3.513), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/seascape_battle'}}, '1173472099.22kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(-56.744, -12.669, -6.175), 'Pos': Point3(-1.816, -0.758, 0.362), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.7200000286102295, 0.6000000238418579, 1.0), 'Model': 'models/props/bottle_tan'}}, '1250877604.84akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(78.429, 1.866, -11.711), 'Pos': Point3(-1.296, 3.362, 0.865), 'Scale': VBase3(0.84, 0.84, 0.84), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}}, 'Pos': Point3(-33.146, -6.186, 3.947), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureTrough'}}, '1173471597.2kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Objects': {}, 'Pos': Point3(-34.728, 2.632, 3.882), 'Scale': VBase3(1.221, 1.221, 1.221), 'Visual': {'Model': 'models/props/treasureTrough_single'}}, '1173471783.73kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-35.795, -19.76, 4.397), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/crates_group_2'}}, '1173472048.89kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(65.655, 0.0, 0.0), 'Pos': Point3(-29.55, 1.392, 4.183), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureChest_closed'}}, '1173472175.67kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.494, 1.263), 'Objects': {'1173472214.83kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(3.135, -4.125, 0.325), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureTrough_single'}}, '1173472843.22kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': VBase3(0.0, 1.884, 0.947), 'Pos': Point3(2.816, 3.404, 0.009), 'Scale': VBase3(0.784, 0.784, 0.784), 'Visual': {'Color': (0.7200000286102295, 0.699999988079071, 0.5899999737739563, 1.0), 'Model': 'models/props/barrel_grey'}}, '1173473890.48kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-74.672, -3.73, -2.843), 'Pos': Point3(1.553, -1.297, 0.889), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureChest_open'}}, '1173474959.44kmuller': {'Type': 
'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(0.0, 12.717, 0.0), 'Pos': Point3(-1.903, -0.442, 0.318), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/bottle_red'}}}, 'Pos': Point3(26.083, -7.65, 2.748), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureTrough'}}, '1173472197.86kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-43.152, 0.0, 0.0), 'Objects': {'1173474384.75kmuller': {'Type': 'Trunks', 'DisableCollision': False, 'Hpr': VBase3(-24.158, 0.0, 0.0), 'Pos': Point3(-2.887, -1.469, 0.652), 'Scale': VBase3(0.789, 0.789, 0.789), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/Trunk_rounded'}}, '1250877328.98akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(34.287, -13.693, -15.183), 'Pos': Point3(2.829, 1.448, -0.03), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}}, 'Pos': Point3(25.568, 3.214, 2.794), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureTrough'}}, '1173472392.78kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-17.706, 0.035, -0.11), 'Objects': {'1250877589.53akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(134.792, 1.866, -11.711), 'Pos': Point3(-0.072, -0.955, 1.21), 'Scale': VBase3(0.84, 0.84, 0.84), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}}, 'Pos': Point3(-9.835, -31.083, 3.176), 'Scale': VBase3(1.19, 1.19, 1.19), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173472402.28kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-17.706, 0.035, 0.005), 'Pos': Point3(-10.198, -24.927, 3.4), 'Scale': VBase3(0.931, 0.931, 0.931), 'Visual': {'Model': 'models/props/treasureTrough_single'}}, '1173472651.34kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-0.111, -14.424, -1.428), 'Pos': Point3(-31.521, 9.349, 8.04), 'Scale': VBase3(0.775, 0.775, 0.775), 'Visual': {'Model': 'models/props/treasureTrough_single'}}, '1173472721.78kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-33.341, 6.35, 9.026), 'Scale': VBase3(0.675, 0.675, 0.675), 'Visual': {'Color': (0.7200000286102295, 0.699999988079071, 0.5899999737739563, 1.0), 'Model': 'models/props/barrel_grey'}}, '1173473917.97kmuller': {'Type': 'Wall_Hangings', 'DisableCollision': False, 'Hpr': VBase3(72.697, 33.022, 29.472), 'Pos': Point3(26.998, 6.135, 4.941), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/portrait_gov'}}, '1173474152.67kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-33.123, -4.407, 4.426), 'Scale': VBase3(0.825, 0.825, 0.825), 'Visual': {'Color': (0.7099999785423279, 0.6700000166893005, 0.6000000238418579, 1.0), 'Model': 'models/props/crate_04'}}, '1173474457.5kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': VBase3(30.719, 0.0, 0.0), 'Pos': Point3(25.244, -4.318, 3.273), 'Scale': VBase3(0.799, 0.799, 0.799), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/crate'}}, '1173474718.7kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(170.127, 0.0, 0.0), 'Pos': Point3(-19.421, -19.11, 3.925), 'Scale': VBase3(0.738, 0.738, 0.738), 
'Visual': {'Model': 'models/props/treasureChest_closed'}}, '1173475002.56kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(24.603, 0.0, 0.0), 'Pos': Point3(-105.99, -27.167, 41.989), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.899999976158, 0.899999976158, 0.699999988079, 1.0), 'Model': 'models/props/crates_group_1'}}, '1173475022.66kmuller': {'Type': 'Barrel', 'DisableCollision': True, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-101.726, -21.398, 40.132), 'Scale': VBase3(0.728, 0.728, 0.728), 'VisSize': '', 'Visual': {'Color': (0.91, 0.86, 0.65, 1.0), 'Model': 'models/props/barrel_worn'}}, '1173475076.05kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(0.175, 13.381, 11.028), 'Objects': {'1173475259.56kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(-5.229, -24.249, -12.155), 'Pos': Point3(2.971, 0.399, -0.891), 'Scale': VBase3(1.792, 1.792, 1.792), 'Visual': {'Model': 'models/props/largejug_A'}}, '1173475343.87kmuller': {'Type': 'Wall_Hangings', 'DisableCollision': False, 'Hpr': VBase3(-54.715, 21.019, -3.286), 'Pos': Point3(-0.355, -3.566, 1.789), 'Scale': VBase3(0.636, 0.636, 0.636), 'Visual': {'Model': 'models/props/seascape_port'}}}, 'Pos': Point3(-102.684, -32.061, 40.039), 'Scale': VBase3(1.572, 1.572, 1.572), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173475106.3kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-88.578, -13.952, 0.629), 'Pos': Point3(-97.499, -26.255, 38.394), 'Scale': VBase3(1.53, 1.53, 1.53), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173475150.47kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(102.4, 18.802, 4.057), 'Pos': Point3(-96.706, -21.344, 39.702), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/treasureChest_open'}}, '1173475212.69kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(2.574, -0.798, 14.081), 'Pos': Point3(-97.35, -18.315, 38.277), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173475484.11kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(0.191, -7.866, 3.947), 'Objects': {'1173475560.97kmuller': {'Type': 'Barrel', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-17.717, 8.694, -1.336), 'Pos': Point3(-4.788, -3.67, -0.11), 'Scale': VBase3(0.662, 0.662, 0.662), 'VisSize': '', 'Visual': {'Color': (0.589999973774, 0.589999973774, 0.490000009537, 1.0), 'Model': 'models/props/barrel_group_3'}}, '1173475599.84kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(161.762, -8.706, 1.257), 'Pos': Point3(-1.743, -0.101, 0.606), 'Scale': VBase3(0.724, 0.724, 0.724), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureChest_open'}}, '1173475645.34kmuller': {'Type': 'Furniture - Fancy', 'DisableCollision': False, 'Hpr': VBase3(-27.381, -30.075, -28.026), 'Pos': Point3(3.899, 1.605, 0.527), 'Scale': VBase3(0.724, 0.724, 0.724), 'Visual': {'Model': 'models/props/stool_fancy'}}, '1173475705.97kmuller': {'Type': 'Trunks', 'DisableCollision': False, 'Hpr': VBase3(-170.091, -1.954, 5.226), 'Pos': Point3(-2.933, 2.714, 
0.278), 'Scale': VBase3(0.724, 0.724, 0.724), 'Visual': {'Model': 'models/props/Trunk_rounded_2'}}, '1173494051.56kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(132.966, -4.395, -7.056), 'Pos': Point3(5.434, -3.625, 0.838), 'Scale': VBase3(0.747, 0.747, 0.747), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/Trunk_rounded_2'}}}, 'Pos': Point3(-71.338, -79.459, 29.129), 'Scale': VBase3(1.38, 1.38, 1.38), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough'}}, '1173475529.08kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(0.923, 0.0, 0.0), 'Pos': Point3(-77.951, -72.216, 28.19), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173476346.23kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-61.004, -84.022, 28.875), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173476370.78kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(0.0, 10.732, 0.0), 'Pos': Point3(-64.089, -80.774, 31.04), 'Scale': VBase3(1.115, 1.115, 1.115), 'Visual': {'Model': 'models/props/bottle_green'}}, '1173476412.98kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(-8.405, 28.067, 6.498), 'Pos': Point3(-61.858, -81.625, 29.208), 'Scale': VBase3(2.558, 2.558, 2.558), 'Visual': {'Model': 'models/props/waterpitcher'}}, '1173476478.11kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(0.0, -8.664, 0.0), 'Objects': {'1173476535.72kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-70.524, -1.407, 8.187), 'Pos': Point3(2.747, -5.673, 0.935), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.589999973774, 0.589999973774, 0.490000009537, 1.0), 'Model': 'models/props/crates_group_1'}}, '1173476711.03kmuller': {'Type': 'Barrel', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-0.533, 14.375, 5.277), 'Pos': Point3(-0.415, -3.12, 0.775), 'Scale': VBase3(0.943, 0.943, 0.943), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/barrel_grey'}}, '1173476839.64kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(0.0, 8.664, -10.81), 'Pos': Point3(-1.813, -0.603, 0.396), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/bottle_green'}}}, 'Pos': Point3(79.984, -90.141, 5.219), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough'}}, '1173476568.78kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-26.274, -11.342, -5.545), 'Objects': {'1173476683.0kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-103.181, -7.123, 3.621), 'Pos': Point3(-1.421, -0.953, 0.526), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureChest_open'}}}, 'Pos': Point3(78.207, -95.333, 6.207), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough'}}, '1173476595.0kmuller': {'Type': 'Treasure Chest', 
'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-0.003, -10.571, 1.208), 'Objects': {'1173476640.59kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(0.229, 10.568, -1.229), 'Pos': Point3(-2.054, -0.088, 2.839), 'Scale': VBase3(1.172, 1.172, 1.172), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/bottle_red'}}}, 'Pos': Point3(87.998, -95.619, 6.261), 'Scale': VBase3(1.28, 1.28, 1.28), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173476807.97kmuller': {'Type': 'Furniture - Fancy', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-105.689, -0.169, -86.025), 'Pos': Point3(80.358, -92.174, 6.756), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/chair_fancy'}}, '1173476913.16kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-0.527, -13.011, -4.646), 'Objects': {'1173476992.98kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-159.013, 0.788, 8.363), 'Pos': Point3(5.298, -0.068, 0.456), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureChest_closed'}}, '1173494163.92kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(-0.267, 13.019, 6.306), 'Pos': Point3(-1.023, 2.279, 0.473), 'Scale': VBase3(1.375, 1.375, 1.375), 'Visual': {'Color': (1.0, 0.89, 0.77, 1.0), 'Model': 'models/props/bottle_red'}}}, 'Pos': Point3(-56.248, -79.205, 10.348), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173476951.55kmuller': {'Type': 'Treasure Chest', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-0.296, -14.959, -4.882), 'Pos': Point3(-60.628, -75.756, 9.378), 'Scale': VBase3(0.807, 0.807, 0.807), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/treasureTrough_single'}}, '1173477100.75kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 0.901), 'Objects': {'1173477184.05kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(5.719, 18.512, -32.928), 'Pos': Point3(-0.84, -0.577, 1.416), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/props/bottle_tan'}}, '1250877310.59akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-172.606, -0.064, 15.101), 'Pos': Point3(1.441, 0.427, -0.478), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}}, 'Pos': Point3(-2.817, 26.98, 3.312), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureTrough_single'}}, '1173477117.31kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(0.774, 22.338, 3.276), 'Scale': VBase3(0.649, 0.649, 0.649), 'Visual': {'Model': 'models/props/treasureTrough_single'}}, '1173477133.91kmuller': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-7.998, 0.0, 0.0), 'Pos': Point3(-1.361, 23.038, 3.417), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/treasureChest_closed'}}, '1173494260.01kmuller': {'Type': 'Cups', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-23.149, 34.955, -16.484), 'Pos': Point3(-59.985, 
-74.367, 10.035), 'Scale': VBase3(1.402, 1.402, 1.402), 'VisSize': '', 'Visual': {'Model': 'models/props/beerstein'}}, '1175216546.7kmuller': {'Type': 'Tunnel Cap', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-179.257, 0.0, 0.0), 'Pos': Point3(87.158, -179.257, 26.766), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.40000000596, 0.40000000596, 0.40000000596, 1.0), 'Model': 'models/tunnels/pir_m_are_tun_caveInterior_cap'}}, '1175912064.0JB2': {'Type': 'Animal', 'Hpr': Point3(0.0, 0.0, 0.0), 'MinHP': 10, 'Patrol Radius': 12, 'Pos': Point3(-0.907, -7.683, 4.223), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Monkey', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 1, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1176164583.28dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '15.0000', 'DropOff': '90.0000', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(-137.198, -12.488, -99.924), 'Intensity': '0.3939', 'LightType': 'SPOT', 'Pos': Point3(-25.129, 29.622, 13.381), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1176165564.08dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '108.8636', 'DropOff': '65.4545', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(98.927, 18.357, -92.791), 'Intensity': '0.4545', 'LightType': 'SPOT', 'Pos': Point3(-8.182, 23.236, 6.965), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1176167113.18dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(0.0, 0.0, -0.474), 'Intensity': '0.2121', 'LightType': 'POINT', 'Pos': Point3(-6.907, 19.341, 6.928), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1.0, 0.7799999713897705, 0.5299999713897705, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1176225288.77dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '12.2727', 'FlickRate': 0.5, 'Flickering': True, 'Hpr': VBase3(1.676, 25.173, 33.894), 'Intensity': '1.6667', 'LightType': 'POINT', 'Pos': Point3(-21.229, 19.109, 9.82), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.9900000095367432, 0.8399999737739563, 0.5600000023841858, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1176342559.48dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '65.9091', 'DropOff': '24.5455', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(-55.948, -11.457, -89.954), 'Intensity': '0.6061', 'LightType': 'SPOT', 'Pos': Point3(-26.332, 18.178, 11.598), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/light_tool_bulb'}}, '1177608371.49dzlu': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(-17.785, -3.09, 1.969), 'Pos': Point3(-4.896, -32.682, 3.386), 'Scale': VBase3(0.59, 0.59, 0.59), 'Visual': {'Model': 'models/props/treasureTrough_single'}}, '1178323712.0dchiappe': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(65.89, 0.0, 10.222), 'Pos': Point3(-8.318, 16.465, 3.695), 'Scale': VBase3(0.863, 0.863, 0.863), 'Visual': {'Model': 'models/props/treasureChest_open'}}, '1178323712.0dchiappe0': {'Type': 'Treasure Chest', 'DisableCollision': False, 'Hpr': VBase3(65.89, 0.0, 10.222), 'Pos': Point3(-8.318, 16.465, 3.695), 'Scale': VBase3(0.863, 0.863, 0.863), 'Visual': {'Model': 
'models/props/treasureChest_open'}}, '1213983053.36aapatel': {'Type': 'Townsperson', 'Category': 'PvPRewards', 'AnimSet': 'default', 'AuraFX': 'None', 'Boss': False, 'CustomModel': 'None', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'HelpID': 'NONE', 'Holiday': '', 'Hpr': VBase3(169.842, 0.0, 0.0), 'Instanced World': 'None', 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '12.0000', 'Pos': Point3(9.477, 14.543, 3.276), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PRIVATEER_TATTOOS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Zombie': False, 'spawnTimeAlt': '', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1250816577.11akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-70.015, 67.695, 0.088), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250816607.06akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-6.294, 0.0, 0.0), 'Pos': Point3(-21.687, 79.745, -0.936), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250876943.56akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-86.964, 0.0, 0.0), 'Pos': Point3(114.079, 67.157, -0.98), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250877014.44akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-86.964, 0.0, 0.0), 'Pos': Point3(93.212, -32.115, -0.131), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250877050.25akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-86.964, 0.0, 0.0), 'Pos': Point3(150.027, -51.636, -1.113), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250877088.86akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-158.034, 0.0, 0.0), 'Pos': Point3(72.603, -80.586, 0.284), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250877112.28akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-158.034, 0.0, 0.0), 'Pos': Point3(52.532, -98.638, 10.099), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250877144.7akelts': {'Type': 'Light_Fixtures', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(131.997, -11.72, 0.0), 'Pos': Point3(16.811, -90.386, 2.429), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/torch'}}, '1250877166.08akelts': {'Type': 'Light_Fixtures', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(131.997, -0.317, 0.0), 'Pos': Point3(-35.907, -92.812, -1.333), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/torch'}}, '1250877270.7akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(131.997, -0.317, 0.0), 'Pos': Point3(-18.641, 23.404, 3.92), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, 
'1250877286.14akelts': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-172.608, -0.18, 14.208), 'Pos': Point3(-9.33, 37.096, 2.003), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/torch'}}, '1250877376.02akelts': {'Type': 'Light_Fixtures', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-79.034, 0.0, -6.327), 'Pos': Point3(32.824, -16.682, 3.201), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/torch'}}, '1250877472.52akelts': {'Type': 'Light_Fixtures', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(117.084, 1.763, -11.659), 'Pos': Point3(7.106, -40.55, 3.447), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/props/torch'}}, '1251140212.95piwanow': {'Type': 'Effect Node', 'EffectName': 'watersplash_effect', 'Hpr': VBase3(-128.191, 0.0, 0.0), 'Pos': Point3(-85.61, 48.499, 2.16), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1251140289.59piwanow': {'Type': 'Effect Node', 'EffectName': 'watersplash_effect', 'Hpr': VBase3(-176.566, 0.0, 0.0), 'Pos': Point3(8.715, 78.784, 2.884), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1251140327.19piwanow': {'Type': 'Effect Node', 'EffectName': 'watersplash_effect', 'Hpr': VBase3(152.045, 0.0, 0.0), 'Pos': Point3(74.63, 91.223, 3.637), 'Scale': VBase3(0.8, 0.8, 0.8), 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1251140366.42piwanow': {'Type': 'Effect Node', 'EffectName': 'watersplash_effect', 'Hpr': VBase3(163.301, 0.0, 0.0), 'Pos': Point3(139.836, 48.412, 2.707), 'Scale': VBase3(0.8, 0.8, 0.8), 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1251148581.42piwanow': {'Type': 'Cave_Props', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(144.817, 0.0, 0.0), 'Objects': {'1251154942.28akelts': {'Type': 'Cave_Props', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-56.912, 0.0, 0.0), 'Pos': Point3(5.302, 0.976, -0.03), 'RenderEffect': False, 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.346, 0.432, 0.391, 1.0), 'Model': 'models/props/pir_m_prp_cav_rockGroup_i'}}}, 'Pos': Point3(8.854, 77.776, -0.496), 'RenderEffect': False, 'Scale': VBase3(1.224, 1.224, 1.224), 'VisSize': '', 'Visual': {'Color': (0.323, 0.404, 0.365, 1.0), 'Model': 'models/props/pir_m_prp_cav_rockGroup_g'}}, '1251148751.73piwanow': {'Type': 'Cave_Props', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(40.355, 7.197, -21.452), 'Pos': Point3(76.426, 90.621, -2.115), 'RenderEffect': False, 'Scale': VBase3(0.2, 0.3, 0.286), 'VisSize': '', 'Visual': {'Color': (0.32, 0.44, 0.41, 1.0), 'Model': 'models/props/pir_m_prp_cav_rockGroup_a'}}, '1251149030.66piwanow': {'Type': 'Cave_Props', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-47.141, 0.0, 0.0), 'Pos': Point3(137.696, 40.061, -1.582), 'RenderEffect': False, 'Scale': VBase3(0.568, 0.451, 0.451), 'VisSize': '', 'Visual': {'Color': (0.279, 0.349, 0.341, 1.0), 'Model': 'models/props/pir_m_prp_cav_rockGroup_c'}}, '1251149293.97piwanow': {'Type': 'Cave_Props', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-92.315, 0.0, 0.0), 'Pos': Point3(-85.427, 47.662, -1.031), 'RenderEffect': False, 'Scale': VBase3(1.838, 1.838, 1.838), 'VisSize': '', 'Visual': {'Color': (0.27, 
0.329, 0.293, 1.0), 'Model': 'models/props/pir_m_prp_cav_rockGroup_k'}}, '1251154757.73akelts': {'Type': 'Cave_Props', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(41.068, -6.877, -0.44), 'Pos': Point3(86.04, 90.778, -1.571), 'RenderEffect': False, 'Scale': VBase3(1.389, 2.083, 1.986), 'VisSize': '', 'Visual': {'Color': (0.324, 0.438, 0.411, 1.0), 'Model': 'models/props/pir_m_prp_cav_rockGroup_k'}}, '1251155149.92akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(82.063, 88.344, 1.109), 'Scale': VBase3(1.779, 1.779, 1.779), 'VisSize': '', 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0), 'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}}, '1251161466.69akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-72.323, 0.0, 0.0), 'Pos': Point3(119.014, 52.431, -2.076), 'Scale': VBase3(1.14, 1.354, 1.875), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1251161509.13akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-59.964, 0.0, 0.0), 'Pos': Point3(130.185, 30.776, -2.195), 'Scale': VBase3(3.834, 1.354, 1.875), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}}, 'Visibility': 'Grid', 'Visual': {'Model': 'models/caves/pir_m_are_cav_barbossa'}}}, 'TodSettings': {'AmbientColors': {0: Vec4(0.45, 0.53, 0.65, 1), 2: Vec4(0.537255, 0.494118, 0.627451, 1), 4: Vec4(0.4, 0.447059, 0.498039, 1), 6: Vec4(0.439216, 0.447059, 0.556863, 1), 8: Vec4(0.388235, 0.419608, 0.537255, 1), 12: Vec4(0.337255, 0.278431, 0.407843, 1), 13: Vec4(0.337255, 0.278431, 0.407843, 1), 16: Vec4(0.247059, 0.247059, 0.247059, 1), 17: Vec4(0.34, 0.28, 0.41, 1)}, 'DirectionalColors': {0: Vec4(0.55, 0.46, 0.35, 1), 2: Vec4(0.458824, 0.458824, 0.364706, 1), 4: Vec4(0.6, 0.337255, 0.0980392, 1), 6: Vec4(0.458824, 0.478431, 0.447059, 1), 8: Vec4(0.419608, 0.419608, 0.4, 1), 12: Vec4(0.658824, 0.756863, 0.0470588, 1), 13: Vec4(0.658824, 0.756863, 0.0470588, 1), 16: Vec4(0, 0, 0, 1), 17: Vec4(0.66, 0.76, 0.05, 1)}, 'FogColors': {0: Vec4(0.3, 0.2, 0.15, 0), 2: Vec4(0.6, 0.694118, 0.894118, 1), 4: Vec4(0.298039, 0.176471, 0.14902, 1), 6: Vec4(0.14902, 0.2, 0.34902, 1), 8: Vec4(0.0470588, 0.0588235, 0.168627, 1), 12: Vec4(0.0980392, 0.117647, 0.027451, 1), 13: Vec4(0.0980392, 0.117647, 0.027451, 1), 16: Vec4(0.054902, 0.0392157, 0, 1), 17: Vec4(0.1, 0.12, 0.03, 0)}, 'FogRanges': {0: 0.0001, 2: 9.999999747378752e-05, 4: 9.999999747378752e-05, 6: 9.999999747378752e-05, 8: 0.00019999999494757503, 12: 0.0002500000118743628, 13: 0.0002500000118743628, 16: 9.999999747378752e-05, 17: 0.005}, 'LinearFogRanges': {0: (0.0, 100.0), 2: (0.0, 100.0), 4: (0.0, 100.0), 6: (0.0, 100.0), 8: (0.0, 100.0), 12: (0.0, 100.0), 13: (0.0, 100.0), 16: (150.0, 300.0), 17: (0.0, 100.0)}}, 'Node Links': [], 'Layers': {}, 'ObjectIds': {'1172209006.11sdnaik': '["Objects"]["1172209006.11sdnaik"]', '1172209074.56sdnaik': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1172209074.56sdnaik"]', '1172618710.78sdnaik': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1172618710.78sdnaik"]', '1173468367.09kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468367.09kmuller"]', '1173468423.53kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468423.53kmuller"]', '1173468471.78kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468471.78kmuller"]', '1173468497.0kmuller': 
'["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468497.0kmuller"]', '1173471575.44kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173471575.44kmuller"]', '1173471597.2kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173471597.2kmuller"]', '1173471627.81kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173471575.44kmuller"]["Objects"]["1173471627.81kmuller"]', '1173471671.51kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173471575.44kmuller"]["Objects"]["1173471671.51kmuller"]', '1173471720.95kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468423.53kmuller"]["Objects"]["1173471720.95kmuller"]', '1173471783.73kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173471783.73kmuller"]', '1173471825.44kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468471.78kmuller"]["Objects"]["1173471825.44kmuller"]', '1173471860.03kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468471.78kmuller"]["Objects"]["1173471860.03kmuller"]', '1173471924.11kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468497.0kmuller"]["Objects"]["1173471924.11kmuller"]', '1173471969.92kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468497.0kmuller"]["Objects"]["1173471969.92kmuller"]', '1173472001.3kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468497.0kmuller"]["Objects"]["1173472001.3kmuller"]', '1173472048.89kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472048.89kmuller"]', '1173472099.22kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173471575.44kmuller"]["Objects"]["1173472099.22kmuller"]', '1173472175.67kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472175.67kmuller"]', '1173472197.86kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472197.86kmuller"]', '1173472214.83kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472175.67kmuller"]["Objects"]["1173472214.83kmuller"]', '1173472392.78kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472392.78kmuller"]', '1173472402.28kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472402.28kmuller"]', '1173472651.34kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472651.34kmuller"]', '1173472721.78kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472721.78kmuller"]', '1173472843.22kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472175.67kmuller"]["Objects"]["1173472843.22kmuller"]', '1173473890.48kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472175.67kmuller"]["Objects"]["1173473890.48kmuller"]', '1173473917.97kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173473917.97kmuller"]', '1173473947.56kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173468497.0kmuller"]["Objects"]["1173473947.56kmuller"]', '1173474152.67kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173474152.67kmuller"]', '1173474384.75kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472197.86kmuller"]["Objects"]["1173474384.75kmuller"]', '1173474457.5kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173474457.5kmuller"]', '1173474718.7kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173474718.7kmuller"]', '1173474959.44kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472175.67kmuller"]["Objects"]["1173474959.44kmuller"]', '1173475002.56kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475002.56kmuller"]', '1173475022.66kmuller': 
'["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475022.66kmuller"]', '1173475076.05kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475076.05kmuller"]', '1173475106.3kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475106.3kmuller"]', '1173475150.47kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475150.47kmuller"]', '1173475212.69kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475212.69kmuller"]', '1173475259.56kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475076.05kmuller"]["Objects"]["1173475259.56kmuller"]', '1173475343.87kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475076.05kmuller"]["Objects"]["1173475343.87kmuller"]', '1173475484.11kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475484.11kmuller"]', '1173475529.08kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475529.08kmuller"]', '1173475560.97kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475484.11kmuller"]["Objects"]["1173475560.97kmuller"]', '1173475599.84kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475484.11kmuller"]["Objects"]["1173475599.84kmuller"]', '1173475645.34kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475484.11kmuller"]["Objects"]["1173475645.34kmuller"]', '1173475705.97kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475484.11kmuller"]["Objects"]["1173475705.97kmuller"]', '1173476346.23kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476346.23kmuller"]', '1173476370.78kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476370.78kmuller"]', '1173476412.98kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476412.98kmuller"]', '1173476478.11kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476478.11kmuller"]', '1173476535.72kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476478.11kmuller"]["Objects"]["1173476535.72kmuller"]', '1173476568.78kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476568.78kmuller"]', '1173476595.0kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476595.0kmuller"]', '1173476640.59kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476595.0kmuller"]["Objects"]["1173476640.59kmuller"]', '1173476683.0kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476568.78kmuller"]["Objects"]["1173476683.0kmuller"]', '1173476711.03kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476478.11kmuller"]["Objects"]["1173476711.03kmuller"]', '1173476807.97kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476807.97kmuller"]', '1173476839.64kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476478.11kmuller"]["Objects"]["1173476839.64kmuller"]', '1173476913.16kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476913.16kmuller"]', '1173476951.55kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476951.55kmuller"]', '1173476992.98kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476913.16kmuller"]["Objects"]["1173476992.98kmuller"]', '1173477100.75kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173477100.75kmuller"]', '1173477117.31kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173477117.31kmuller"]', '1173477133.91kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173477133.91kmuller"]', '1173477184.05kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173477100.75kmuller"]["Objects"]["1173477184.05kmuller"]', '1173494051.56kmuller': 
'["Objects"]["1172209006.11sdnaik"]["Objects"]["1173475484.11kmuller"]["Objects"]["1173494051.56kmuller"]', '1173494163.92kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173476913.16kmuller"]["Objects"]["1173494163.92kmuller"]', '1173494260.01kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173494260.01kmuller"]', '1175216546.7kmuller': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1175216546.7kmuller"]', '1175912064.0JB2': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1175912064.0JB2"]', '1176164583.28dzlu': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1176164583.28dzlu"]', '1176165564.08dzlu': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1176165564.08dzlu"]', '1176167113.18dzlu': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1176167113.18dzlu"]', '1176225288.77dzlu': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1176225288.77dzlu"]', '1176342559.48dzlu': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1176342559.48dzlu"]', '1177608371.49dzlu': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1177608371.49dzlu"]', '1178323712.0dchiappe': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1178323712.0dchiappe"]', '1178323712.0dchiappe0': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1178323712.0dchiappe0"]', '1213983053.36aapatel': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1213983053.36aapatel"]', '1250816577.11akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250816577.11akelts"]', '1250816607.06akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250816607.06akelts"]', '1250876943.56akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250876943.56akelts"]', '1250877014.44akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877014.44akelts"]', '1250877050.25akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877050.25akelts"]', '1250877088.86akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877088.86akelts"]', '1250877112.28akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877112.28akelts"]', '1250877144.7akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877144.7akelts"]', '1250877166.08akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877166.08akelts"]', '1250877270.7akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877270.7akelts"]', '1250877286.14akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877286.14akelts"]', '1250877310.59akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173477100.75kmuller"]["Objects"]["1250877310.59akelts"]', '1250877328.98akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472197.86kmuller"]["Objects"]["1250877328.98akelts"]', '1250877376.02akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877376.02akelts"]', '1250877472.52akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1250877472.52akelts"]', '1250877589.53akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173472392.78kmuller"]["Objects"]["1250877589.53akelts"]', '1250877604.84akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1173471575.44kmuller"]["Objects"]["1250877604.84akelts"]', '1251140212.95piwanow': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251140212.95piwanow"]', '1251140289.59piwanow': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251140289.59piwanow"]', '1251140327.19piwanow': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251140327.19piwanow"]', '1251140366.42piwanow': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251140366.42piwanow"]', '1251148581.42piwanow': 
'["Objects"]["1172209006.11sdnaik"]["Objects"]["1251148581.42piwanow"]', '1251148751.73piwanow': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251148751.73piwanow"]', '1251149030.66piwanow': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251149030.66piwanow"]', '1251149293.97piwanow': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251149293.97piwanow"]', '1251154757.73akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251154757.73akelts"]', '1251154942.28akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251148581.42piwanow"]["Objects"]["1251154942.28akelts"]', '1251155149.92akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251155149.92akelts"]', '1251161466.69akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251161466.69akelts"]', '1251161509.13akelts': '["Objects"]["1172209006.11sdnaik"]["Objects"]["1251161509.13akelts"]'}}
extraInfo = {'camPos': Point3(12.9352, 2.9029, 10.1918), 'camHpr': VBase3(15.8438, -17.1455, 0), 'focalLength': 0.639999985695, 'skyState': -2, 'fog': 1}
| 6,765.571429
| 46,900
| 0.651703
|
d22140d153ace32084ba418234769152e33d218a
| 1,881
|
py
|
Python
|
main4.py
|
Ethan-source/E01b-Smiles
|
053c5b5a7ad6f2d51c68e307e932f75efdb70c09
|
[
"MIT"
] | null | null | null |
main4.py
|
Ethan-source/E01b-Smiles
|
053c5b5a7ad6f2d51c68e307e932f75efdb70c09
|
[
"MIT"
] | null | null | null |
main4.py
|
Ethan-source/E01b-Smiles
|
053c5b5a7ad6f2d51c68e307e932f75efdb70c09
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import utils, open_color, arcade
utils.check_version((3,7))
# Open the window. Set the window title and dimensions (width and height)
arcade.open_window(800, 600, "Smiley Face Example")
arcade.set_background_color(open_color.white)
# Start the render process. This must be done before any drawing commands.
arcade.start_render()
#start at 100, go to 799, counting by 150
for x in range(100,800,150):
#start at 100, go to 599, counting by 150
for y in range(100,600,150):
face_x,face_y = (x,y)
smile_x,smile_y = (face_x + 0,face_y - 10)
eye1_x,eye1_y = (face_x - 45,face_y + 17)
eye2_x,eye2_y = (face_x + 35,face_y + 17)
catch1_x,catch1_y = (face_x - 41,face_y + 24)
catch2_x,catch2_y = (face_x + 41,face_y + 24)
# Draw the smiley face:
# (x,y,radius,color)
arcade.draw_circle_filled(face_x, face_y, 100, open_color.yellow_3)
# (x,y,radius,color,border_thickness)
arcade.draw_circle_outline(face_x, face_y, 100, open_color.black,4)
#(x,y,width,height,color)
arcade.draw_ellipse_filled(eye1_x,eye1_y,15,25,open_color.black)
arcade.draw_ellipse_filled(eye2_x,eye2_y,15,25,open_color.black)
arcade.draw_circle_filled(catch1_x,catch1_y,3,open_color.gray_2)
arcade.draw_circle_filled(catch2_x,catch2_y,3,open_color.gray_2)
#(x,y,width,height,color,start_degrees,end_degrees,border_thickness)
arcade.draw_arc_outline(smile_x,smile_y,60,50,open_color.black,190,350,4)
# Finish the render
# Nothing will be drawn without this.
# Must happen after all draw commands
arcade.finish_render()
# Keep the window up until someone closes it.
arcade.run()
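# Grid arithmetic for the nested loops above: range(100, 800, 150) yields
# x values of 100, 250, 400, 550 and 700, and range(100, 600, 150) yields
# y values of 100, 250, 400 and 550, so the script draws a 5 x 4 grid of
# 20 smiley faces inside the 800 x 600 window.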
| 38.387755
| 89
| 0.645401
|
aee840259fc24cdc0c4d341d475eb641a20dc226
| 2,429
|
py
|
Python
|
aliyun-python-sdk-live/aliyunsdklive/request/v20161101/OpenLiveShiftRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-live/aliyunsdklive/request/v20161101/OpenLiveShiftRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-live/aliyunsdklive/request/v20161101/OpenLiveShiftRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class OpenLiveShiftRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'OpenLiveShift','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Duration(self):
return self.get_query_params().get('Duration')
def set_Duration(self,Duration):
self.add_query_param('Duration',Duration)
def get_AppName(self):
return self.get_query_params().get('AppName')
def set_AppName(self,AppName):
self.add_query_param('AppName',AppName)
def get_StreamName(self):
return self.get_query_params().get('StreamName')
def set_StreamName(self,StreamName):
self.add_query_param('StreamName',StreamName)
def get_IgnoreTranscode(self):
return self.get_query_params().get('IgnoreTranscode')
def set_IgnoreTranscode(self,IgnoreTranscode):
self.add_query_param('IgnoreTranscode',IgnoreTranscode)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Vision(self):
return self.get_query_params().get('Vision')
def set_Vision(self,Vision):
self.add_query_param('Vision',Vision)
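# Minimal usage sketch for this request class; the client construction follows
# the usual aliyunsdkcore pattern, and the credentials, region and names below
# are illustrative placeholders rather than values taken from this SDK.
#
# from aliyunsdkcore.client import AcsClient
#
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = OpenLiveShiftRequest()
# request.set_DomainName('live.example.com')
# request.set_AppName('myapp')
# request.set_StreamName('mystream')
# response = client.do_action_with_exception(request)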
| 32.824324
| 74
| 0.758748
|
dd7f961e0e016e3cc6e342d51710107de24e8c77
| 1,537
|
py
|
Python
|
vendor/Adafruit_Python_DHT/Adafruit_Python_DHT_RPi/Master.py
|
hvos234/raspberrypi.home.website
|
72376beb55167da4b5fadda51992724451166129
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/Adafruit_Python_DHT/Adafruit_Python_DHT_RPi/Master.py
|
hvos234/raspberrypi.home.website
|
72376beb55167da4b5fadda51992724451166129
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/Adafruit_Python_DHT/Adafruit_Python_DHT_RPi/Master.py
|
hvos234/raspberrypi.home.website
|
72376beb55167da4b5fadda51992724451166129
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
# first [0] argument is master
try:
fr = sys.argv[1]
except IndexError:
print "err;no from"
sys.exit(0)
try:
to = sys.argv[2]
except IndexError:
print "err;no to"
sys.exit(0)
try:
ac = sys.argv[3]
except IndexError:
print "err;no action"
sys.exit(0)
import Adafruit_DHT
#sensor = Adafruit_DHT.DHT11
#sensor = Adafruit_DHT.DHT22
sensor = Adafruit_DHT.AM2302
pin = 4
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Un-comment the line below to convert the temperature to Fahrenheit.
# temperature = temperature * 9/5.0 + 32
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
if humidity is not None and temperature is not None:
if '1' == ac:
print 'tem={0:0.2f}'.format(temperature)
sys.exit(0)
elif '2' == ac:
print 'hum={0:0.2f}'.format(humidity)
sys.exit(0)
elif '3' == ac:
print 'tem={0:0.2f},hum={1:0.2f}'.format(temperature, humidity)
sys.exit(0)
else:
print 'err:action does not exist !'
sys.exit(1)
else:
print 'err:failed to get read DHT !'
sys.exit(1)
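# Invocation sketch: the script expects three positional arguments,
# <from> <to> <action>, where action 1 prints temperature, 2 prints humidity
# and 3 prints both; the <from> and <to> values are parsed but not otherwise
# used here. The names below are placeholders, for example:
#   python Master.py master livingroom 3
# which on success prints a line such as tem=21.40,hum=48.20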
| 24.396825
| 79
| 0.659727
|
e5759a591250bd790f4694038cfc3a9a4c4d0940
| 4,208
|
py
|
Python
|
examples/plot_regression.py
|
lapaill/braindecode
|
d5d6e34baef1c8df092e77d1f3e757b53d0e69ea
|
[
"BSD-3-Clause"
] | 301
|
2020-01-15T16:40:59.000Z
|
2022-03-31T05:28:00.000Z
|
examples/plot_regression.py
|
Mrswolf/braindecode
|
d1781c465239c45eccbf5f92e7d7a627ff985e16
|
[
"BSD-3-Clause"
] | 325
|
2020-01-12T21:36:55.000Z
|
2022-03-21T11:59:01.000Z
|
examples/plot_regression.py
|
Mrswolf/braindecode
|
d1781c465239c45eccbf5f92e7d7a627ff985e16
|
[
"BSD-3-Clause"
] | 98
|
2020-01-12T21:22:42.000Z
|
2022-03-24T14:36:08.000Z
|
"""
Regression example on fake data
===============================
"""
# Authors: Lukas Gemein <l.gemein@gmail.com>
#
# License: BSD-3
import numpy as np
import pandas as pd
import torch
from skorch.callbacks import LRScheduler
from skorch.helper import predefined_split
from braindecode import EEGRegressor
from braindecode.preprocessing import create_fixed_length_windows
from braindecode.datasets import BaseDataset, BaseConcatDataset
from braindecode.training.losses import CroppedLoss
from braindecode.models import Deep4Net
from braindecode.models import ShallowFBCSPNet
from braindecode.models.util import to_dense_prediction_model, get_output_shape
from braindecode.util import set_random_seeds, create_mne_dummy_raw
model_name = "shallow" # 'shallow' or 'deep'
n_epochs = 3
seed = 20200220
input_window_samples = 6000
batch_size = 64
cuda = torch.cuda.is_available()
device = 'cuda' if cuda else 'cpu'
if cuda:
torch.backends.cudnn.benchmark = True
n_chans = 21
# set to how many targets you want to regress (age -> 1, [x, y, z] -> 3)
n_classes = 1
set_random_seeds(seed=seed, cuda=cuda)
# initialize a model, transform to dense and move to gpu
if model_name == "shallow":
model = ShallowFBCSPNet(
in_chans=n_chans,
n_classes=n_classes,
input_window_samples=input_window_samples,
n_filters_time=40,
n_filters_spat=40,
final_conv_length=35,
)
optimizer_lr = 0.000625
optimizer_weight_decay = 0
elif model_name == "deep":
model = Deep4Net(
in_chans=n_chans,
n_classes=n_classes,
input_window_samples=input_window_samples,
n_filters_time=25,
n_filters_spat=25,
stride_before_pool=True,
n_filters_2=int(n_chans * 2),
n_filters_3=int(n_chans * (2 ** 2.0)),
n_filters_4=int(n_chans * (2 ** 3.0)),
final_conv_length=1,
)
optimizer_lr = 0.01
optimizer_weight_decay = 0.0005
else:
raise ValueError(f'{model_name} unknown')
new_model = torch.nn.Sequential()
for name, module_ in model.named_children():
if "softmax" in name:
continue
new_model.add_module(name, module_)
model = new_model
if cuda:
model.cuda()
to_dense_prediction_model(model)
n_preds_per_input = get_output_shape(model, n_chans, input_window_samples)[2]
def fake_regression_dataset(n_fake_recs, n_fake_chs, fake_sfreq, fake_duration_s):
datasets = []
for i in range(n_fake_recs):
train_or_eval = "eval" if i == 0 else "train"
raw, save_fname = create_mne_dummy_raw(
n_channels=n_fake_chs, n_times=fake_duration_s * fake_sfreq,
sfreq=fake_sfreq, savedir=None)
target = np.random.randint(0, 100, n_classes)
if n_classes == 1:
target = target[0]
        fake_description = pd.Series(
            data=[target, train_or_eval],
            index=["target", "session"])
        base_ds = BaseDataset(raw, fake_description, target_name="target")
datasets.append(base_ds)
dataset = BaseConcatDataset(datasets)
return dataset
dataset = fake_regression_dataset(
n_fake_recs=5, n_fake_chs=21, fake_sfreq=100, fake_duration_s=60)
windows_dataset = create_fixed_length_windows(
dataset,
start_offset_samples=0,
stop_offset_samples=0,
window_size_samples=input_window_samples,
window_stride_samples=n_preds_per_input,
drop_last_window=False,
drop_bad_windows=True,
)
splits = windows_dataset.split("session")
train_set = splits["train"]
valid_set = splits["eval"]
regressor = EEGRegressor(
model,
cropped=True,
criterion=CroppedLoss,
criterion__loss_function=torch.nn.functional.mse_loss,
optimizer=torch.optim.AdamW,
train_split=predefined_split(valid_set),
optimizer__lr=optimizer_lr,
optimizer__weight_decay=optimizer_weight_decay,
iterator_train__shuffle=True,
batch_size=batch_size,
callbacks=[
"neg_root_mean_squared_error",
        # T_max = n_epochs - 1 appears to give the desired behaviour of the learning rate reaching 0 by the end of training
("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
],
device=device,
)
regressor.fit(train_set, y=None, epochs=n_epochs)
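# After fitting, predictions for the held-out windows can be obtained with the
# standard skorch-style predict call; the exact shape of the returned array
# depends on the cropped-decoding configuration chosen above.
y_pred = regressor.predict(valid_set)
print(y_pred.shape)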
| 29.843972
| 84
| 0.714354
|
6aac0d0f66d98d03d26eb51412ead3383666f8d9
| 2,883
|
py
|
Python
|
ljungbox.py
|
SeptumCapital/TimeSeries_Notebooks_Collections
|
93e22fe817a40513d06c2d1ade98bdbab7151612
|
[
"MIT"
] | 32
|
2019-11-03T06:04:29.000Z
|
2022-03-28T21:57:25.000Z
|
ljungbox.py
|
quantsense/TimeSeries_Notebooks_Collections
|
fbc93689af056a007ac0366925d55cec5096da29
|
[
"MIT"
] | null | null | null |
ljungbox.py
|
quantsense/TimeSeries_Notebooks_Collections
|
fbc93689af056a007ac0366925d55cec5096da29
|
[
"MIT"
] | 20
|
2019-11-03T06:15:20.000Z
|
2022-02-20T06:50:37.000Z
|
import numpy as np
import scipy.stats
def sac(x, k=1):
"""
    Sample autocorrelation (as used in statistics, with normalization)
http://en.wikipedia.org/wiki/Autocorrelation
Parameters
----------
x : 1d numpy array
Signal
k : int or list of ints
Lags to calculate sample autocorrelation for
Returns
-------
res : scalar or np array
The sample autocorrelation. A scalar value if k is a scalar, and a
        numpy array if k is an iterable.
"""
try:
res = []
for ki in k:
res.append(sac(x, ki))
return np.array(res)
    except TypeError:
        # k is not iterable; fall through to the scalar computation below
        pass
mx = np.mean(x)
if k==0:
N = np.sum((x-mx)*(x-mx))
else:
N = np.sum((x[:-k]-mx)*(x[k:]-mx))
D = len(x) * np.var(x)
return N/D
def ljungbox(x, lags, alpha=0.1):
"""
The Ljung-Box test for determining if the data is independently distributed.
Parameters
----------
x : 1d numpy array
Signal to test
lags : int
Number of lags being tested
Returns
-------
Q : float
Test statistic
"""
n = len(x)
Q = 0
for k in range(1, lags+1):
Q += (sac(x, k)**2) / (n-k)
Q = n*(n+2)*Q
return Q
def boxpierce(x, lags, alpha=0.1):
"""
The Box-Pierce test for determining if the data is independently distributed.
Parameters
----------
x : 1d numpy array
Signal to test
lags : int
Number of lags being tested
Returns
-------
Q : float
Test statistic
"""
n = len(x)
Q = 0
for k in range(1, lags+1):
Q += (sac(x, k)**2)
Q = n*Q
return Q
def lbqtest(x, lags, alpha=0.1, method='lb'):
"""
The Ljung-Box test for determining if the data is independently distributed.
Parameters
----------
x : 1d numpy array
Signal to test
lags : list of ints
Lags being tested
alpha : float
Significance level used for the tests
method : string
Can be either 'lb' for Ljung-Box, or 'bp' for Box-Pierce
Returns
-------
h : np array
Numpy array of bool values, True == H0 hypothesis rejected
pV : np array
Test statistics p-values
Q : np array
Test statistics
cV : np array
Critical values used for determining if H0 should be rejected. The
critical values are calculated from the given alpha and lag.
"""
if method=='lb':
findq = ljungbox
else:
findq = boxpierce
n = len(x)
Q = np.zeros(len(lags))
pV = np.zeros(len(lags))
cV = np.zeros(len(lags))
for i, lag in enumerate(lags):
Q[i] = findq(x, lag)
pV[i] = 1.0 - scipy.stats.chi2.cdf(Q[i], lag)
cV[i] = scipy.stats.chi2.ppf(1-alpha, lag)
h = Q>cV
return h, pV, Q, cV
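if __name__ == "__main__":
    # Small self-test sketch: for white noise the null hypothesis of
    # independence should rarely be rejected, so most entries of h are
    # expected to be False. The lags and alpha used here are illustrative
    # choices only.
    rng = np.random.default_rng(0)
    x = rng.standard_normal(500)
    h, pV, Q, cV = lbqtest(x, lags=[5, 10, 20], alpha=0.05)
    for lag, reject, p in zip([5, 10, 20], h, pV):
        print(f"lag={lag:2d} reject H0={reject} p-value={p:.3f}")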
| 22.700787
| 81
| 0.540062
|
e1c50626f4231b147f570d0cf9dd643073447030
| 3,628
|
py
|
Python
|
tests/openwisp2/sample_users/tests.py
|
ShreeshaRelysys/openwisp-users
|
af5f95e89656cbf3dd32f2392ba6d0f1b2c4df96
|
[
"BSD-3-Clause"
] | 1
|
2020-09-06T18:24:06.000Z
|
2020-09-06T18:24:06.000Z
|
tests/openwisp2/sample_users/tests.py
|
ShreeshaRelysys/openwisp-users
|
af5f95e89656cbf3dd32f2392ba6d0f1b2c4df96
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T16:44:03.000Z
|
2022-01-24T16:44:03.000Z
|
tests/openwisp2/sample_users/tests.py
|
ShreeshaRelysys/openwisp-users
|
af5f95e89656cbf3dd32f2392ba6d0f1b2c4df96
|
[
"BSD-3-Clause"
] | null | null | null |
from openwisp_users.tests.test_admin import (
TestBasicUsersIntegration as BaseTestBasicUsersIntegration,
)
from openwisp_users.tests.test_admin import (
TestMultitenantAdmin as BaseTestMultitenantAdmin,
)
from openwisp_users.tests.test_admin import TestUsersAdmin as BaseTestUsersAdmin
from openwisp_users.tests.test_api.test_api import TestUsersApi as BaseTestUsersApi
from openwisp_users.tests.test_api.test_authentication import (
AuthenticationTests as BaseAuthenticationTests,
)
from openwisp_users.tests.test_api.test_throttling import (
RatelimitTests as BaseRatelimitTests,
)
from openwisp_users.tests.test_api.test_views import (
TestRestFrameworkViews as BaseTestRestFrameworkViews,
)
from openwisp_users.tests.test_backends import TestBackends as BaseTestBackends
from openwisp_users.tests.test_models import TestUsers as BaseTestUsers
additional_fields = [
('social_security_number', '123-45-6789'),
('details', 'Example value for detail used during testing.'),
]
class GetEditFormInlineMixin(object):
"""
The following code is only used in testing,
please remove it or replace it with your
Inline form fields data.
"""
def _get_org_edit_form_inline_params(self, user, organization):
"""
This function is created to be overridden
when the user extends openwisp-users
and adds inline forms in the Organization model.
"""
params = super()._get_user_edit_form_inline_params(user, organization)
params.update(
{
'organizationinlinemodel-TOTAL_FORMS': 1,
'organizationinlinemodel-INITIAL_FORMS': 0,
'organizationinlinemodel-MIN_NUM_FORMS': 0,
'organizationinlinemodel-MAX_NUM_FORMS': 1,
'organizationinlinemodel-0-details': '',
'organizationinlinemodel-0-user': str(organization.pk),
}
)
return params
def _get_user_edit_form_inline_params(self, user, organization):
"""
This function is created to be overridden
when the user extends openwisp-users
and adds inline forms in the User model.
"""
params = super()._get_user_edit_form_inline_params(user, organization)
params.update(
{
'userinlinemodel-TOTAL_FORMS': 1,
'userinlinemodel-INITIAL_FORMS': 0,
'userinlinemodel-MIN_NUM_FORMS': 0,
'userinlinemodel-MAX_NUM_FORMS': 1,
'userinlinemodel-0-details': '',
'userinlinemodel-0-user': str(user.pk),
}
)
return params
class TestUsersAdmin(GetEditFormInlineMixin, BaseTestUsersAdmin):
app_label = 'sample_users'
_additional_user_fields = additional_fields
class TestBasicUsersIntegration(GetEditFormInlineMixin, BaseTestBasicUsersIntegration):
app_label = 'sample_users'
_additional_user_fields = additional_fields
class TestMultitenantAdmin(BaseTestMultitenantAdmin):
app_label = 'sample_users'
class TestUsers(BaseTestUsers):
pass
class AuthenticationTests(BaseAuthenticationTests):
pass
class TestRestFrameworkViews(BaseTestRestFrameworkViews):
pass
class RatelimitTests(BaseRatelimitTests):
pass
class TestBackends(BaseTestBackends):
pass
class TestUsersApi(BaseTestUsersApi):
pass
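# The imported base test classes are removed from this module's namespace so
# that the test runner only discovers the subclasses defined above and does
# not run the upstream openwisp-users test cases a second time.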
del BaseTestUsersAdmin
del BaseTestBasicUsersIntegration
del BaseTestMultitenantAdmin
del BaseTestUsers
del BaseAuthenticationTests
del BaseRatelimitTests
del BaseTestRestFrameworkViews
del BaseTestBackends
del BaseTestUsersApi
| 30.233333
| 87
| 0.724366
|
a4195392761ccf823292c9d2d9b2c7ee62ee8047
| 38,712
|
py
|
Python
|
pySuStaIn/ZscoreSustain.py
|
ElsevierSoftwareX/SOFTX-D-21-00098
|
225e083eff46277016104ad0191b79115b9de478
|
[
"MIT"
] | 1
|
2022-03-21T18:36:52.000Z
|
2022-03-21T18:36:52.000Z
|
pySuStaIn/ZscoreSustain.py
|
ElsevierSoftwareX/SOFTX-D-21-00098
|
225e083eff46277016104ad0191b79115b9de478
|
[
"MIT"
] | null | null | null |
pySuStaIn/ZscoreSustain.py
|
ElsevierSoftwareX/SOFTX-D-21-00098
|
225e083eff46277016104ad0191b79115b9de478
|
[
"MIT"
] | null | null | null |
###
# pySuStaIn: a Python implementation of the Subtype and Stage Inference (SuStaIn) algorithm
#
# If you use pySuStaIn, please cite the following core papers:
# 1. The original SuStaIn paper: https://doi.org/10.1038/s41467-018-05892-0
# 2. The pySuStaIn software paper: https://doi.org/10.1101/2021.06.09.447713
#
# Please also cite the corresponding progression pattern model you use:
# 1. The piece-wise linear z-score model (i.e. ZscoreSustain): https://doi.org/10.1038/s41467-018-05892-0
# 2. The event-based model (i.e. MixtureSustain): https://doi.org/10.1016/j.neuroimage.2012.01.062
# with Gaussian mixture modeling (i.e. 'mixture_gmm'): https://doi.org/10.1093/brain/awu176
# or kernel density estimation (i.e. 'mixture_kde'): https://doi.org/10.1002/alz.12083
# 3. The model for discrete ordinal data (i.e. OrdinalSustain): TBD
#
# Thanks a lot for supporting this project.
#
# Authors: Peter Wijeratne (p.wijeratne@ucl.ac.uk) and Leon Aksman (leon.aksman@loni.usc.edu)
# Contributors: Arman Eshaghi (a.eshaghi@ucl.ac.uk), Alex Young (alexandra.young@kcl.ac.uk), Cameron Shand (c.shand@ucl.ac.uk)
###
from tqdm.auto import tqdm
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
from pySuStaIn.AbstractSustain import AbstractSustainData
from pySuStaIn.AbstractSustain import AbstractSustain
#*******************************************
#The data structure class for ZscoreSustain. It holds the z-scored data that gets passed around and re-indexed in places.
class ZScoreSustainData(AbstractSustainData):
def __init__(self, data, numStages):
self.data = data
self.__numStages = numStages
def getNumSamples(self):
return self.data.shape[0]
def getNumBiomarkers(self):
return self.data.shape[1]
def getNumStages(self):
return self.__numStages
def reindex(self, index):
return ZScoreSustainData(self.data[index,], self.__numStages)
#*******************************************
#An implementation of the AbstractSustain class with multiple events for each biomarker based on deviations from normality, measured in z-scores.
#There are a fixed number of thresholds for each biomarker, specified at initialization of the ZscoreSustain object.
class ZscoreSustain(AbstractSustain):
def __init__(self,
data,
Z_vals,
Z_max,
biomarker_labels,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed=None):
# The initializer for the z-score based events implementation of AbstractSustain
# Parameters:
# data - !important! needs to be (positive) z-scores!
# dim: number of subjects x number of biomarkers
# Z_vals - a matrix specifying the z-score thresholds for each biomarker
# for M biomarkers and 3 thresholds (1,2 and 3 for example) this would be a dim: M x 3 matrix
# Z_max - a vector specifying the maximum z-score for each biomarker
# when using z-score thresholds of 1,2,3 this would typically be 5.
# for M biomarkers this would be a dim: M x 1 vector
# biomarker_labels - the names of the biomarkers as a list of strings
# N_startpoints - number of startpoints to use in maximum likelihood step of SuStaIn, typically 25
# N_S_max - maximum number of subtypes, should be 1 or more
# N_iterations_MCMC - number of MCMC iterations, typically 1e5 or 1e6 but can be lower for debugging
# output_folder - where to save pickle files, etc.
# dataset_name - for naming pickle files
# use_parallel_startpoints - boolean for whether or not to parallelize the maximum likelihood loop
# seed - random number seed
N = data.shape[1] # number of biomarkers
assert (len(biomarker_labels) == N), "number of labels should match number of biomarkers"
stage_zscore = Z_vals.T.flatten() #np.array([y for x in Z_vals.T for y in x])
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
IX_select = stage_zscore>0
stage_zscore = stage_zscore[IX_select]
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
num_zscores = Z_vals.shape[1]
IX_vals = np.array([[x for x in range(N)]] * num_zscores).T
stage_biomarker_index = IX_vals.T.flatten() #np.array([y for x in IX_vals.T for y in x])
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
stage_biomarker_index = stage_biomarker_index[IX_select]
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
self.stage_zscore = stage_zscore
self.stage_biomarker_index = stage_biomarker_index
self.min_biomarker_zscore = [0] * N
self.max_biomarker_zscore = Z_max
self.std_biomarker_zscore = [1] * N
self.biomarker_labels = biomarker_labels
numStages = stage_zscore.shape[1]
self.__sustainData = ZScoreSustainData(data, numStages)
super().__init__(self.__sustainData,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed)
def _initialise_sequence(self, sustainData, rng):
# Randomly initialises a linear z-score model ensuring that the biomarkers
# are monotonically increasing
#
#
# OUTPUTS:
# S - a random linear z-score model under the condition that each biomarker
# is monotonically increasing
N = np.array(self.stage_zscore).shape[1]
S = np.zeros(N)
for i in range(N):
IS_min_stage_zscore = np.array([False] * N)
possible_biomarkers = np.unique(self.stage_biomarker_index)
for j in range(len(possible_biomarkers)):
IS_unselected = [False] * N
for k in set(range(N)) - set(S[:i]):
IS_unselected[k] = True
this_biomarkers = np.array([(np.array(self.stage_biomarker_index)[0] == possible_biomarkers[j]).astype(int) +
(np.array(IS_unselected) == 1).astype(int)]) == 2
if not np.any(this_biomarkers):
this_min_stage_zscore = 0
else:
this_min_stage_zscore = min(self.stage_zscore[this_biomarkers])
if (this_min_stage_zscore):
temp = ((this_biomarkers.astype(int) + (self.stage_zscore == this_min_stage_zscore).astype(int)) == 2).T
temp = temp.reshape(len(temp), )
IS_min_stage_zscore[temp] = True
events = np.array(range(N))
possible_events = np.array(events[IS_min_stage_zscore])
this_index = np.ceil(rng.random() * ((len(possible_events)))) - 1
S[i] = possible_events[int(this_index)]
S = S.reshape(1, len(S))
return S
def _calculate_likelihood_stage(self, sustainData, S):
'''
Computes the likelihood of a single linear z-score model using an
approximation method (faster)
Outputs:
========
        p_perm_k - the probability of each subject's data at each stage of a particular subtype
in the SuStaIn model
'''
N = self.stage_biomarker_index.shape[1]
S_inv = np.array([0] * N)
S_inv[S.astype(int)] = np.arange(N)
possible_biomarkers = np.unique(self.stage_biomarker_index)
B = len(possible_biomarkers)
point_value = np.zeros((B, N + 2))
# all the arange you'll need below
arange_N = np.arange(N + 2)
for i in range(B):
b = possible_biomarkers[i]
event_location = np.concatenate([[0], S_inv[(self.stage_biomarker_index == b)[0]], [N]])
event_value = np.concatenate([[self.min_biomarker_zscore[i]], self.stage_zscore[self.stage_biomarker_index == b], [self.max_biomarker_zscore[i]]])
for j in range(len(event_location) - 1):
if j == 0: # FIXME: nasty hack to get Matlab indexing to match up - necessary here because indices are used for linspace limits
# original
#temp = np.arange(event_location[j],event_location[j+1]+2)
#point_value[i,temp] = np.linspace(event_value[j],event_value[j+1],event_location[j+1]-event_location[j]+2)
# fastest by a bit
temp = arange_N[event_location[j]:(event_location[j + 1] + 2)]
N_j = event_location[j + 1] - event_location[j] + 2
point_value[i, temp] = ZscoreSustain.linspace_local2(event_value[j], event_value[j + 1], N_j, arange_N[0:N_j])
else:
# original
#temp = np.arange(event_location[j] + 1, event_location[j + 1] + 2)
#point_value[i, temp] = np.linspace(event_value[j],event_value[j+1],event_location[j+1]-event_location[j]+1)
# fastest by a bit
temp = arange_N[(event_location[j] + 1):(event_location[j + 1] + 2)]
N_j = event_location[j + 1] - event_location[j] + 1
point_value[i, temp] = ZscoreSustain.linspace_local2(event_value[j], event_value[j + 1], N_j, arange_N[0:N_j])
stage_value = 0.5 * point_value[:, :point_value.shape[1] - 1] + 0.5 * point_value[:, 1:]
M = sustainData.getNumSamples() #data_local.shape[0]
p_perm_k = np.zeros((M, N + 1))
# optimised likelihood calc - take log and only call np.exp once after loop
sigmat = np.array(self.std_biomarker_zscore)
factor = np.log(1. / np.sqrt(np.pi * 2.0) * sigmat)
coeff = np.log(1. / float(N + 1))
# original
"""
for j in range(N+1):
x = (data-np.tile(stage_value[:,j],(M,1)))/sigmat
p_perm_k[:,j] = coeff+np.sum(factor-.5*x*x,1)
"""
# faster - do the tiling once
# stage_value_tiled = np.tile(stage_value, (M, 1))
# N_biomarkers = stage_value.shape[0]
# for j in range(N + 1):
# stage_value_tiled_j = stage_value_tiled[:, j].reshape(M, N_biomarkers)
# x = (sustainData.data - stage_value_tiled_j) / sigmat #(data_local - stage_value_tiled_j) / sigmat
# p_perm_k[:, j] = coeff + np.sum(factor - .5 * np.square(x), 1)
# p_perm_k = np.exp(p_perm_k)
# even faster - do in one go
x = (sustainData.data[:, :, None] - stage_value) / sigmat[None, :, None]
p_perm_k = coeff + np.sum(factor[None, :, None] - 0.5 * np.square(x), 1)
p_perm_k = np.exp(p_perm_k)
return p_perm_k
def _optimise_parameters(self, sustainData, S_init, f_init, rng):
# Optimise the parameters of the SuStaIn model
M = sustainData.getNumSamples() #data_local.shape[0]
N_S = S_init.shape[0]
N = self.stage_zscore.shape[1]
S_opt = S_init.copy() # have to copy or changes will be passed to S_init
f_opt = np.array(f_init).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
p_perm_k = np.zeros((M, N + 1, N_S))
for s in range(N_S):
p_perm_k[:, :, s] = self._calculate_likelihood_stage(sustainData, S_opt[s])
p_perm_k_weighted = p_perm_k * f_val_mat
p_perm_k_norm = p_perm_k_weighted / np.sum(p_perm_k_weighted, axis=(1,2), keepdims=True)
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
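        # (Explanatory note) The block above is effectively one EM-style update: p_perm_k_norm
        # holds each subject's responsibilities over (stage, subtype), and f_opt is re-estimated
        # as each subtype's share of the total responsibility mass.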
order_seq = rng.permutation(N_S) # this will produce different random numbers to Matlab
for s in order_seq:
order_bio = rng.permutation(N) # this will produce different random numbers to Matlab
for i in order_bio:
current_sequence = S_opt[s]
current_location = np.array([0] * len(current_sequence))
current_location[current_sequence.astype(int)] = np.arange(len(current_sequence))
selected_event = i
move_event_from = current_location[selected_event]
this_stage_zscore = self.stage_zscore[0, selected_event]
selected_biomarker = self.stage_biomarker_index[0, selected_event]
possible_zscores_biomarker = self.stage_zscore[self.stage_biomarker_index == selected_biomarker]
# slightly different conditional check to matlab version to protect python from calling min,max on an empty array
min_filter = possible_zscores_biomarker < this_stage_zscore
max_filter = possible_zscores_biomarker > this_stage_zscore
events = np.array(range(N))
if np.any(min_filter):
min_zscore_bound = max(possible_zscores_biomarker[min_filter])
min_zscore_bound_event = events[((self.stage_zscore[0] == min_zscore_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_lower_bound = current_location[min_zscore_bound_event] + 1
else:
move_event_to_lower_bound = 0
if np.any(max_filter):
max_zscore_bound = min(possible_zscores_biomarker[max_filter])
max_zscore_bound_event = events[((self.stage_zscore[0] == max_zscore_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_upper_bound = current_location[max_zscore_bound_event]
else:
move_event_to_upper_bound = N
# FIXME: hack because python won't produce an array in range (N,N), while matlab will produce an array (N)... urgh
if move_event_to_lower_bound == move_event_to_upper_bound:
possible_positions = np.array([0])
else:
possible_positions = np.arange(move_event_to_lower_bound, move_event_to_upper_bound)
possible_sequences = np.zeros((len(possible_positions), N))
possible_likelihood = np.zeros((len(possible_positions), 1))
possible_p_perm_k = np.zeros((M, N + 1, len(possible_positions)))
for index in range(len(possible_positions)):
current_sequence = S_opt[s]
#choose a position in the sequence to move an event to
move_event_to = possible_positions[index]
# move this event in its new position
current_sequence = np.delete(current_sequence, move_event_from, 0) # this is different to the Matlab version, which call current_sequence(move_event_from) = []
new_sequence = np.concatenate([current_sequence[np.arange(move_event_to)], [selected_event], current_sequence[np.arange(move_event_to, N - 1)]])
possible_sequences[index, :] = new_sequence
possible_p_perm_k[:, :, index] = self._calculate_likelihood_stage(sustainData, new_sequence)
p_perm_k[:, :, s] = possible_p_perm_k[:, :, index]
total_prob_stage = np.sum(p_perm_k * f_val_mat, 2)
total_prob_subj = np.sum(total_prob_stage, 1)
possible_likelihood[index] = np.sum(np.log(total_prob_subj + 1e-250))
possible_likelihood = possible_likelihood.reshape(possible_likelihood.shape[0])
max_likelihood = max(possible_likelihood)
this_S = possible_sequences[possible_likelihood == max_likelihood, :]
this_S = this_S[0, :]
S_opt[s] = this_S
this_p_perm_k = possible_p_perm_k[:, :, possible_likelihood == max_likelihood]
p_perm_k[:, :, s] = this_p_perm_k[:, :, 0]
S_opt[s] = this_S
p_perm_k_weighted = p_perm_k * f_val_mat
#adding 1e-250 fixes divide by zero problem that happens rarely
#p_perm_k_norm = p_perm_k_weighted / np.tile(np.sum(np.sum(p_perm_k_weighted, 1), 1).reshape(M, 1, 1), (1, N + 1, N_S)) # the second summation axis is different to Matlab version
p_perm_k_norm = p_perm_k_weighted / np.sum(p_perm_k_weighted + 1e-250, axis=(1, 2), keepdims=True)
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
f_opt = f_opt.reshape(N_S)
total_prob_stage = np.sum(p_perm_k * f_val_mat, 2)
total_prob_subj = np.sum(total_prob_stage, 1)
likelihood_opt = np.sum(np.log(total_prob_subj + 1e-250))
return S_opt, f_opt, likelihood_opt
def _perform_mcmc(self, sustainData, seq_init, f_init, n_iterations, seq_sigma, f_sigma):
# Take MCMC samples of the uncertainty in the SuStaIn model parameters
N = self.stage_zscore.shape[1]
N_S = seq_init.shape[0]
if isinstance(f_sigma, float): # FIXME: hack to enable multiplication
f_sigma = np.array([f_sigma])
samples_sequence = np.zeros((N_S, N, n_iterations))
samples_f = np.zeros((N_S, n_iterations))
samples_likelihood = np.zeros((n_iterations, 1))
samples_sequence[:, :, 0] = seq_init # don't need to copy as we don't write to 0 index
samples_f[:, 0] = f_init
# Reduce frequency of tqdm update to 0.1% of total for larger iteration numbers
tqdm_update_iters = int(n_iterations/1000) if n_iterations > 100000 else None
for i in tqdm(range(n_iterations), "MCMC Iteration", n_iterations, miniters=tqdm_update_iters):
if i > 0:
seq_order = self.global_rng.permutation(N_S) # this function returns different random numbers to Matlab
for s in seq_order:
move_event_from = int(np.ceil(N * self.global_rng.random())) - 1
current_sequence = samples_sequence[s, :, i - 1]
current_location = np.array([0] * N)
current_location[current_sequence.astype(int)] = np.arange(N)
selected_event = int(current_sequence[move_event_from])
this_stage_zscore = self.stage_zscore[0, selected_event]
selected_biomarker = self.stage_biomarker_index[0, selected_event]
possible_zscores_biomarker = self.stage_zscore[self.stage_biomarker_index == selected_biomarker]
# slightly different conditional check to matlab version to protect python from calling min,max on an empty array
min_filter = possible_zscores_biomarker < this_stage_zscore
max_filter = possible_zscores_biomarker > this_stage_zscore
events = np.array(range(N))
if np.any(min_filter):
min_zscore_bound = max(possible_zscores_biomarker[min_filter])
min_zscore_bound_event = events[((self.stage_zscore[0] == min_zscore_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_lower_bound = current_location[min_zscore_bound_event] + 1
else:
move_event_to_lower_bound = 0
if np.any(max_filter):
max_zscore_bound = min(possible_zscores_biomarker[max_filter])
max_zscore_bound_event = events[((self.stage_zscore[0] == max_zscore_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_upper_bound = current_location[max_zscore_bound_event]
else:
move_event_to_upper_bound = N
# FIXME: hack because python won't produce an array in range (N,N), while matlab will produce an array (N)... urgh
if move_event_to_lower_bound == move_event_to_upper_bound:
possible_positions = np.array([0])
else:
possible_positions = np.arange(move_event_to_lower_bound, move_event_to_upper_bound)
distance = possible_positions - move_event_from
if isinstance(seq_sigma, int): # FIXME: change to float
this_seq_sigma = seq_sigma
else:
this_seq_sigma = seq_sigma[s, selected_event]
# use own normal PDF because stats.norm is slow
weight = AbstractSustain.calc_coeff(this_seq_sigma) * AbstractSustain.calc_exp(distance, 0., this_seq_sigma)
weight /= np.sum(weight)
index = self.global_rng.choice(range(len(possible_positions)), 1, replace=True, p=weight) # FIXME: difficult to check this because random.choice is different to Matlab randsample
move_event_to = possible_positions[index]
current_sequence = np.delete(current_sequence, move_event_from, 0)
new_sequence = np.concatenate([current_sequence[np.arange(move_event_to)], [selected_event], current_sequence[np.arange(move_event_to, N - 1)]])
samples_sequence[s, :, i] = new_sequence
new_f = samples_f[:, i - 1] + f_sigma * self.global_rng.standard_normal()
new_f = (np.fabs(new_f) / np.sum(np.fabs(new_f)))
samples_f[:, i] = new_f
S = samples_sequence[:, :, i]
f = samples_f[:, i]
likelihood_sample, _, _, _, _ = self._calculate_likelihood(sustainData, S, f)
samples_likelihood[i] = likelihood_sample
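            # (Explanatory note) Metropolis acceptance: the proposal is kept with probability
            # min(1, exp(new_loglik - old_loglik)); otherwise the previous sample is restored below.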
if i > 0:
ratio = np.exp(samples_likelihood[i] - samples_likelihood[i - 1])
if ratio < self.global_rng.random():
samples_likelihood[i] = samples_likelihood[i - 1]
samples_sequence[:, :, i] = samples_sequence[:, :, i - 1]
samples_f[:, i] = samples_f[:, i - 1]
perm_index = np.where(samples_likelihood == max(samples_likelihood))
perm_index = perm_index[0]
ml_likelihood = max(samples_likelihood)
ml_sequence = samples_sequence[:, :, perm_index]
ml_f = samples_f[:, perm_index]
return ml_sequence, ml_f, ml_likelihood, samples_sequence, samples_f, samples_likelihood
def _plot_sustain_model(self, samples_sequence, samples_f, n_samples, cval=False, subtype_order=None, biomarker_order=None, title_font_size=10):
if subtype_order is None:
subtype_order = self._plot_subtype_order
#biomarker_order currently unused here
colour_mat = np.array([[1, 0, 0], [1, 0, 1], [0, 0, 1]]) #, [0.5, 0, 1], [0, 1, 1]])
temp_mean_f = np.mean(samples_f, 1)
vals = np.sort(temp_mean_f)[::-1]
vals = np.array([np.round(x * 100.) for x in vals]) / 100.
#ix = np.argsort(temp_mean_f)[::-1]
N_S = samples_sequence.shape[0]
N_bio = len(self.biomarker_labels)
if N_S == 1:
fig, ax = plt.subplots()
total_axes = 1
elif N_S < 3:
fig, ax = plt.subplots(1, N_S)
total_axes = N_S
elif N_S < 7:
fig, ax = plt.subplots(2, int(np.ceil(N_S / 2)))
total_axes = 2 * int(np.ceil(N_S / 2))
else:
fig, ax = plt.subplots(3, int(np.ceil(N_S / 3)))
total_axes = 3 * int(np.ceil(N_S / 3))
for i in range(total_axes): #range(N_S):
if i not in range(N_S):
ax.flat[i].set_axis_off()
continue
this_samples_sequence = samples_sequence[subtype_order[i],:,:].T
markers = np.unique(self.stage_biomarker_index)
N = this_samples_sequence.shape[1]
confus_matrix = np.zeros((N, N))
for j in range(N):
confus_matrix[j, :] = sum(this_samples_sequence == j)
confus_matrix /= float(this_samples_sequence.shape[0])
zvalues = np.unique(self.stage_zscore)
N_z = len(zvalues)
confus_matrix_z = np.zeros((N_bio, N, N_z))
for z in range(N_z):
confus_matrix_z[self.stage_biomarker_index[self.stage_zscore == zvalues[z]], :, z] = confus_matrix[(self.stage_zscore == zvalues[z])[0],:]
confus_matrix_c = np.ones((N_bio, N, 3))
for z in range(N_z):
this_confus_matrix = confus_matrix_z[:, :, z]
this_colour = colour_mat[z, :]
alter_level = this_colour == 0
this_colour_matrix = np.zeros((N_bio, N, 3))
this_colour_matrix[:, :, alter_level] = np.tile(this_confus_matrix[markers, :].reshape(N_bio, N, 1), (1, 1, sum(alter_level)))
confus_matrix_c = confus_matrix_c - this_colour_matrix
TITLE_FONT_SIZE = title_font_size
X_FONT_SIZE = 10 #8
Y_FONT_SIZE = 10 #7
            if not cval:
if n_samples != np.inf:
title_i = 'Subtype ' + str(i+1) + ' (f=' + str(vals[i]) + r', n=' + str(int(np.round(vals[i] * n_samples))) + ')'
else:
title_i = 'Subtype ' + str(i+1) + ' (f=' + str(vals[i]) + ')'
else:
title_i = 'Subtype ' + str(i+1) + ' cross-validated'
# must be a smarter way of doing this, but subplots(1,1) doesn't produce an array...
if N_S > 1:
ax_i = ax.flat[i] #ax[i]
ax_i.imshow(confus_matrix_c, interpolation='nearest') #, cmap=plt.cm.Blues)
ax_i.set_xticks(np.arange(N))
ax_i.set_xticklabels(range(1, N+1), rotation=45, fontsize=X_FONT_SIZE)
ax_i.set_yticks(np.arange(N_bio))
ax_i.set_yticklabels([]) #['']* N_bio)
if i == 0:
ax_i.set_yticklabels(np.array(self.biomarker_labels, dtype='object'), ha='right', fontsize=Y_FONT_SIZE)
for tick in ax_i.yaxis.get_major_ticks():
tick.label.set_color('black')
#ax[i].set_ylabel('Biomarker name') #, fontsize=20)
ax_i.set_xlabel('SuStaIn stage', fontsize=X_FONT_SIZE)
ax_i.set_title(title_i, fontsize=TITLE_FONT_SIZE)
else: #**** first plot
ax.imshow(confus_matrix_c) #, interpolation='nearest')#, cmap=plt.cm.Blues) #[...,::-1]
ax.set_xticks(np.arange(N))
ax.set_xticklabels(range(1, N+1), rotation=45, fontsize=X_FONT_SIZE)
ax.set_yticks(np.arange(N_bio))
ax.set_yticklabels(np.array(self.biomarker_labels, dtype='object'), ha='right', fontsize=Y_FONT_SIZE)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_color('black')
ax.set_xlabel('SuStaIn stage', fontsize=X_FONT_SIZE)
ax.set_title(title_i, fontsize=TITLE_FONT_SIZE)
plt.tight_layout()
#if cval:
# fig.suptitle('Cross validation')
return fig, ax
def subtype_and_stage_individuals_newData(self, data_new, samples_sequence, samples_f, N_samples):
numStages_new = self.__sustainData.getNumStages() #data_new.shape[1]
sustainData_newData = ZScoreSustainData(data_new, numStages_new)
ml_subtype, \
prob_ml_subtype, \
ml_stage, \
prob_ml_stage, \
prob_subtype, \
prob_stage, \
prob_subtype_stage = self.subtype_and_stage_individuals(sustainData_newData, samples_sequence, samples_f, N_samples)
return ml_subtype, prob_ml_subtype, ml_stage, prob_ml_stage, prob_subtype, prob_stage, prob_subtype_stage
# ********************* STATIC METHODS
@staticmethod
def linspace_local2(a, b, N, arange_N):
return a + (b - a) / (N - 1.) * arange_N
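    # (Explanatory note) With arange_N = np.arange(N), linspace_local2(a, b, N, arange_N)
    # reproduces np.linspace(a, b, N) without the per-call overhead, e.g.
    # linspace_local2(0., 1., 5, np.arange(5)) -> [0., 0.25, 0.5, 0.75, 1.].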
# ********************* TEST METHODS
@classmethod
def test_sustain(cls, n_biomarkers, n_samples, n_subtypes,
ground_truth_subtypes, sustain_kwargs, seed=42):
# Set a global seed to propagate
np.random.seed(seed)
# Create Z values
Z_vals = np.tile(np.arange(1, 4), (n_biomarkers, 1))
Z_vals[0, 2] = 0
Z_max = np.full((n_biomarkers,), 5)
Z_max[2] = 2
ground_truth_sequences = cls.generate_random_model(Z_vals, n_subtypes)
N_stages = np.sum(Z_vals > 0) + 1
ground_truth_stages_control = np.zeros((int(np.round(n_samples * 0.25)), 1))
ground_truth_stages_other = np.random.randint(1, N_stages+1, (int(np.round(n_samples * 0.75)), 1))
ground_truth_stages = np.vstack((ground_truth_stages_control, ground_truth_stages_other)).astype(int)
data, data_denoised, stage_value = cls.generate_data(
ground_truth_subtypes,
ground_truth_stages,
ground_truth_sequences,
Z_vals,
Z_max
)
return cls(
data, Z_vals, Z_max,
**sustain_kwargs
)
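    # (Explanatory note) Each row of S built below is a random permutation of the z-score
    # events, constrained so that, within a biomarker, lower z-score events always precede
    # higher ones (a biomarker cannot reach a higher z-score before a lower one).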
@staticmethod
def generate_random_model(Z_vals, N_S, seed=None):
num_biomarkers = Z_vals.shape[0]
stage_zscore = Z_vals.T.flatten()#[np.newaxis, :]
IX_select = np.nonzero(stage_zscore)[0]
stage_zscore = stage_zscore[IX_select]#[np.newaxis, :]
num_zscores = Z_vals.shape[0]
stage_biomarker_index = np.tile(np.arange(num_biomarkers), (num_zscores,))
stage_biomarker_index = stage_biomarker_index[IX_select]#[np.newaxis, :]
N = stage_zscore.shape[0]
S = np.zeros((N_S, N))
# Moved outside loop, no need
possible_biomarkers = np.unique(stage_biomarker_index)
for s in range(N_S):
for i in range(N):
IS_min_stage_zscore = np.full(N, False)
for j in possible_biomarkers:
IS_unselected = np.full(N, False)
# I have no idea what purpose this serves, so leaving for now
for k in set(range(N)) - set(S[s][:i]):
IS_unselected[k] = True
this_biomarkers = np.logical_and(
stage_biomarker_index == possible_biomarkers[j],
np.array(IS_unselected) == 1
)
if not np.any(this_biomarkers):
this_min_stage_zscore = 0
else:
this_min_stage_zscore = np.min(stage_zscore[this_biomarkers])
if this_min_stage_zscore:
IS_min_stage_zscore[np.logical_and(
this_biomarkers,
stage_zscore == this_min_stage_zscore
)] = True
events = np.arange(N)
possible_events = events[IS_min_stage_zscore]
this_index = np.ceil(np.random.rand() * len(possible_events)) - 1
S[s][i] = possible_events[int(this_index)]
return S
# TODO: Refactor this as above
@staticmethod
def generate_data(subtypes, stages, gt_ordering, Z_vals, Z_max):
B = Z_vals.shape[0]
stage_zscore = np.array([y for x in Z_vals.T for y in x])
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
IX_select = stage_zscore>0
stage_zscore = stage_zscore[IX_select]
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
num_zscores = Z_vals.shape[1]
IX_vals = np.array([[x for x in range(B)]] * num_zscores).T
stage_biomarker_index = np.array([y for x in IX_vals.T for y in x])
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
stage_biomarker_index = stage_biomarker_index[IX_select]
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
min_biomarker_zscore = [0]*B
max_biomarker_zscore = Z_max
std_biomarker_zscore = [1]*B
N = stage_biomarker_index.shape[1]
N_S = gt_ordering.shape[0]
possible_biomarkers = np.unique(stage_biomarker_index)
stage_value = np.zeros((B,N+2,N_S))
for s in range(N_S):
S = gt_ordering[s,:]
S_inv = np.array([0]*N)
S_inv[S.astype(int)] = np.arange(N)
for i in range(B):
b = possible_biomarkers[i]
event_location = np.concatenate([[0], S_inv[(stage_biomarker_index == b)[0]], [N]])
event_value = np.concatenate([[min_biomarker_zscore[i]], stage_zscore[stage_biomarker_index == b], [max_biomarker_zscore[i]]])
for j in range(len(event_location)-1):
if j == 0: # FIXME: nasty hack to get Matlab indexing to match up - necessary here because indices are used for linspace limits
index = np.arange(event_location[j],event_location[j+1]+2)
stage_value[i,index,s] = np.linspace(event_value[j],event_value[j+1],event_location[j+1]-event_location[j]+2)
else:
index = np.arange(event_location[j] + 1, event_location[j + 1] + 2)
stage_value[i,index,s] = np.linspace(event_value[j],event_value[j+1],event_location[j+1]-event_location[j]+1)
M = stages.shape[0]
data_denoised = np.zeros((M,B))
for m in range(M):
data_denoised[m,:] = stage_value[:,int(stages[m]),subtypes[m]]
data = data_denoised + norm.ppf(np.random.rand(B,M).T)*np.tile(std_biomarker_zscore,(M,1))
return data, data_denoised, stage_value
| 53.916435
| 217
| 0.535725
|
d7cd7de94d6bdf7c759ce0366f5c364dde3a12f5
| 4,365
|
py
|
Python
|
project/lib/i18n.py
|
feilaoda/FlickBoard
|
21e6364117e336f4eb60d83f496d9fc1cb2784ae
|
[
"MIT"
] | 2
|
2016-07-21T08:52:30.000Z
|
2017-06-15T06:31:30.000Z
|
project/lib/i18n.py
|
feilaoda/FlickBoard
|
21e6364117e336f4eb60d83f496d9fc1cb2784ae
|
[
"MIT"
] | null | null | null |
project/lib/i18n.py
|
feilaoda/FlickBoard
|
21e6364117e336f4eb60d83f496d9fc1cb2784ae
|
[
"MIT"
] | null | null | null |
from lib.config import Config
from lib.cache import SimpleCache
import gettext
#from lib.app.geoip import GeoIP
class TranslationMixin(object):
"""Translation mixin class for support i18n by using methods from gettext library."""
def get_lang_by_sid(self, sid):
"""
Return user language code by sid.
@type sid: C{str}
@param sid: The session id.
@rtype: C{str}
@return: The language code.
"""
#need method to get lang from session
if sid == 'zh-CN':
lang = 'zh_CN'
elif sid == 'zh-TW':
lang = 'zh_TW'
else:
lang = 'en'
return lang
def get_lang_opts(self):
"""
Return language options for "gettext.translation" method.
@rtype: C{dict}
@return: The language options - "{'domain':str,'localedir':str,'languages':list}".
"""
sid = '1234561'
user_lang = self.get_lang_by_sid(sid)
cfg = self.get_lang_cfg()
default_lang = cfg['default_lang']
languages = cfg['languages']
if not user_lang:
header = self.getHeader('accept-language')
if header:
lst = header.split(',')
user_lang = []
if len(lst) > 1:
for lang in lst:
if not lang[0:2] in user_lang and lang[0:2] in languages:
user_lang.append(lang[0:2])
if len(user_lang) == 0:
user_lang = default_lang
else:
user_lang = None
if not user_lang:
ip = self.getClientIP()
country = None
#gip = GeoIP.instance()
#country = gip.countryCodeByIp(ip)
if country:
conf = Config()
if country in conf['app']['country_lang']:
user_lang = conf['app']['country_lang'][country]
else:
user_lang = default_lang
else:
user_lang = default_lang
domain = cfg['domain']
localedir = cfg['localedir']
if isinstance(user_lang,list):
languages = user_lang
else:
if user_lang in languages:
languages = sorted(languages, key=lambda l: l!=user_lang)
else:
languages = sorted(languages, key=lambda l: l!=default_lang)
lang_opts = {'domain':domain,'localedir':localedir,'languages':languages}
return lang_opts
def set_user_lang (self, lang):
#have to write lang to session
self.user_lang = lang
def _(self, str):
"""
Return the localized translation of message as a Unicode string, based
on the current global domain, language, and locale directory.
@rtype: C{str}
@return: The message as a Unicode string.
"""
#need get sid from session
sid = '123456'
user_lang = self.get_lang_by_sid(sid)
sc = SimpleCache()
translation = sc.get('gettext.%s' % user_lang,None)
if not translation:
lang_opts = self.get_lang_opts()
translation = gettext.translation(lang_opts['domain'], localedir=lang_opts['localedir'],languages=lang_opts['languages'], codeset='utf-8')
user_lang = lang_opts['languages'][0]
sc.set('gettext.%s' % user_lang,translation)
return translation.ugettext(str)
def get_lang_cfg(self):
"""
Return default application configuration for translation methods.
@rtype: C{dict}
@return: The configuration - "{'domain':str,'default_lang':str,
'localedir':str,'languages':list}".
"""
cfg = Config()
domain = cfg['app']['domain']
default_lang = cfg['app']['default_lang']
localedir = '/'.join([cfg['pathes']['base'],cfg['app']['localedir']])
languages = cfg['app']['languages']
return {'domain':domain, 'default_lang':default_lang, 'localedir':localedir,'languages':languages}
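    # Illustrative usage sketch (not part of this module; BaseHandler is a placeholder for
    # whatever request-handler base class the application uses):
    #
    #   class IndexHandler(BaseHandler, TranslationMixin):
    #       def get(self):
    #           self.write(self._("Hello"))
    #
    # i.e. a handler mixes the class in and wraps user-facing strings with self._().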
| 33.576923
| 150
| 0.520733
|
a1571c4b35cdf6d8acfc67e1718d9c4dcbc9a31b
| 6,041
|
py
|
Python
|
test_imp_rewriter.py
|
cshorler/py3_import_rewriter
|
57d3b4121fc7c245fa2442c276b466a1b5896344
|
[
"Python-2.0"
] | null | null | null |
test_imp_rewriter.py
|
cshorler/py3_import_rewriter
|
57d3b4121fc7c245fa2442c276b466a1b5896344
|
[
"Python-2.0"
] | null | null | null |
test_imp_rewriter.py
|
cshorler/py3_import_rewriter
|
57d3b4121fc7c245fa2442c276b466a1b5896344
|
[
"Python-2.0"
] | null | null | null |
#
# Copyright (c) 2018 - Chris HORLER
# License: Python Software Foundation V2 [https://opensource.org/licenses/Python-2.0]
#
import ast
import unittest
from itertools import zip_longest
import imp_rewriter
class ImportRewriterTests(unittest.TestCase):
def ast_eq(self, node1, node2, msg=None):
"""https://stackoverflow.com/a/19598419 (improved)"""
if type(node1) is not type(node2):
raise self.failureException(f'{node1} != {node2}, {msg}')
if isinstance(node1, ast.AST):
for k, v in vars(node1).items():
if k in ('lineno', 'col_offset', 'ctx'):
continue
if not self.ast_eq(v, getattr(node2, k), msg):
raise self.failureException(f'{node1} != {node2}, {msg}')
return True
elif isinstance(node1, list):
return all(self.ast_eq(n1, n2, msg) for n1, n2 in zip_longest(node1, node2))
elif node1 != node2:
raise self.failureException(f'{node1} != {node2}, {msg}')
else:
return True
def setUp(self):
self.addTypeEqualityFunc(ast.Module, self.ast_eq)
def test_basic_import(self):
mod_ref = ast.parse('import dummy', '<STRING>', 'exec')
mod_exp = ast.parse('import readline', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy', to_mod='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_multi_import(self):
mod_ref = ast.parse('import dummy1, dummy2, dummy3', '<STRING>', 'exec')
mod_exp = ast.parse('import readline\nimport dummy1, dummy3', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy2', to_mod='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_alias_basic_import(self):
mod_ref = ast.parse('import dummy as magic_module', '<STRING>', 'exec')
mod_exp = ast.parse('import readline as magic_module', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy', to_mod='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_alias_multi_import(self):
mod_ref = ast.parse('import dummy1 as d1, dummy2 as d2, dummy3 as d3', '<STRING>', 'exec')
mod_exp = ast.parse('import readline as d2\nimport dummy1 as d1, dummy3 as d3', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy2', to_mod='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_basic_importfrom(self):
mod_ref = ast.parse('from dummy import magic', '<STRING>', 'exec')
mod_exp = ast.parse('from rl import readline', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy', from_id='magic',
to_mod='rl', to_id='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_multi_importfrom(self):
mod_ref = ast.parse('from dummy import magic1, magic2, magic3', '<STRING>', 'exec')
mod_exp = ast.parse('from rl import readline\nfrom dummy import magic1, magic3', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy', from_id='magic2', to_mod='rl', to_id='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_alias_basic_importfrom(self):
mod_ref = ast.parse('from dummy import magic1 as m1', '<STRING>', 'exec')
mod_exp = ast.parse('from readline import magic1 as m1', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy', to_mod='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_alias_multi_importfrom(self):
mod_ref = ast.parse('from dummy import magic1 as m1, magic2 as m2, magic3 as m3', '<STRING>', 'exec')
mod_exp = ast.parse('from rl import readline as m2\nfrom dummy import magic1 as m1, magic3 as m3',
'<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='dummy', from_id='magic2', to_mod='rl', to_id='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_transform_import_to_importfrom(self):
mod_ref = ast.parse('import readline', '<STRING>', 'exec')
mod_exp = ast.parse('from rl import readline', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='readline', to_mod='rl', to_id='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_transform_importfrom_to_import(self):
mod_ref = ast.parse('from rl import readline', '<STRING>', 'exec')
mod_exp = ast.parse('import readline', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='rl', from_id='readline', to_mod='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
def test_transform_multi_import_to_importfrom(self):
mod_ref = ast.parse('import readline, sys, io', '<STRING>', 'exec')
mod_exp = ast.parse('from rl import readline\nimport sys, io', '<STRING>', 'exec')
imp_rewriter.RewriteImport(from_mod='readline', to_mod='rl', to_id='readline').visit(mod_ref)
ast.fix_missing_locations(mod_ref)
self.assertEqual(mod_ref, mod_exp, msg='AST transform failed')
if __name__ == '__main__':
unittest.main()
| 51.632479
| 116
| 0.651879
|
3130e052758b750ec66b4e9377b3e35f7e13f1f3
| 507
|
py
|
Python
|
app/model/entity/estado.py
|
UniversidadeDeVassouras/labproghiper-2020.1-JoaoMarcosGomes-p1
|
89c4af4ef99c12edb7fec30ade6eba0b47412856
|
[
"Apache-2.0"
] | null | null | null |
app/model/entity/estado.py
|
UniversidadeDeVassouras/labproghiper-2020.1-JoaoMarcosGomes-p1
|
89c4af4ef99c12edb7fec30ade6eba0b47412856
|
[
"Apache-2.0"
] | null | null | null |
app/model/entity/estado.py
|
UniversidadeDeVassouras/labproghiper-2020.1-JoaoMarcosGomes-p1
|
89c4af4ef99c12edb7fec30ade6eba0b47412856
|
[
"Apache-2.0"
] | null | null | null |
class Estado:
def __init__(self, id, nome, sigla, iconeEstado, listaNoticias):
self._id = id
self._nome = nome
self._sigla = sigla
self._iconeEstado = iconeEstado
self._listaNoticias = listaNoticias
def getId(self):
return self._id
def getNome(self):
return self._nome
def getSigla(self):
return self._sigla
def getIcone(self):
return self._iconeEstado
def getNewsList(self):
return self._listaNoticias
| 28.166667
| 68
| 0.631164
|
6712b0c1992f2e64bd0747fa79bc19611460e17b
| 309
|
py
|
Python
|
libnd4j/include/graph/generated/nd4j/graph/UIEventSubtype.py
|
mjlorenzo305/deeplearning4j
|
a1fcc5f19f0f637e83252b00982b3f12b401f679
|
[
"Apache-2.0"
] | 13,006
|
2015-02-13T18:35:31.000Z
|
2022-03-18T12:11:44.000Z
|
libnd4j/include/graph/generated/nd4j/graph/UIEventSubtype.py
|
pxiuqin/deeplearning4j
|
e11ddf3c24d355b43d36431687b807c8561aaae4
|
[
"Apache-2.0"
] | 5,319
|
2015-02-13T08:21:46.000Z
|
2019-06-12T14:56:50.000Z
|
libnd4j/include/graph/generated/nd4j/graph/UIEventSubtype.py
|
pxiuqin/deeplearning4j
|
e11ddf3c24d355b43d36431687b807c8561aaae4
|
[
"Apache-2.0"
] | 4,719
|
2015-02-13T22:48:55.000Z
|
2022-03-22T07:25:36.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
class UIEventSubtype(object):
NONE = 0
EVALUATION = 1
LOSS = 2
LEARNING_RATE = 3
TUNING_METRIC = 4
PERFORMANCE = 5
PROFILING = 6
FEATURE_LABEL = 7
PREDICTION = 8
USER_CUSTOM = 9
| 18.176471
| 68
| 0.660194
|
1041ae63887f91aa36973f8cde9f756e689327f9
| 9,383
|
py
|
Python
|
examples/ui.py
|
Slater-Victoroff/pyjaco
|
89c4e3c46399c5023b0e160005d855a01241c58a
|
[
"MIT"
] | 38
|
2015-01-01T18:08:59.000Z
|
2022-02-18T08:57:27.000Z
|
examples/ui.py
|
dusty-phillips/pyjaco
|
066895ae38d1828498e529c1875cb88df6cbc54d
|
[
"MIT"
] | 1
|
2020-07-15T13:30:32.000Z
|
2020-07-15T13:30:32.000Z
|
examples/ui.py
|
Slater-Victoroff/pyjaco
|
89c4e3c46399c5023b0e160005d855a01241c58a
|
[
"MIT"
] | 12
|
2016-03-07T09:30:49.000Z
|
2021-09-05T20:38:47.000Z
|
import inspect
import pyjaco
from pyjaco.decorator import JSVar
@JSVar("items")
def get_toolbar():
items = [
{"text":'File', "menu": [
{"text": 'Open...'},
{"text": 'Save...'},
'-',
{"text": 'Close'}
]},
{"text":'Edit', "menu": [
{'text': 'Undo'},
{'text': 'Redo'},
'-',
{'text': 'Copy'},
'-',
{'text': 'Delete selected objects'},
'-',
{'text': 'Options'},
]},
{"text":'View', "menu": [
{'text': 'Zoom best fit'},
{'text': 'Zoom region'},
{'text': 'Zoom in'},
{'text': 'Zoom out'},
'-',
{'text': 'Fullscreen mode'},
'-',
{'text': 'Scene properties'},
]},
{"text":'Problem', "menu": [
{'text': 'Operate on nodes'},
{'text': 'Operate on edges'},
{'text': 'Operate on labels'},
{'text': 'Postprocessor'},
"-",
{'text': 'Add'},
"-",
{'text': 'Select region'},
{'text': 'Transform'},
'-',
{'text': 'Local Values'},
{'text': 'Surface Integrals'},
{'text': 'Volume Integrals'},
{'text': 'Select by marker'},
]},
{"text":'Tools', "menu": [
{'text': 'Chart'},
"-",
{'text': 'Script editor'},
{'text': 'Run script...'},
{'text': 'Run command...'},
'-',
{'text': 'Report...'},
{'text': 'Create video...'},
]},
{"text":'Help', "menu": (
{'text': 'Help', 'handler': menu_help},
'-',
{'text': 'About Mesh Editor', 'handler': menu_about},
)},
]
Toolbar({"renderTo": 'mesh-editor', "items": items})
items = [
{ "icon": 'http://www.extjs.com/deploy/dev/examples/menu/list-items.gif', "cls": 'x-btn-icon',
"handler": toolbar_mesh1,
"tooltip": '<b>Draw Mesh I</b><br/>Show an example mesh' },
{ "icon": 'http://www.extjs.com/deploy/dev/examples/menu/list-items.gif', "cls": 'x-btn-icon',
"handler": toolbar_mesh2,
"tooltip": '<b>Draw Mesh II</b><br/>Show an example mesh' },
{ "icon": 'http://www.extjs.com/deploy/dev/examples/menu/list-items.gif', "cls": 'x-btn-icon',
"handler": toolbar_mesh3,
"tooltip": '<b>Draw Mesh III</b><br/>Show an example mesh' },
]
Toolbar({"renderTo": 'mesh-editor', "items": items})
@JSVar("items")
def get_panel():
items = {
"renderTo": 'mesh-editor',
"width": '200px',
"title": 'Mesh',
"html": "<canvas id='canvas' width='200' height='200'></canvas>",
"collapsible": true
}
p = Panel(items)
return p
def toolbar_mesh1(b, e):
canvas = Canvas('canvas')
canvas.fillStyle = 'rgb(255, 255, 255)'
canvas.fillRect(0, 0, 200, 200)
canvas.fillStyle = 'rgb(29, 65, 119)'
canvas.fillText("Mesh I", 80, 10)
canvas.strokeStyle = 'rgb(0, 255, 0)'
canvas.beginPath()
canvas.moveTo(10, 10)
canvas.lineTo(20, 50)
canvas.lineTo(50, 20)
canvas.lineTo(100, 100)
canvas.lineTo(10, 10)
canvas.stroke()
def toolbar_mesh2(b, e):
canvas = Canvas('canvas')
canvas.fillStyle = 'rgb(255, 255, 255)'
canvas.fillRect(0, 0, 200, 200)
canvas.fillStyle = 'rgb(29, 65, 119)'
canvas.fillText("Mesh II", 80, 10)
canvas.strokeStyle = 'rgb(255, 0, 0)'
canvas.beginPath()
canvas.moveTo(100, 100)
canvas.lineTo(200, 50)
canvas.lineTo(50, 20)
canvas.lineTo(100, 100)
canvas.lineTo(100, 10)
canvas.stroke()
def toolbar_mesh3(b, e):
canvas = Canvas('canvas')
canvas.fillStyle = 'rgb(255, 255, 255)'
canvas.fillRect(0, 0, 200, 200)
canvas.fillStyle = 'rgb(29, 65, 119)'
canvas.fillText("Mesh III", 80, 10)
canvas.strokeStyle = 'rgb(0, 0, 255)'
canvas.beginPath()
canvas.moveTo(50, 50)
canvas.lineTo(100, 180)
canvas.lineTo(20, 180)
canvas.lineTo(20, 100)
canvas.lineTo(50, 50)
canvas.stroke()
def menu_about(e, t):
info_box(js("About"), js("FEMhub Mesh Editor, (c) 2010 hp-FEM group at UNR"))
@JSVar("items")
def menu_help(e, t):
items = {
"activeTab": 2,
"width": 600,
"height": 250,
"plain": True,
"defaults": {"autoScroll": True},
"items":[{
"title": 'Introduction',
"html": "This is the mesh editor.<p/><br/>Browse the tabs for more help."
},{
"title": 'Mesh',
"html": "Create the mesh by adding points to the <b>canvas</b>."
},{
"title": 'Developing',
"html": "Documentation:<br/><a href='http://www.extjs.com/deploy/dev/docs/'>ExtJS</a><br/><a href='http://www.whatwg.org/specs/web-apps/current-work/multipage/the-canvas-element.html'>HTML5 Canvas</a>"
},{
"title": 'About',
"html": "Developed by the <a href='http://hpfem.org/'>hp-FEM group</a> at UNR."
}]
}
tabs2 = TabPanel(items)
items = {
"renderTo": 'mesh-editor-help',
"layout": 'fit',
"width": 500,
"height": 300,
"title": "Help",
"items": tabs2
}
w = Window(items)
w.show()
@JSVar("Ext")
def initialize():
Ext.get(document.body).update("<div id='mesh-editor'></div><div id='mesh-editor-help'></div>")
Ext.QuickTips.init()
get_toolbar()
get_panel()
#########################################################################
# End of the section that works both on the desktop and in JS.
# JS wrappers for Ext:
class ExtObject(object):
@JSVar("self._obj")
def __init__(self, args):
self._obj = _new(eval(js("Ext." + self.__class__.__name__)), js(args))
@JSVar("self._obj")
def _js_(self):
return self._obj
class Window(ExtObject):
@JSVar("self._obj")
def show(self):
self._obj.show()
class Panel(ExtObject):
pass
class TabPanel(ExtObject):
pass
class Toolbar(ExtObject):
pass
@JSVar("Ext")
def info_box(title, msg):
Ext.MessageBox.show({
"title": title,
"msg": msg,
"buttons": Ext.MessageBox.OK,
"animEl": 'mb9',
"icon": Ext.MessageBox.INFO,
})
class Canvas(object):
@JSVar("self._obj", "dom", "Ext", "G_vmlCanvasManager")
def __init__(self, id):
dom = Ext.getDom(js(id))
if js(Ext.isIE):
# This is needed for IE to emulate the canvas element:
G_vmlCanvasManager.initElement(dom)
self._obj = dom.getContext('2d')
self._obj.clearRect(0, 0, 200, 200)
@JSVar("self._obj")
def fillRect(self, x1, y1, w, h):
self._obj.fillStyle = js(self.fillStyle)
self._obj.fillRect(js(x1), js(y1), js(w), js(h))
@JSVar("self._obj")
def fillText(self, text, x, y):
self._obj.fillStyle = js(self.fillStyle)
self._obj.fillText(js(text), js(x), js(y))
@JSVar("self._obj")
def beginPath(self):
self._obj.strokeStyle = js(self.strokeStyle)
self._obj.beginPath()
@JSVar("self._obj")
def moveTo(self, x, y):
self._obj.moveTo(js(x), js(y))
@JSVar("self._obj")
def lineTo(self, x, y):
self._obj.lineTo(js(x), js(y))
@JSVar("self._obj")
def stroke(self):
self._obj.stroke()
##################################################
# Main code that translates the above to JS
def main():
funcs = [
ExtObject,
Window,
Panel,
TabPanel,
Toolbar,
info_box,
Canvas,
menu_about,
menu_help,
get_toolbar,
get_panel,
toolbar_mesh1,
toolbar_mesh2,
toolbar_mesh3,
initialize,
]
source = ""
for f in funcs:
source += inspect.getsource(f) + "\n"
js = pyjaco.compile_string(source)
print """\
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<!--[if IE]><script type="text/javascript" src="http://explorercanvas.googlecode.com/svn/trunk/excanvas.js"></script><![endif]-->
<link rel="stylesheet" type="text/css" href="http://www.extjs.com/deploy/dev/resources/css/ext-all.css">
<script type="text/javascript" src="http://www.extjs.com/deploy/dev/adapter/ext/ext-base.js"></script>
<script type="text/javascript" src="http://www.extjs.com/deploy/dev/ext-all.js"></script>
<script language="JavaScript" src="../py-builtins.js"></script>
<title id="page-title">Title</title>
<script type="text/javascript">
function _new(cls, args) { return new cls(args); }
%s
Ext.onReady(initialize);
</script>
</head>
<body></body>
</html>""" % (js)
if __name__ == "__main__":
main()
| 30.267742
| 217
| 0.496217
|
6cd946b1e06bd7961e17e15bdae4beb6950578aa
| 1,346
|
py
|
Python
|
examples/compensated-temperature.py
|
iohe/bme280-python
|
d59fb744ef64a2c74fa24ea67ee8cda9f48d7b3b
|
[
"MIT"
] | 44
|
2019-06-25T00:03:30.000Z
|
2022-03-25T03:04:44.000Z
|
examples/compensated-temperature.py
|
iohe/bme280-python
|
d59fb744ef64a2c74fa24ea67ee8cda9f48d7b3b
|
[
"MIT"
] | 17
|
2019-07-24T10:57:06.000Z
|
2022-02-13T10:28:22.000Z
|
examples/compensated-temperature.py
|
iohe/bme280-python
|
d59fb744ef64a2c74fa24ea67ee8cda9f48d7b3b
|
[
"MIT"
] | 18
|
2019-07-02T12:02:03.000Z
|
2021-11-12T07:40:34.000Z
|
#!/usr/bin/env python
import time
from bme280 import BME280
from subprocess import PIPE, Popen
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
print("""compensated-temperature.py - Use the CPU temperature to compensate temperature
readings from the BME280 sensor. Method adapted from Initial State's Enviro pHAT
review: https://medium.com/@InitialState/tutorial-review-enviro-phat-for-raspberry-pi-4cd6d8c63441
Press Ctrl+C to exit!
""")
# Initialise the BME280
bus = SMBus(1)
bme280 = BME280(i2c_dev=bus)
# Gets the CPU temperature in degrees C
def get_cpu_temperature():
    process = Popen(['vcgencmd', 'measure_temp'], stdout=PIPE, universal_newlines=True)  # text mode so the string slicing below works on Python 3
output, _error = process.communicate()
return float(output[output.index('=') + 1:output.rindex("'")])
factor = 0.6 # Smaller numbers adjust temp down, vice versa
smooth_size = 10 # Dampens jitter due to rapid CPU temp changes
cpu_temps = []
while True:
cpu_temp = get_cpu_temperature()
cpu_temps.append(cpu_temp)
if len(cpu_temps) > smooth_size:
cpu_temps = cpu_temps[1:]
smoothed_cpu_temp = sum(cpu_temps) / float(len(cpu_temps))
raw_temp = bme280.get_temperature()
comp_temp = raw_temp - ((smoothed_cpu_temp - raw_temp) / factor)
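    # (Explanatory note) The correction subtracted is proportional to the CPU/ambient gap and
    # scaled by 1/factor, so a larger factor gives a smaller correction (see the comment on factor above).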
print("Compensated temperature: {:05.2f} *C".format(comp_temp))
time.sleep(1.0)
| 27.469388
| 98
| 0.726597
|
f881d9dba639cc00c4ad1626a8bfd92e7a0e3b1e
| 946
|
py
|
Python
|
hesiod/cfg/cfgparser.py
|
lykius/hesiod
|
091ba1b06cfa870133415fc1df6efdd8e50a2cfe
|
[
"MIT"
] | 19
|
2020-12-11T15:40:55.000Z
|
2022-01-17T16:55:13.000Z
|
hesiod/cfg/cfgparser.py
|
lykius/hesiod
|
091ba1b06cfa870133415fc1df6efdd8e50a2cfe
|
[
"MIT"
] | null | null | null |
hesiod/cfg/cfgparser.py
|
lykius/hesiod
|
091ba1b06cfa870133415fc1df6efdd8e50a2cfe
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List
CFG_T = Dict[str, Any]
class ConfigParser(ABC):
@staticmethod
@abstractmethod
def get_managed_extensions() -> List[str]:
"""Get the file extensions managed by the parser.
Returns:
List of the managed extensions.
"""
@staticmethod
@abstractmethod
def read_cfg_file(cfg_file: Path) -> CFG_T:
"""Read config from a file using a specific protocol.
Args:
cfg_file: The path to the file to be read.
Returns:
The config read from the given file.
"""
@staticmethod
@abstractmethod
def write_cfg(cfg: CFG_T, cfg_file: Path) -> None:
"""Write config into the given file using a specific protocol.
Args:
cfg: The config to be saved.
cfg_file: The path to the output file.
"""
| 24.25641
| 70
| 0.613108
|
ff2b2dac98f6bd2f3067a75c60815ad64324a8e4
| 8,461
|
py
|
Python
|
premise/utils.py
|
tngTUDOR/premise
|
f3ab48b590afaefe6ef431846561e934cac35de9
|
[
"BSD-3-Clause"
] | null | null | null |
premise/utils.py
|
tngTUDOR/premise
|
f3ab48b590afaefe6ef431846561e934cac35de9
|
[
"BSD-3-Clause"
] | null | null | null |
premise/utils.py
|
tngTUDOR/premise
|
f3ab48b590afaefe6ef431846561e934cac35de9
|
[
"BSD-3-Clause"
] | null | null | null |
from . import DATA_DIR
import csv
import pandas as pd
from .export import *
import numpy as np
from wurst import searching as ws
CO2_FUELS = DATA_DIR / "fuel_co2_emission_factor.txt"
LHV_FUELS = DATA_DIR / "fuels_lower_heating_value.txt"
CLINKER_RATIO_ECOINVENT_36 = DATA_DIR / "cement" / "clinker_ratio_ecoinvent_36.csv"
CLINKER_RATIO_ECOINVENT_35 = DATA_DIR / "cement" / "clinker_ratio_ecoinvent_35.csv"
CLINKER_RATIO_REMIND = DATA_DIR / "cement" / "clinker_ratios.csv"
STEEL_RECYCLING_SHARES = DATA_DIR / "steel" / "steel_recycling_shares.csv"
REMIND_TO_FUELS = DATA_DIR / "steel" / "remind_fuels_correspondance.txt"
EFFICIENCY_RATIO_SOLAR_PV = DATA_DIR / "renewables" / "efficiency_solar_PV.csv"
def eidb_label(model, scenario, year):
return "ecoinvent_" + model + "_" + scenario + "_" + str(year)
def get_correspondance_remind_to_fuels():
"""
Return a dictionary with REMIND fuels as keys and ecoinvent activity names and reference products as values.
:return: dict
:rtype: dict
"""
d = {}
with open(REMIND_TO_FUELS) as f:
r = csv.reader(f, delimiter=";")
for row in r:
d[row[0]] = {"fuel name": row[1], "activity name": row[2], "reference product": row[3]}
return d
def get_fuel_co2_emission_factors():
"""
Return a dictionary with fuel names as keys and, as values:
* CO_2 emission factor, in kg CO2 per MJ of lower heating value
* share of biogenic CO2
Source: https://www.plateformeco2.ch/portal/documents/10279/16917/IPCC+(2006),%20Guidelines+for+National+Greenhouse+Gas+Inventories.pdf/a3838a98-5ad6-4da5-82f3-c9430007a158
:return: dict
"""
d = {}
with open(CO2_FUELS) as f:
r = csv.reader(f, delimiter=";")
for row in r:
d[row[0]] = {"co2": float(row[1]), "bio_share": float(row[2])}
return d
def get_lower_heating_values():
"""
Loads a csv file into a dictionary. This dictionary contains lower heating values for a number of fuel types.
Mostly taken from: https://www.engineeringtoolbox.com/fuels-higher-calorific-values-d_169.html
:return: dictionary that contains lower heating values
:rtype: dict
"""
with open(LHV_FUELS) as f:
d = dict(filter(None, csv.reader(f, delimiter=";")))
d = {k: float(v) for k, v in d.items()}
return d
def get_efficiency_ratio_solar_PV(year, power):
"""
Return a dictionary with years as keys and efficiency ratios as values
:return: dict
"""
df = pd.read_csv(
EFFICIENCY_RATIO_SOLAR_PV)
return df.groupby(["power", "year"]) \
.mean()["value"] \
.to_xarray() \
.interp(year=year, power=power, kwargs={"fill_value": "extrapolate"})
def get_clinker_ratio_ecoinvent(version):
"""
    Return a dictionary with (cement name, location) tuples as keys and clinker-to-cement ratios as values,
as found in ecoinvent.
:return: dict
"""
if version == 3.5:
fp = CLINKER_RATIO_ECOINVENT_35
else:
fp = CLINKER_RATIO_ECOINVENT_36
with open(fp) as f:
d = {}
for val in csv.reader(f):
d[(val[0], val[1])] = float(val[2])
return d
def get_clinker_ratio_remind(year):
"""
Return an array with the average clinker-to-cement ratio per year and per region, as given by REMIND.
    :return: xarray
"""
df = pd.read_csv(
CLINKER_RATIO_REMIND)
return df.groupby(["region", "year"]) \
.mean()["value"] \
.to_xarray() \
.interp(year=year)
def get_steel_recycling_rates(year):
"""
Return an array with the average shares for primary (Basic oxygen furnace) and secondary (Electric furnace)
steel production per year and per region, as given by: https://www.bir.org/publications/facts-figures/download/643/175/36?method=view
for 2015-2019, further linearly extrapolated to 2020, 2030, 2040 and 2050.
    :return: xarray
"""
df = pd.read_csv(
STEEL_RECYCLING_SHARES, sep=";")
return df.groupby(["region", "year", "type"]) \
.mean()[["share", "world_share"]] \
.to_xarray() \
.interp(year=year)
def rev_index(inds):
return {v: k for k, v in inds.items()}
def create_codes_and_names_of_A_matrix(db):
"""
Create a dictionary a tuple (activity name, reference product,
unit, location) as key, and its code as value.
:return: a dictionary to map indices to activities
:rtype: dict
"""
return {
(
i["name"],
i["reference product"],
i["unit"],
i["location"],
): i["code"]
for i in db
}
def add_modified_tags(original_db, scenarios):
"""
Add a `modified` label to any activity that is new
Also add a `modified` label to any exchange that has been added
or that has a different value than the source database.
:return:
"""
# Class `Export` to which the original database is passed
exp = Export(original_db)
# Collect a dictionary of activities {row/col index in A matrix: code}
rev_ind_A = rev_index(create_codes_index_of_A_matrix(original_db))
# Retrieve list of coordinates [activity, activity, value]
coords_A = exp.create_A_matrix_coordinates()
# Turn it into a dictionary {(code of receiving activity, code of supplying activity): value}
original = {(rev_ind_A[x[0]], rev_ind_A[x[1]]): x[2] for x in coords_A}
    # Collect a dictionary with activities' names and corresponding codes
codes_names = create_codes_and_names_of_A_matrix(original_db)
# Collect list of substances
rev_ind_B = rev_index(create_codes_index_of_B_matrix())
# Retrieve list of coordinates of the B matrix [activity index, substance index, value]
coords_B = exp.create_B_matrix_coordinates()
# Turn it into a dictionary {(activity code, substance code): value}
original.update({(rev_ind_A[x[0]], rev_ind_B[x[1]]): x[2] for x in coords_B})
for s, scenario in enumerate(scenarios):
print(f"Looking for differences in database {s + 1} ...")
rev_ind_A = rev_index(create_codes_index_of_A_matrix(scenario["database"]))
exp = Export(scenario["database"], scenario["model"], scenario["pathway"], scenario["year"], "")
coords_A = exp.create_A_matrix_coordinates()
new = {(rev_ind_A[x[0]], rev_ind_A[x[1]]): x[2] for x in coords_A}
rev_ind_B = rev_index(create_codes_index_of_B_matrix())
coords_B = exp.create_B_matrix_coordinates()
new.update({(rev_ind_A[x[0]], rev_ind_B[x[1]]): x[2] for x in coords_B})
list_new = set(i[0] for i in original.keys()) ^ set(i[0] for i in new.keys())
ds = (d for d in scenario["database"] if d["code"] in list_new)
# Tag new activities
for d in ds:
d["modified"] = True
# List codes that belong to activities that contain modified exchanges
list_modified = (i[0] for i in new if i in original and new[i] != original[i])
#
# Filter for activities that have modified exchanges
for ds in ws.get_many(
scenario["database"],
ws.either(*[ws.equals("code", c) for c in set(list_modified)])
):
# Loop through biosphere exchanges and check if
# the exchange also exists in the original database
# and if it has the same value
# if any of these two conditions is False, we tag the exchange
excs = (exc for exc in ds["exchanges"] if exc["type"] == "biosphere")
for exc in excs:
if (ds["code"], exc["input"][0]) not in original or new[(ds["code"], exc["input"][0])] != original[(ds["code"], exc["input"][0])]:
exc["modified"] = True
# Same thing for technosphere exchanges,
# except that we first need to look up the provider's code first
excs = (exc for exc in ds["exchanges"] if exc["type"] == "technosphere")
for exc in excs:
if (exc["name"], exc["product"], exc["unit"], exc["location"]) in codes_names:
exc_code = codes_names[(exc["name"], exc["product"], exc["unit"], exc["location"])]
if new[(ds["code"], exc_code)] != original[(ds["code"], exc_code)]:
exc["modified"] = True
else:
exc["modified"] = True
return scenarios
| 38.811927
| 176
| 0.636686
|
1f69aa963cadc64c8f585a48e7a84a427e986649
| 6,486
|
py
|
Python
|
meta_agents/algos/trpo.py
|
zhanpenghe/meta_agents
|
b3b4df70bab1ebe621d48eebb4c886b85c1d8323
|
[
"MIT"
] | 3
|
2020-09-26T16:17:52.000Z
|
2021-04-23T08:56:04.000Z
|
meta_agents/algos/trpo.py
|
zhanpenghe/meta_agents
|
b3b4df70bab1ebe621d48eebb4c886b85c1d8323
|
[
"MIT"
] | 1
|
2019-09-03T19:57:40.000Z
|
2019-09-03T19:57:40.000Z
|
meta_agents/algos/trpo.py
|
zhanpenghe/meta_agents
|
b3b4df70bab1ebe621d48eebb4c886b85c1d8323
|
[
"MIT"
] | 1
|
2020-12-09T03:06:48.000Z
|
2020-12-09T03:06:48.000Z
|
from collections import OrderedDict
from dowel import logger, tabular
from garage.np.algos import BatchPolopt
import torch
from torch.distributions.kl import kl_divergence
from torch.nn.utils.convert_parameters import (vector_to_parameters,
parameters_to_vector)
from meta_agents.samplers.single_task_sampler import SingleTaskSampler
from meta_agents.torch_utils import np_to_torch, detach_distribution
from meta_agents.samplers.base import SampleProcessor
def surrogate_loss(samples, policy, old_dist=None):
assert isinstance(samples, dict)
assert 'observations' in samples.keys()
assert 'actions' in samples.keys()
assert 'advantages' in samples.keys()
observations = samples['observations']
actions = samples['actions']
advantages = samples['advantages']
# forward pass of policy
dist = policy(observations)
# currently lets just detach the logprob
# as old pi
if old_dist is None:
old_dist = detach_distribution(dist)
kl = torch.mean(kl_divergence(dist, old_dist))
log_likeli_ratio = dist.log_prob(actions) - old_dist.log_prob(actions)
ratio = torch.exp(log_likeli_ratio)
surr_loss = -torch.mean(ratio * advantages, dim=0)
return surr_loss, old_dist, kl
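# (Explanatory note) surrogate_loss returns the standard TRPO objective written for
# minimisation, L(theta) = -E[ pi_theta(a|s) / pi_old(a|s) * A(s, a) ], together with the
# mean KL divergence between the current distribution and the detached "old" one, which
# serves as the trust-region constraint (cf. Schulman et al., 2015).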
def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
p = b.clone().detach()
r = b.clone().detach()
x = torch.zeros_like(b).float()
rdotr = torch.dot(r, r)
for i in range(cg_iters):
z = f_Ax(p).detach()
v = rdotr / torch.dot(p, z)
x += v * p
r -= v * z
newrdotr = torch.dot(r, r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr.item() < residual_tol:
break
return x.detach()
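# (Explanatory note) conjugate_gradient solves H x = b for a symmetric positive-definite H
# given only a function computing H @ p; that is how it is used below with the Hessian-vector
# product, e.g. conjugate_gradient(lambda p: H @ p, b) approximates the solution of H x = b.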
class TRPO(BatchPolopt):
def __init__(
self,
policy,
baseline,
discount=.99,
max_path_length=200,
            n_samples=1,  # This is super weird and I don't think this
            # needs to exist for on-policy algorithms.
):
super().__init__(
policy=policy,
baseline=baseline,
discount=discount,
max_path_length=max_path_length,
n_samples=n_samples,)
# We only use our own sampler for consistency between single task
# and meta learning.
self.sampler_cls = SingleTaskSampler
self.preprocessor = SampleProcessor(baseline=self.baseline)
def train(self, runner, batch_size):
last_return = None
for epoch in runner.step_epochs():
for cycle in range(self.n_samples):
runner.step_path = runner.obtain_samples(
runner.step_itr, batch_size)
last_return = self.train_once(runner.step_itr,
runner.step_path)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
samples_data = self.preprocessor.process_samples(paths)
samples = np_to_torch(samples_data)
self._trpo_step(samples, surrogate_loss, kl_divergence)
def process_samples(self, itr, paths):
        # We will never use a `process_samples` method under an algo
        # since we have a preprocessor in meta_agents
raise NotImplementedError
def _trpo_step(self, samples, loss_func, constraint, cg_damping=1e-2,
ls_backtrack_ratio=.5, cg_iters=10, max_ls_steps=10, max_kl=1e-2,):
old_loss, old_dist, kl_before = surrogate_loss(samples, self.policy)
grads = torch.autograd.grad(old_loss, self.policy.parameters())
grads = parameters_to_vector(grads)
hessian_vector_product = self.hessian_vector_product(samples, damping=cg_damping)
step_direction = conjugate_gradient(hessian_vector_product, grads, cg_iters)
# Compute the Lagrange multiplier
shs = 0.5 * step_direction.dot(hessian_vector_product(step_direction))
lagrange_multiplier = torch.sqrt(max_kl / shs)
grad_step = step_direction * lagrange_multiplier
old_params = parameters_to_vector(self.policy.parameters())
# Start line search
step_size = 1.
backtrack_step = 0
for _ in range(max_ls_steps):
vector_to_parameters(old_params - step_size * grad_step,
self.policy.parameters())
loss, _, kl = surrogate_loss(samples, self.policy, old_dist=old_dist)
improve = loss - old_loss
if (improve.item() < 0.0) and (kl.item() < max_kl):
break
step_size *= ls_backtrack_ratio
backtrack_step += 1
else:
vector_to_parameters(old_params, self.policy.parameters())
logger.log('Failed to update parameters')
tabular.record('backtrack-iters', backtrack_step)
tabular.record('loss-before', old_loss.item())
tabular.record('loss-after', loss.item())
tabular.record('kl-before', kl_before.item())
tabular.record('kl-after', kl.item())
def hessian_vector_product(self, samples_data, damping=1e-2):
"""Hessian-vector product, based on the Perlmutter method."""
def _product(vector):
kl = self.kl_divergence(samples_data)
grads = torch.autograd.grad(kl, self.policy.parameters(),
create_graph=True)
flat_grad_kl = parameters_to_vector(grads)
grad_kl_v = torch.dot(flat_grad_kl, vector)
grad2s = torch.autograd.grad(grad_kl_v, self.policy.parameters())
flat_grad2_kl = parameters_to_vector(grad2s)
return flat_grad2_kl + damping * vector
return _product
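        # (Explanatory note) With g = grad(KL) built with create_graph=True, the product H v is
        # obtained as grad(g . v), so the full Hessian is never materialised; the damping term
        # keeps the operator positive definite for conjugate_gradient.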
def kl_divergence(self, samples, old_dist=None):
loss, old_dist_, kl = surrogate_loss(samples, self.policy)
if old_dist is None:
old_dist = old_dist_
inputs = samples['observations']
new_dist = self.policy(inputs)
kl = torch.mean(kl_divergence(new_dist, old_dist))
return kl
def adapt_policy(self, loss, step_size=1., create_graph=True):
grads = torch.autograd.grad(loss,
self.policy.parameters(), create_graph=create_graph)
updated_params = OrderedDict()
for (name, param), grad in zip(self.policy.named_parameters(), grads):
updated_params[name] = param - step_size * grad
return updated_params
| 35.637363
| 89
| 0.63984
|
8ea785cdecb2c63662a52ae6e5188cf60b5a24da
| 21,604
|
py
|
Python
|
weaver/processes/wps3_process.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 16
|
2019-03-18T12:23:05.000Z
|
2022-02-25T00:39:11.000Z
|
weaver/processes/wps3_process.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 346
|
2019-03-06T21:05:04.000Z
|
2022-03-31T13:38:37.000Z
|
weaver/processes/wps3_process.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 5
|
2019-03-15T01:38:28.000Z
|
2021-11-11T15:38:43.000Z
|
import logging
import warnings
from copy import deepcopy
from time import sleep
from typing import TYPE_CHECKING
from pyramid.httpexceptions import (
HTTPConflict,
HTTPForbidden,
HTTPInternalServerError,
HTTPNotFound,
HTTPOk,
HTTPUnauthorized
)
from pyramid.settings import asbool
from weaver import status
from weaver.exceptions import PackageExecutionError
from weaver.execute import EXECUTE_MODE_ASYNC, EXECUTE_RESPONSE_DOCUMENT, EXECUTE_TRANSMISSION_MODE_REFERENCE
from weaver.formats import CONTENT_TYPE_APP_FORM, CONTENT_TYPE_APP_JSON
from weaver.processes import opensearch
from weaver.processes.constants import OPENSEARCH_LOCAL_FILE_SCHEME
from weaver.processes.sources import get_data_source_from_url, retrieve_data_source_url
from weaver.processes.utils import map_progress
from weaver.processes.wps_process_base import WpsProcessInterface
from weaver.utils import (
fetch_file,
get_any_id,
get_any_message,
get_any_value,
get_job_log_msg,
get_log_monitor_msg,
pass_http_error,
request_extra
)
from weaver.visibility import VISIBILITY_PUBLIC
from weaver.warning import MissingParameterWarning
from weaver.wps.utils import map_wps_output_location
from weaver.wps_restapi import swagger_definitions as sd
if TYPE_CHECKING:
from typing import List, Union
from pywps.app import WPSRequest
from weaver.typedefs import JSON, UpdateStatusPartialFunction
LOGGER = logging.getLogger(__name__)
REMOTE_JOB_PROGRESS_PROVIDER = 1
REMOTE_JOB_PROGRESS_PREPARE = 2
REMOTE_JOB_PROGRESS_DEPLOY = 3
REMOTE_JOB_PROGRESS_VISIBLE = 4
REMOTE_JOB_PROGRESS_READY = 5
REMOTE_JOB_PROGRESS_EXECUTION = 9
REMOTE_JOB_PROGRESS_MONITORING = 10
REMOTE_JOB_PROGRESS_FETCH_OUT = 90
REMOTE_JOB_PROGRESS_COMPLETED = 100
class Wps3Process(WpsProcessInterface):
def __init__(self,
step_payload, # type: JSON
joborder, # type: JSON
process, # type: str
request, # type: WPSRequest
update_status, # type: UpdateStatusPartialFunction
):
super(Wps3Process, self).__init__(request)
self.provider = None # overridden if data source properly resolved
self.update_status = lambda _message, _progress, _status: update_status(
self.provider, _message, _progress, _status)
self.provider, self.url, self.deploy_body = self.resolve_data_source(step_payload, joborder)
self.process = process
def resolve_data_source(self, step_payload, joborder):
try:
# Presume that all EOImages given as input can be resolved to the same ADES.
# So, if there are multiple inputs or multiple values for an input, take the first one as reference.
eodata_inputs = opensearch.get_eo_images_ids_from_payload(step_payload)
data_url = "" # data_source will be set to the default ADES if no EOImages (anything but `None`)
if eodata_inputs:
step_payload = opensearch.alter_payload_after_query(step_payload)
value = joborder[eodata_inputs[0]]
if isinstance(value, list):
value = value[0] # Use the first value to determine the data source
data_url = value["location"]
reason = "(ADES based on {0})".format(data_url)
else:
reason = "(No EOImage -> Default ADES)"
data_source = get_data_source_from_url(data_url)
deploy_body = step_payload
url = retrieve_data_source_url(data_source)
except (IndexError, KeyError) as exc:
raise PackageExecutionError("Failed to resolve data source from job inputs. [{!r}]".format(exc))
self.provider = data_source # fix immediately for `update_status`
self.update_status("{provider} is selected {reason}.".format(provider=data_source, reason=reason),
REMOTE_JOB_PROGRESS_PROVIDER, status.STATUS_RUNNING)
return data_source, url, deploy_body
def get_user_auth_header(self):
# TODO: find a better way to generalize this to Magpie credentials?
if not asbool(self.settings.get("ades.use_auth_token", True)):
return {}
ades_usr = self.settings.get("ades.username", None)
ades_pwd = self.settings.get("ades.password", None)
ades_url = self.settings.get("ades.wso2_hostname", None)
ades_client = self.settings.get("ades.wso2_client_id", None)
ades_secret = self.settings.get("ades.wso2_client_secret", None)
access_token = None
if ades_usr and ades_pwd and ades_url and ades_client and ades_secret:
ades_body = {
"grant_type": "password",
"client_id": ades_client,
"client_secret": ades_secret,
"username": ades_usr,
"password": ades_pwd,
"scope": "openid",
}
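# Request an access token from the ADES WSO2 endpoint using the OAuth2 password grant
# (form-encoded request body, JSON response expected).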
ades_headers = {"Content-Type": CONTENT_TYPE_APP_FORM, "Accept": CONTENT_TYPE_APP_JSON}
ades_access_token_url = "{}/oauth2/token".format(ades_url)
cred_resp = request_extra("post", ades_access_token_url,
data=ades_body, headers=ades_headers, settings=self.settings)
cred_resp.raise_for_status()
if CONTENT_TYPE_APP_JSON not in cred_resp.headers.get("Content-Type"):
raise HTTPUnauthorized("Cannot retrieve valid access token using credential or ADES configurations.")
access_token = cred_resp.json().get("access_token", None)
if not access_token:
warnings.warn("Could not retrieve valid access token although response is expected to contain one.",
MissingParameterWarning)
else:
warnings.warn(
"Could not retrieve at least one of required login parameters: "
"[ades.username, ades.password, ades.wso2_hostname, ades.wso2_client_id, ades.wso2_client_secret]",
MissingParameterWarning
)
return {"Authorization": "Bearer {}".format(access_token) if access_token else None}
def is_deployed(self):
return self.describe_process() is not None
def is_visible(self):
# type: (...) -> Union[bool, None]
"""
Gets the process visibility.
:returns:
True/False correspondingly for public/private if visibility is retrievable,
False if authorized access but process cannot be found,
None if forbidden access.
"""
LOGGER.debug("Get process WPS visibility request for [%s]", self.process)
response = self.make_request(method="GET",
url=self.url + sd.process_visibility_service.path.format(process_id=self.process),
retry=False,
status_code_mock=HTTPUnauthorized.code)
if response.status_code in (HTTPUnauthorized.code, HTTPForbidden.code):
return None
if response.status_code == HTTPNotFound.code:
return False
if response.status_code == HTTPOk.code:
json_body = response.json()
# FIXME: support for Spacebel, always returns dummy visibility response, enforce deploy with `False`
if json_body.get("message") == "magic!" or json_body.get("type") == "ok" or json_body.get("code") == 4:
return False
return json_body.get("value") == VISIBILITY_PUBLIC
response.raise_for_status()
def set_visibility(self, visibility):
self.update_status("Updating process visibility on remote ADES.",
REMOTE_JOB_PROGRESS_VISIBLE, status.STATUS_RUNNING)
path = self.url + sd.process_visibility_service.path.format(process_id=self.process)
user_headers = deepcopy(self.headers)
user_headers.update(self.get_user_auth_header())
LOGGER.debug("Update process WPS visibility request for [%s] at [%s]", self.process, path)
response = self.make_request(method="PUT",
url=path,
json={"value": visibility},
retry=False,
status_code_mock=HTTPOk.code)
response.raise_for_status()
def describe_process(self):
path = self.url + sd.process_service.path.format(process_id=self.process)
LOGGER.debug("Describe process WPS request for [%s] at [%s]", self.process, path)
response = self.make_request(method="GET",
url=path,
retry=False,
status_code_mock=HTTPOk.code)
if response.status_code == HTTPOk.code:
# FIXME: Remove patch for Geomatys ADES (Missing process return a 200 InvalidParameterValue error !)
if "invalidparametervalue" in response.text.lower():  # lower-case match on the decoded body
return None
return response.json()
elif response.status_code == HTTPNotFound.code:
return None
# FIXME: Remove patch for Spacebel ADES (Missing process return a 500 error)
elif response.status_code == HTTPInternalServerError.code:
return None
response.raise_for_status()
def deploy(self):
self.update_status("Deploying process on remote ADES.",
REMOTE_JOB_PROGRESS_DEPLOY, status.STATUS_RUNNING)
path = self.url + sd.processes_service.path
user_headers = deepcopy(self.headers)
user_headers.update(self.get_user_auth_header())
LOGGER.debug("Deploy process WPS request for [%s] at [%s]", self.process, path)
response = self.make_request(method="POST", url=path, json=self.deploy_body, retry=True,
status_code_mock=HTTPOk.code)
response.raise_for_status()
def prepare(self):
visible = self.is_visible()
if not visible: # includes private visibility and non-existing cases
if visible is None:
LOGGER.info("Process [%s] access is unauthorized on [%s] - deploying as admin.", self.process, self.url)
elif visible is False:
LOGGER.info("Process [%s] is not deployed on [%s] - deploying.", self.process, self.url)
# TODO: Maybe always redeploy? What about cases of outdated deployed process?
try:
self.deploy()
except Exception as exc:
# FIXME: support for Spacebel, avoid conflict error incorrectly handled, remove 500 when fixed
pass_http_error(exc, [HTTPConflict, HTTPInternalServerError])
if visible:
LOGGER.info("Process [%s] already deployed and visible on [%s] - executing.", self.process, self.url)
else:
LOGGER.info("Process [%s] enforced to public visibility.", self.process)
try:
self.set_visibility(visibility=VISIBILITY_PUBLIC)
# TODO: support for Spacebel, remove when visibility route properly implemented on ADES
except Exception as exc:
pass_http_error(exc, HTTPNotFound)
def execute(self, workflow_inputs, out_dir, expected_outputs):
self.update_status("Preparing process on remote ADES.",
REMOTE_JOB_PROGRESS_PREPARE, status.STATUS_RUNNING)
self.prepare()
self.update_status("Process ready for execute request on remote ADES.",
REMOTE_JOB_PROGRESS_READY, status.STATUS_RUNNING)
LOGGER.debug("Execute process WPS request for [%s]", self.process)
execute_body_inputs = self.stage_job_inputs(workflow_inputs)
execute_body_outputs = [
{"id": output, "transmissionMode": EXECUTE_TRANSMISSION_MODE_REFERENCE} for output in expected_outputs
]
self.update_status("Executing job on remote ADES.", REMOTE_JOB_PROGRESS_EXECUTION, status.STATUS_RUNNING)
execute_body = {
"mode": EXECUTE_MODE_ASYNC,
"response": EXECUTE_RESPONSE_DOCUMENT,
"inputs": execute_body_inputs,
"outputs": execute_body_outputs
}
request_url = self.url + sd.process_jobs_service.path.format(process_id=self.process)
response = self.make_request(method="POST", url=request_url, json=execute_body, retry=True)
if response.status_code != 201:
raise Exception("Was expecting a 201 status code from the execute request : {0}".format(request_url))
job_status_uri = response.headers["Location"]
job_id = self.monitor(job_status_uri)
self.update_status("Fetching job outputs from remote ADES.",
REMOTE_JOB_PROGRESS_FETCH_OUT, status.STATUS_RUNNING)
results = self.get_job_results(job_id)
self.stage_job_results(results, expected_outputs, out_dir)
self.update_status("Execution on remote ADES completed.",
REMOTE_JOB_PROGRESS_COMPLETED, status.STATUS_SUCCEEDED)
def monitor(self, job_status_uri):
job_status = self.get_job_status(job_status_uri)
job_status_value = status.map_status(job_status["status"])
job_id = job_status["jobID"]
self.update_status("Monitoring job on remote ADES : {0}".format(job_status_uri),
REMOTE_JOB_PROGRESS_MONITORING, status.STATUS_RUNNING)
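# Poll the remote job status every 5 seconds until it reaches a finished category, relaying progress updates along the way.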
while job_status_value not in status.JOB_STATUS_CATEGORIES[status.JOB_STATUS_CATEGORY_FINISHED]:
sleep(5)
job_status = self.get_job_status(job_status_uri)
job_status_value = status.map_status(job_status["status"])
LOGGER.debug(get_log_monitor_msg(job_id, job_status_value,
job_status.get("percentCompleted", 0),
get_any_message(job_status), job_status.get("statusLocation")))
self.update_status(get_job_log_msg(status=job_status_value,
message=get_any_message(job_status),
progress=job_status.get("percentCompleted", 0),
duration=job_status.get("duration", None)), # get if available
map_progress(job_status.get("percentCompleted", 0),
REMOTE_JOB_PROGRESS_MONITORING, REMOTE_JOB_PROGRESS_FETCH_OUT),
status.STATUS_RUNNING)
if job_status_value != status.STATUS_SUCCEEDED:
LOGGER.debug(get_log_monitor_msg(job_id, job_status_value,
job_status.get("percentCompleted", 0),
get_any_message(job_status), job_status.get("statusLocation")))
raise Exception(job_status)
return job_id
def get_job_status(self, job_status_uri, retry=True):
response = self.make_request(method="GET",
url=job_status_uri,
retry=True,
status_code_mock=HTTPNotFound.code)
# Retry on 404 since job may not be fully ready
if retry and response.status_code == HTTPNotFound.code:
sleep(5)
return self.get_job_status(job_status_uri, retry=False)
response.raise_for_status()
job_status = response.json()
# TODO: Remove patch for Geomatys not conforming to the status schema:
# - jobID is missing
# - handled by 'map_status': statuses are upper case and a succeeded process is reported as "successful"
job_id = job_status_uri.split("/")[-1]
if "jobID" not in job_status:
job_status["jobID"] = job_id
job_status["status"] = status.map_status(job_status["status"])
return job_status
def get_job_results(self, job_id):
# type: (str) -> List[JSON]
"""
Obtains produced output results from successful job status ID.
"""
# use the results endpoint instead of '/outputs' to ensure support with other ADES implementations
result_url = self.url + sd.process_results_service.path.format(process_id=self.process, job_id=job_id)
response = self.make_request(method="GET", url=result_url, retry=True)
response.raise_for_status()
contents = response.json()
# backward compatibility for ADES that returns output IDs nested under 'outputs'
if "outputs" in contents:
# ensure that we don't incorrectly pick a specific output ID named 'outputs'
maybe_outputs = contents["outputs"]
if isinstance(maybe_outputs, dict) and get_any_id(maybe_outputs) is None:
contents = maybe_outputs
# backward compatibility for ADES that returns list of outputs nested under 'outputs'
# (i.e.: as Weaver-specific '/outputs' endpoint)
elif isinstance(maybe_outputs, list) and all(get_any_id(out) is not None for out in maybe_outputs):
contents = maybe_outputs
# rebuild the expected (old) list format for calling method
if isinstance(contents, dict) and all(get_any_value(out) is not None for out in contents.values()):
outputs = []
for out_id, out_val in contents.items():
out_val.update({"id": out_id})
outputs.append(out_val)
contents = outputs
return contents
def stage_job_results(self, results, expected_outputs, out_dir):
for result in results:
res_id = get_any_id(result)
# CWL expects the output file to be written matching the definition in 'expected_outputs',
# but this definition could be a glob pattern matching multiple files.
# Therefore, we cannot rely on a specific name from it.
if res_id in expected_outputs:
# plan ahead when list of multiple output values could be supported
result_values = get_any_value(result)
if not isinstance(result_values, list):
result_values = [result_values]
cwl_out_dir = out_dir.rstrip("/")
for value in result_values:
src_name = value.split("/")[-1]
dst_path = "/".join([cwl_out_dir, src_name])
# performance improvement:
# Bypass download if file can be resolved as local resource (already fetched or same server).
# Because CWL expects the file to be in the specified 'out_dir', make a link for it to be found
# even though the file is stored in the full job output location instead (already staged by step).
map_path = map_wps_output_location(value, self.settings)
as_link = False
if map_path:
LOGGER.info("Detected result [%s] from [%s] as local reference to this instance. "
"Skipping fetch and using local copy in output destination: [%s]",
res_id, value, dst_path)
LOGGER.debug("Mapped result [%s] to local reference: [%s]", value, map_path)
src_path = map_path
as_link = True
else:
LOGGER.info("Fetching result [%s] from [%s] to CWL output destination: [%s]",
res_id, value, dst_path)
src_path = value
fetch_file(src_path, cwl_out_dir, settings=self.settings, link=as_link)
def stage_job_inputs(self, workflow_inputs):
execute_body_inputs = []
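# Convert CWL job inputs into execute request inputs: complex values with a 'location' become
# 'href' references, anything else is passed as a literal 'data' value.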
for workflow_input_key, workflow_input_value in workflow_inputs.items():
if not isinstance(workflow_input_value, list):
workflow_input_value = [workflow_input_value]
for workflow_input_value_item in workflow_input_value:
if isinstance(workflow_input_value_item, dict) and "location" in workflow_input_value_item:
location = workflow_input_value_item["location"]
execute_body_inputs.append({"id": workflow_input_key, "href": location})
else:
execute_body_inputs.append({"id": workflow_input_key, "data": workflow_input_value_item})
for exec_input in execute_body_inputs:
if "href" in exec_input and isinstance(exec_input["href"], str):
LOGGER.debug("Original input location [%s] : [%s]", exec_input["id"], exec_input["href"])
if exec_input["href"].startswith("{0}://".format(OPENSEARCH_LOCAL_FILE_SCHEME)):
exec_input["href"] = "file{0}".format(exec_input["href"][len(OPENSEARCH_LOCAL_FILE_SCHEME):])
LOGGER.debug("OpenSearch intermediate input [%s] : [%s]", exec_input["id"], exec_input["href"])
elif exec_input["href"].startswith("file://"):
exec_input["href"] = self.host_file(exec_input["href"])
LOGGER.debug("Hosting intermediate input [%s] : [%s]", exec_input["id"], exec_input["href"])
return execute_body_inputs
| 51.808153
| 120
| 0.625162
|
452809f16a5391640b496831a5805e9ac2f05de3
| 57,966
|
py
|
Python
|
sdk/textanalytics/azure-ai-textanalytics/tests/test_batch.py
|
cicovica/azure-sdk-for-python
|
cd8bed878f8a11d081358bf67400fb01031582b6
|
[
"MIT"
] | null | null | null |
sdk/textanalytics/azure-ai-textanalytics/tests/test_batch.py
|
cicovica/azure-sdk-for-python
|
cd8bed878f8a11d081358bf67400fb01031582b6
|
[
"MIT"
] | null | null | null |
sdk/textanalytics/azure-ai-textanalytics/tests/test_batch.py
|
cicovica/azure-sdk-for-python
|
cd8bed878f8a11d081358bf67400fb01031582b6
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
import platform
from azure.core.exceptions import HttpResponseError
from azure.ai.textanalytics import (
VERSION,
TextAnalyticsClient,
DetectLanguageInput,
TextDocumentInput,
TextAnalyticsApiKeyCredential
)
from testcase import TextAnalyticsTest, GlobalTextAnalyticsAccountPreparer
class TestBatchTextAnalytics(TextAnalyticsTest):
@pytest.mark.live_test_only
def test_active_directory_auth(self):
token = self.generate_oauth_token()
endpoint = self.get_oauth_endpoint()
text_analytics = TextAnalyticsClient(endpoint, token)
docs = [{"id": "1", "text": "I should take my cat to the veterinarian."},
{"id": "2", "text": "Este es un document escrito en Español."},
{"id": "3", "text": "猫は幸せ"},
{"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]
response = text_analytics.detect_language(docs)
@GlobalTextAnalyticsAccountPreparer()
def test_empty_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
with self.assertRaises(TypeError):
text_analytics = TextAnalyticsClient(text_analytics_account, "")
@GlobalTextAnalyticsAccountPreparer()
def test_bad_type_for_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
with self.assertRaises(TypeError):
text_analytics = TextAnalyticsClient(text_analytics_account, [])
@GlobalTextAnalyticsAccountPreparer()
def test_none_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
with self.assertRaises(ValueError):
text_analytics = TextAnalyticsClient(text_analytics_account, None)
@GlobalTextAnalyticsAccountPreparer()
def test_bad_input_to_method(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
with self.assertRaises(TypeError):
response = text_analytics.detect_language("hello world")
@GlobalTextAnalyticsAccountPreparer()
def test_successful_detect_language(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "text": "I should take my cat to the veterinarian."},
{"id": "2", "text": "Este es un document escrito en Español."},
{"id": "3", "text": "猫は幸せ"},
{"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]
response = text_analytics.detect_language(docs, show_stats=True)
self.assertEqual(response[0].primary_language.name, "English")
self.assertEqual(response[1].primary_language.name, "Spanish")
self.assertEqual(response[2].primary_language.name, "Japanese")
self.assertEqual(response[3].primary_language.name, "German")
self.assertEqual(response[0].primary_language.iso6391_name, "en")
self.assertEqual(response[1].primary_language.iso6391_name, "es")
self.assertEqual(response[2].primary_language.iso6391_name, "ja")
self.assertEqual(response[3].primary_language.iso6391_name, "de")
for doc in response:
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
self.assertIsNotNone(doc.primary_language.score)
@GlobalTextAnalyticsAccountPreparer()
def test_some_errors_detect_language(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "country_hint": "United States", "text": "I should take my cat to the veterinarian."},
{"id": "2", "text": "Este es un document escrito en Español."},
{"id": "3", "text": ""},
{"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]
response = text_analytics.detect_language(docs)
self.assertTrue(response[0].is_error)
self.assertFalse(response[1].is_error)
self.assertTrue(response[2].is_error)
self.assertFalse(response[3].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_all_errors_detect_language(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "text": ""},
{"id": "3", "text": ""},
{"id": "4", "text": text}]
response = text_analytics.detect_language(docs)
for resp in response:
self.assertTrue(resp.is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_language_detection_empty_credential_class(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(""))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.detect_language(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_language_detection_bad_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential("xxxxxxxxxxxx"))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.detect_language(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_language_detection_bad_model_version(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
with self.assertRaises(HttpResponseError):
response = text_analytics.detect_language(
inputs=["Microsoft was founded by Bill Gates."],
model_version="old"
)
@GlobalTextAnalyticsAccountPreparer()
def test_successful_recognize_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975."},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975."},
{"id": "3", "language": "de", "text": "Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet."}]
response = text_analytics.recognize_entities(docs, show_stats=True)
for doc in response:
self.assertEqual(len(doc.entities), 4)
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
for entity in doc.entities:
self.assertIsNotNone(entity.text)
self.assertIsNotNone(entity.category)
self.assertIsNotNone(entity.offset)
self.assertIsNotNone(entity.length)
self.assertIsNotNone(entity.score)
@GlobalTextAnalyticsAccountPreparer()
def test_some_errors_recognize_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975."},
{"id": "2", "language": "Spanish", "text": "Hola"},
{"id": "3", "language": "de", "text": ""}]
response = text_analytics.recognize_entities(docs)
self.assertFalse(response[0].is_error)
self.assertTrue(response[1].is_error)
self.assertTrue(response[2].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_all_errors_recognize_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "Spanish", "text": "Hola"},
{"id": "3", "language": "de", "text": ""}]
response = text_analytics.recognize_entities(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
self.assertTrue(response[2].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_entity_recognition_empty_credential_class(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(""))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.recognize_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_entity_recognition_bad_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential("xxxxxxxxxxxx"))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.recognize_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_entity_recognition_bad_model_version(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
with self.assertRaises(HttpResponseError):
response = text_analytics.recognize_entities(
inputs=["Microsoft was founded by Bill Gates."],
model_version="old"
)
@GlobalTextAnalyticsAccountPreparer()
def test_successful_recognize_pii_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "text": "My SSN is 555-55-5555."},
{"id": "2", "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."},
{"id": "3", "text": "Is 998.214.865-68 your Brazilian CPF number?"}]
response = text_analytics.recognize_pii_entities(docs, show_stats=True)
self.assertEqual(response[0].entities[0].text, "555-55-5555")
self.assertEqual(response[0].entities[0].category, "U.S. Social Security Number (SSN)")
self.assertEqual(response[1].entities[0].text, "111000025")
# self.assertEqual(response[1].entities[0].category, "ABA Routing Number") # Service is currently returning PhoneNumber here
self.assertEqual(response[2].entities[0].text, "998.214.865-68")
self.assertEqual(response[2].entities[0].category, "Brazil CPF Number")
for doc in response:
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
for entity in doc.entities:
self.assertIsNotNone(entity.text)
self.assertIsNotNone(entity.category)
self.assertIsNotNone(entity.offset)
self.assertIsNotNone(entity.length)
self.assertIsNotNone(entity.score)
@GlobalTextAnalyticsAccountPreparer()
def test_some_errors_recognize_pii_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "es", "text": "hola"},
{"id": "2", "text": ""},
{"id": "3", "text": "Is 998.214.865-68 your Brazilian CPF number?"}]
response = text_analytics.recognize_pii_entities(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
self.assertFalse(response[2].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_all_errors_recognize_pii_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "es", "text": "hola"},
{"id": "2", "text": ""}]
response = text_analytics.recognize_pii_entities(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_pii_entity_recognition_empty_credential_class(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(""))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.recognize_pii_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_pii_entity_recognition_bad_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential("xxxxxxxxxxxx"))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.recognize_pii_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_pii_entity_recognition_bad_model_version(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
with self.assertRaises(HttpResponseError):
response = text_analytics.recognize_pii_entities(
inputs=["Microsoft was founded by Bill Gates."],
model_version="old"
)
@GlobalTextAnalyticsAccountPreparer()
def test_successful_recognize_linked_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = text_analytics.recognize_linked_entities(docs, show_stats=True)
for doc in response:
self.assertEqual(len(doc.entities), 3)
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
for entity in doc.entities:
self.assertIsNotNone(entity.name)
self.assertIsNotNone(entity.matches)
self.assertIsNotNone(entity.language)
self.assertIsNotNone(entity.data_source_entity_id)
self.assertIsNotNone(entity.url)
self.assertIsNotNone(entity.data_source)
@GlobalTextAnalyticsAccountPreparer()
def test_some_errors_recognize_linked_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = text_analytics.recognize_linked_entities(docs)
self.assertTrue(response[0].is_error)
self.assertFalse(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_all_errors_recognize_linked_entities(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "Spanish", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = text_analytics.recognize_linked_entities(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_linked_entity_recognition_empty_credential_class(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(""))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.recognize_linked_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_linked_entity_recognition_bad_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential("xxxxxxxxxxxx"))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.recognize_linked_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_linked_entity_recognition_bad_model_version(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
with self.assertRaises(HttpResponseError):
response = text_analytics.recognize_linked_entities(
inputs=["Microsoft was founded by Bill Gates."],
model_version="old"
)
@GlobalTextAnalyticsAccountPreparer()
def test_successful_extract_key_phrases(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = text_analytics.extract_key_phrases(docs, show_stats=True)
for phrases in response:
self.assertIn("Paul Allen", phrases.key_phrases)
self.assertIn("Bill Gates", phrases.key_phrases)
self.assertIn("Microsoft", phrases.key_phrases)
self.assertIsNotNone(phrases.id)
self.assertIsNotNone(phrases.statistics)
@GlobalTextAnalyticsAccountPreparer()
def test_some_errors_extract_key_phrases(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "English", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = text_analytics.extract_key_phrases(docs)
self.assertTrue(response[0].is_error)
self.assertFalse(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_all_errors_extract_key_phrases(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "English", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": ""}]
response = text_analytics.extract_key_phrases(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_key_phrases_empty_credential_class(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(""))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.extract_key_phrases(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_key_phrases_bad_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential("xxxxxxxxxxxx"))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.extract_key_phrases(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_key_phrases_bad_model_version(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
with self.assertRaises(HttpResponseError):
response = text_analytics.extract_key_phrases(
inputs=["Microsoft was founded by Bill Gates."],
model_version="old"
)
@GlobalTextAnalyticsAccountPreparer()
def test_successful_analyze_sentiment(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
{"id": "2", "language": "en", "text": "I did not like the hotel we stayed it. It was too expensive."},
{"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
response = text_analytics.analyze_sentiment(docs, show_stats=True)
self.assertEqual(response[0].sentiment, "neutral")
self.assertEqual(response[1].sentiment, "negative")
self.assertEqual(response[2].sentiment, "positive")
for doc in response:
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
self.assertIsNotNone(doc.confidence_scores)
self.assertIsNotNone(doc.sentences)
@GlobalTextAnalyticsAccountPreparer()
def test_some_errors_analyze_sentiment(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "en", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed it. It was too expensive."},
{"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
response = text_analytics.analyze_sentiment(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_all_errors_analyze_sentiment(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "language": "en", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed it. It was too expensive."},
{"id": "3", "language": "en", "text": ""}]
response = text_analytics.analyze_sentiment(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
self.assertTrue(response[2].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_analyze_sentiment_empty_credential_class(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(""))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.analyze_sentiment(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_analyze_sentiment_bad_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential("xxxxxxxxxxxx"))
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.analyze_sentiment(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
def test_analyze_sentiment_bad_model_version(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
with self.assertRaises(HttpResponseError):
response = text_analytics.analyze_sentiment(
inputs=["Microsoft was founded by Bill Gates."],
model_version="old"
)
@GlobalTextAnalyticsAccountPreparer()
def test_validate_input_string(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [
u"I should take my cat to the veterinarian.",
u"Este es un document escrito en Español.",
u"猫は幸せ",
u"Fahrt nach Stuttgart und dann zum Hotel zu Fu.",
u""
]
response = text_analytics.detect_language(docs)
self.assertEqual(response[0].primary_language.name, "English")
self.assertEqual(response[1].primary_language.name, "Spanish")
self.assertEqual(response[2].primary_language.name, "Japanese")
self.assertEqual(response[3].primary_language.name, "German")
self.assertTrue(response[4].is_error)
@GlobalTextAnalyticsAccountPreparer()
def test_validate_language_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian."),
DetectLanguageInput(id="2", text="Este es un document escrito en Español."),
DetectLanguageInput(id="3", text="猫は幸せ"),
DetectLanguageInput(id="4", text="Fahrt nach Stuttgart und dann zum Hotel zu Fu.")
]
response = text_analytics.detect_language(docs)
self.assertEqual(response[0].primary_language.name, "English")
self.assertEqual(response[1].primary_language.name, "Spanish")
self.assertEqual(response[2].primary_language.name, "Japanese")
self.assertEqual(response[3].primary_language.name, "German")
@GlobalTextAnalyticsAccountPreparer()
def test_validate_multilanguage_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [
TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen."),
TextDocumentInput(id="2", text="I did not like the hotel we stayed it. It was too expensive."),
TextDocumentInput(id="3", text="The restaurant had really good food. I recommend you try it."),
]
response = text_analytics.analyze_sentiment(docs)
self.assertEqual(response[0].sentiment, "neutral")
self.assertEqual(response[1].sentiment, "negative")
self.assertEqual(response[2].sentiment, "positive")
@GlobalTextAnalyticsAccountPreparer()
def test_mixing_inputs(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
TextDocumentInput(id="2", text="I did not like the hotel we stayed it. It was too expensive."),
u"You cannot mix string input with the above inputs"
]
with self.assertRaises(TypeError):
response = text_analytics.analyze_sentiment(docs)
@GlobalTextAnalyticsAccountPreparer()
def test_out_of_order_ids(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = text_analytics.analyze_sentiment(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
self.assertEqual(resp.id, in_order[idx])
@GlobalTextAnalyticsAccountPreparer()
def test_show_stats_and_model_version(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
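# The 'response_hook' callback receives the raw batch response, so model version and request statistics can be asserted directly.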
def callback(response):
self.assertIsNotNone(response.model_version)
self.assertIsNotNone(response.raw_response)
self.assertEqual(response.statistics.document_count, 5)
self.assertEqual(response.statistics.transaction_count, 4)
self.assertEqual(response.statistics.valid_document_count, 4)
self.assertEqual(response.statistics.erroneous_document_count, 1)
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = text_analytics.analyze_sentiment(
docs,
show_stats=True,
model_version="latest",
response_hook=callback
)
@GlobalTextAnalyticsAccountPreparer()
def test_batch_size_over_limit(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
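# The service caps the number of documents per request (historically 1,000 for language detection),
# so a 1050-document batch is expected to raise an HttpResponseError.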
docs = [u"hello world"] * 1050
with self.assertRaises(HttpResponseError):
response = text_analytics.detect_language(docs)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_country_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
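# The callback inspects the serialized request body to verify that the batch-level country hint was applied to all three documents.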
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed it. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = text_analytics.detect_language(docs, country_hint="CA", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_dont_use_country_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
country_str = "\"countryHint\": \"\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed it. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = text_analytics.detect_language(docs, country_hint="", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_per_item_dont_use_country_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
country_str = "\"countryHint\": \"\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 2)
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 1)
docs = [{"id": "1", "country_hint": "", "text": "I will go to the park."},
{"id": "2", "country_hint": "", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.detect_language(docs, response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_country_hint_and_obj_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian."),
DetectLanguageInput(id="2", text="Este es un document escrito en Español."),
DetectLanguageInput(id="3", text="猫は幸せ"),
]
response = text_analytics.detect_language(docs, country_hint="CA", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_country_hint_and_dict_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.detect_language(docs, country_hint="CA", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_country_hint_and_obj_per_item_hints(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 2)
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 1)
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian.", country_hint="CA"),
DetectLanguageInput(id="4", text="Este es un document escrito en Español.", country_hint="CA"),
DetectLanguageInput(id="3", text="猫は幸せ"),
]
response = text_analytics.detect_language(docs, country_hint="US", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_country_hint_and_dict_per_item_hints(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 1)
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 2)
docs = [{"id": "1", "country_hint": "US", "text": "I will go to the park."},
{"id": "2", "country_hint": "US", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.detect_language(docs, country_hint="CA", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_language_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
language_str = "\"language\": \"fr\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed it. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = text_analytics.analyze_sentiment(docs, language="fr", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_dont_use_language_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed it. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = text_analytics.analyze_sentiment(docs, language="", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_per_item_dont_use_language_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 2)
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 1)
docs = [{"id": "1", "language": "", "text": "I will go to the park."},
{"id": "2", "language": "", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.analyze_sentiment(docs, response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_language_hint_and_obj_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
language_str = "\"language\": \"de\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian."),
TextDocumentInput(id="4", text="Este es un document escrito en Español."),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = text_analytics.analyze_sentiment(docs, language="de", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_language_hint_and_dict_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.analyze_sentiment(docs, language="es", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_language_hint_and_obj_per_item_hints(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 2)
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 1)
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = text_analytics.analyze_sentiment(docs, language="en", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_whole_batch_language_hint_and_dict_per_item_hints(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 2)
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 1)
docs = [{"id": "1", "language": "es", "text": "I will go to the park."},
{"id": "2", "language": "es", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.analyze_sentiment(docs, language="en", response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_bad_document_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = "This is the wrong type"
with self.assertRaises(TypeError):
response = text_analytics.analyze_sentiment(docs)
@GlobalTextAnalyticsAccountPreparer()
def test_client_passed_default_country_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key), default_country_hint="CA")
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
def callback_2(resp):
country_str = "\"countryHint\": \"DE\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.detect_language(docs, response_hook=callback)
response = text_analytics.detect_language(docs, country_hint="DE", response_hook=callback_2)
response = text_analytics.detect_language(docs, response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_client_passed_default_language_hint(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key), default_language="es")
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
def callback_2(resp):
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.analyze_sentiment(docs, response_hook=callback)
response = text_analytics.analyze_sentiment(docs, language="en", response_hook=callback_2)
response = text_analytics.analyze_sentiment(docs, response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_rotate_subscription_key(self, resource_group, location, text_analytics_account, text_analytics_account_key):
credential = TextAnalyticsApiKeyCredential(text_analytics_account_key)
text_analytics = TextAnalyticsClient(text_analytics_account, credential)
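# Updating the key on the shared credential object takes effect on the already-constructed client,
# so authentication can be broken and then restored within the same test.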
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.analyze_sentiment(docs)
self.assertIsNotNone(response)
credential.update_key("xxx") # Make authentication fail
with self.assertRaises(ClientAuthenticationError):
response = text_analytics.analyze_sentiment(docs)
credential.update_key(text_analytics_account_key) # Authenticate successfully again
response = text_analytics.analyze_sentiment(docs)
self.assertIsNotNone(response)
@GlobalTextAnalyticsAccountPreparer()
def test_user_agent(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(resp):
self.assertIn("azsdk-python-azure-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()),
resp.http_request.headers["User-Agent"]
)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": "The restaurant had really good food."}]
response = text_analytics.analyze_sentiment(docs, response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_document_attribute_error(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
docs = [{"id": "1", "text": ""}]
response = text_analytics.analyze_sentiment(docs)
# Attributes on DocumentError
self.assertTrue(response[0].is_error)
self.assertEqual(response[0].id, "1")
self.assertIsNotNone(response[0].error)
# Result attribute not on DocumentError, custom error message
try:
sentiment = response[0].sentiment
except AttributeError as custom_error:
self.assertEqual(
custom_error.args[0],
'\'DocumentError\' object has no attribute \'sentiment\'. '
'The service was unable to process this document:\nDocument Id: 1\nError: '
'invalidDocument - Document text is empty.\n'
)
# Attribute not found on DocumentError or result obj, default behavior/message
try:
sentiment = response[0].attribute_not_on_result_or_error
except AttributeError as default_behavior:
self.assertEqual(
default_behavior.args[0],
'\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
)
@GlobalTextAnalyticsAccountPreparer()
def test_text_analytics_error(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed it."},
{"id": "3", "text": text}]
# Bad model version
try:
result = text_analytics.analyze_sentiment(docs, model_version="bad")
except HttpResponseError as err:
self.assertEqual(err.error_code, "InvalidRequest")
self.assertIsNotNone(err.message)
# DocumentErrors
doc_errors = text_analytics.analyze_sentiment(docs)
self.assertEqual(doc_errors[0].error.code, "invalidDocument")
self.assertIsNotNone(doc_errors[0].error.message)
self.assertEqual(doc_errors[1].error.code, "unsupportedLanguageCode")
self.assertIsNotNone(doc_errors[1].error.message)
self.assertEqual(doc_errors[2].error.code, "invalidDocument")
self.assertIsNotNone(doc_errors[2].error.message)
# Missing input records
docs = []
try:
result = text_analytics.analyze_sentiment(docs)
except HttpResponseError as err:
self.assertEqual(err.error_code, "MissingInputRecords")
self.assertIsNotNone(err.message)
# Duplicate Ids
docs = [{"id": "1", "text": "hello world"},
{"id": "1", "text": "I did not like the hotel we stayed it."}]
try:
result = text_analytics.analyze_sentiment(docs)
except HttpResponseError as err:
self.assertEqual(err.error_code, "InvalidDocument")
self.assertIsNotNone(err.message)
# Batch size over limit
docs = [u"hello world"] * 1001
try:
response = text_analytics.detect_language(docs)
except HttpResponseError as err:
self.assertEqual(err.error_code, "InvalidDocumentBatch")
self.assertIsNotNone(err.message)
# Service bug returns invalidDocument here. Uncomment after v3.0-preview.2
# docs = [{"id": "1", "country_hint": "United States", "text": "hello world"}]
#
# response = text_analytics.detect_language(docs)
# self.assertEqual(response[0].error.code, "invalidCountryHint")
# self.assertIsNotNone(response[0].error.message)
@GlobalTextAnalyticsAccountPreparer()
def test_text_analytics_country_hint_none(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
# service will eventually support this and we will not need to send "" for input == "none"
documents = [{"id": "0", "country_hint": "none", "text": "This is written in English."}]
documents2 = [DetectLanguageInput(id="1", country_hint="none", text="This is written in English.")]
def callback(response):
country_str = "\"countryHint\": \"\""
country = response.http_request.body.count(country_str)
self.assertEqual(country, 1)
# test dict
result = text_analytics.detect_language(documents, response_hook=callback)
# test DetectLanguageInput
result2 = text_analytics.detect_language(documents2, response_hook=callback)
# test per-operation
result3 = text_analytics.detect_language(inputs=["this is written in english"], country_hint="none", response_hook=callback)
# test client default
new_client = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key), default_country_hint="none")
result4 = new_client.detect_language(inputs=["this is written in english"], response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
def test_keyword_arguments(self, resource_group, location, text_analytics_account, text_analytics_account_key):
text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
def callback(response):
country_str = "\"countryHint\": \"ES\""
self.assertEqual(response.http_request.body.count(country_str), 1)
self.assertIsNotNone(response.model_version)
self.assertIsNotNone(response.statistics)
def callback2(response):
language_str = "\"language\": \"es\""
self.assertEqual(response.http_request.body.count(language_str), 1)
self.assertIsNotNone(response.model_version)
self.assertIsNotNone(response.statistics)
def callback3(response):
language_str = "\"language\": \"en\""
self.assertEqual(response.http_request.body.count(language_str), 1)
self.assertIsNotNone(response.model_version)
self.assertIsNotNone(response.statistics)
res = text_analytics.detect_language(
inputs=["this is written in english"],
model_version="latest",
show_stats=True,
country_hint="ES",
response_hook=callback
)
res = text_analytics.recognize_entities(
inputs=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="es",
response_hook=callback2
)
res = text_analytics.recognize_linked_entities(
inputs=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="es",
response_hook=callback2
)
res = text_analytics.recognize_pii_entities(
inputs=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="en",
response_hook=callback3
)
res = text_analytics.analyze_sentiment(
inputs=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="es",
response_hook=callback2
)
| 52.553037
| 154
| 0.686747
|
73f902ac5e7eebc7b61c5dcbe4195003f461b6b3
| 9,190
|
py
|
Python
|
ga_printer.py
|
Tb7386/GA_Post-it_thermal_printer
|
8dabf33c337a96c7d40d0ba7e41eb884f14fb8b9
|
[
"MIT"
] | null | null | null |
ga_printer.py
|
Tb7386/GA_Post-it_thermal_printer
|
8dabf33c337a96c7d40d0ba7e41eb884f14fb8b9
|
[
"MIT"
] | null | null | null |
ga_printer.py
|
Tb7386/GA_Post-it_thermal_printer
|
8dabf33c337a96c7d40d0ba7e41eb884f14fb8b9
|
[
"MIT"
] | null | null | null |
# Frame layout
# | Header      | Data length | ?? | DATA           | CRC8 | End of line |
# | 51:78:XX:00 | 05          | 00 | 82:7f:7f:7e:82 | 60   | ff          |
# Header XX:
# - bf or a3 : write 384 raw dots
# - a1 : advance the paper by DATA (e.g. 01:00 advances 1 dp, 10:00 advances 10 dp)
# Data (e.g. 82 -> 1000 0010):
# - bit[0]   : 1 = black, 0 = white
# - bit[1-7] : number of dots
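# Worked example (illustrative only, assuming the run-length scheme above): a line
# starting with 5 black dots followed by 379 white dots is encoded as
#   5 black   -> 0x80 + 5 = 0x85        (high bit set = black run)
#   379 white -> 0x7f, 0x7f, 0x7d       (runs are capped at 127 per byte)
# giving DATA = 85:7f:7f:7d (4 bytes), so the frame would be
#   51:78:bf:00 | 04 | 00 | 85:7f:7f:7d | <CRC8 of DATA> | ff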
from gattlib import GATTRequester, DiscoveryService
from PIL import Image, ImageOps, ImageFont, ImageDraw
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import crc8
import sys
import argparse
import textwrap
parser = argparse.ArgumentParser(description="Print text to a thermal printer")
parser.add_argument("BTMAC", help="BT MAC address of printer (type FIND to scan BLE devices)")
parser.add_argument("-t", "--text", type=str, help="Text to be printed")
parser.add_argument("-p", "--port", type=str, help="HTTP port")
parser.add_argument("-s", "--size", type=str, help="Font size")
parser.add_argument("-d", "--device", type=str, help="Bluetooth Device (by default hci0)")
args = parser.parse_args()
# ------------------------------------------------------------------------------
# printer : Print text from command line or http post request
# ------------------------------------------------------------------------------
def printer(text,size=50):
req = bleConnect(args.BTMAC)
print(text)
if (req.is_connected()):
printText(text, size,req)
print ("Print end")
req.disconnect()
else:
print("BLE connect error")
return
# ------------------------------------------------------------------------------
# imgFromString : Convert string to binary image
# ------------------------------------------------------------------------------
def imgFromString(s, fontSize):
# Font choice
font = ImageFont.truetype("dejavu/DejaVuSansMono.ttf", fontSize)
# Convert inline text to multiline
s = textwrap.fill (s, width = int(384/font.getsize("1")[0]))
# Get size of text
size = font.getsize_multiline(s)
# Fix height and width
size_x = 384 #if size[0] > 384 else size[0]
size_y = font.getsize_multiline(s)[1]#font.getsize(s)[1]*(s.count('\n')+1)
# Create image
img = Image.new("RGB", size=(size_x, size_y+10), color="white")
# Draw text in image
draw = ImageDraw.Draw(img)
draw.text((0, 0), s, (0, 0, 0), font=font)
# Convert RGB image to binary image
img = ImageOps.invert(img.convert('L'))
img = img.convert('1')
# Save image to file
#img.save('img.png')
return img
# ------------------------------------------------------------------------------
# binFromImg : Convert a binary image into a list of '0'/'1' bit strings (one per line)
# ------------------------------------------------------------------------------
def binFromImg(img):
binImg=[]
for line in range (0,img.size[1]):
binImg.append(''.join(format(byte, '08b') for byte in img.tobytes()[int(line*(img.size[0]/8)):int((line*(img.size[0]/8))+img.size[0]/8)]))
return binImg
# ------------------------------------------------------------------------------
# dataCrc : Compute the CRC-8 checksum of a hex string
# ------------------------------------------------------------------------------
def dataCrc(data):
hash = crc8.crc8()
hash.update(bytes.fromhex(data))
return str(hash.hexdigest())
# ------------------------------------------------------------------------------
# binCount : Run-length encode each image line into printer data (one hex string per line)
# ------------------------------------------------------------------------------
def binCount (binImg):
trame=[]
i=0
#read Image line by line
for line in binImg:
nb_zero=0
nb_one=0
trame.append('')
# Read line char by char
for char in line:
# Bit '0' process
if char == '0':
# Bit '1' before
if nb_one!=0:
# Format '1' number to hex + 128 (First bit to print black)
trame[i]+='{:02x}'.format(128+nb_one)
nb_one=0
# Max number is 127 (First bit color + 127 max number = '0x7f')
if nb_zero>126:
trame[i]+='{:02x}'.format(nb_zero)
nb_zero=0
nb_zero += 1
# Bit '1' process
if char == '1':
# Bit '0' before
if nb_zero!=0:
# Format '0' number to hex
trame[i]+='{:02x}'.format(nb_zero)
nb_zero=0
# Max number is 127 (First bit color + 127 max number = '0xff')
if nb_one>126:
trame[i]+='{:02x}'.format(128+nb_one)
nb_one=0
nb_one += 1
        # End of line: flush any pending run of '1's or '0's
if nb_zero!=0:
trame[i]+='{:02x}'.format(nb_zero)
elif nb_one!=0:
trame[i]+='{:02x}'.format(128+nb_one)
i+=1
return trame
# ------------------------------------------------------------------------------
# bleConnect : Connect to printer mac
# ------------------------------------------------------------------------------
def bleConnect(mac, device='hci0'):
host = mac
req = GATTRequester(host, False, device)
req.connect(True)
    # Configuration frames
req.write_by_handle(0x09, bytes([1, 0]))
time.sleep(0.02)
req.write_by_handle(0x000e, bytes([1, 0]))
time.sleep(0.02)
req.write_by_handle(0x0011, bytes([2, 0]))
time.sleep(0.02)
req.exchange_mtu(83)
time.sleep(0.02)
req.write_cmd(0x0006, bytes([18, 81, 120, 168, 0, 1, 0, 0, 0, 255, 18, 81, 120, 163, 0, 1, 0, 0, 0, 255]))
time.sleep(0.02)
req.write_cmd(0x0006, bytes([18, 81, 120, 187, 0, 1, 0, 1, 7, 255]))
time.sleep(0.02)
req.write_cmd(0x0006, bytes([18, 81, 120, 163, 0, 1, 0, 0, 0, 255]))
time.sleep(0.2)
return req
# ------------------------------------------------------------------------------
# printText : Print text
# ------------------------------------------------------------------------------
def printText(text, size, req):
data = binCount(binFromImg(imgFromString(text,size)))
for dat in data:
# Header of trame
head = "5178bf00"
# Format BT trame
        trame = head + '{:02x}'.format(len(bytes.fromhex(dat))) + "00" + dat + dataCrc(dat) + "ff"
print(trame)
i = len(trame)
        # Send the frame in chunks of 40 hex characters (20 bytes)
while i > 0:
if i > 40:
req.write_cmd(0x06, bytes.fromhex(trame[len(trame)-i:len(trame)-i+40]))
i -= 40
else:
req.write_cmd(0x06, bytes.fromhex(trame[len(trame)-i:len(trame)]))
i -= 40
time.sleep(0.01)
    # Advance the paper by 90 dp
forwardPaper(90,req)
return
# ------------------------------------------------------------------------------
# forwardPaper : Moving forward
# ------------------------------------------------------------------------------
def forwardPaper(dp,req):
head = "5178a100"
data = '{:02x}'.format(dp) + '00'
# Format BT trame
    trame = head + '{:02x}'.format(len(bytes.fromhex(data))) + "00" + data + dataCrc(data) + "ff"
req.write_cmd(0x06, bytes.fromhex(trame))
time.sleep(0.01)
return
# ------------------------------------------------------------------------------
# httpserver : Start HTTP server
# ------------------------------------------------------------------------------
class S(BaseHTTPRequestHandler):
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_POST(self):
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
self._set_response()
printer(post_data.decode('utf-8'),50 if not args.size else args.size)
def httpserver(server_class=HTTPServer, handler_class=S, port=8080,):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
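# Example usage (assumption: the script was started with "-p 8080"): any plain-text
# POST body sent to the server is printed, e.g. from a shell:
#   curl -X POST --data "Hello printer" http://localhost:8080/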
# ------------------------------------------------------------------------------
# bleScan : Scan Bluetooth Low Energy devices
# ------------------------------------------------------------------------------
def bleScan(device="hci0"):
service = DiscoveryService(device)
devices = service.discover(2)
for address, name in devices.items():
print("name: {}, address: {}".format(name, address))
if __name__ == '__main__':
if (args.BTMAC=="FIND"):
        bleScan(args.device if args.device else "hci0")
sys.exit()
if not (args.text or args.port):
print("ERROR: Please specfiy text with -t or http port server with -p argument")
sys.exit(1)
if args.text:
printer(args.text, 50 if not args.size else args.size)
if args.port:
httpserver(port=int(args.port))
| 39.106383
| 146
| 0.484657
|
eaa780f42e408d1f104844df15583f70812f3421
| 2,648
|
py
|
Python
|
vcr/baselines/random-baseline/random_baseline.py
|
rlebras/mosaic-leaderboard-1
|
d36713ba59a6a28f6f5db5b09e40149a3349c78f
|
[
"Apache-2.0"
] | 18
|
2019-07-12T09:06:40.000Z
|
2022-02-10T07:50:11.000Z
|
vcr/baselines/random-baseline/random_baseline.py
|
rlebras/mosaic-leaderboard-1
|
d36713ba59a6a28f6f5db5b09e40149a3349c78f
|
[
"Apache-2.0"
] | 4
|
2019-08-30T21:39:04.000Z
|
2020-03-13T19:19:51.000Z
|
vcr/baselines/random-baseline/random_baseline.py
|
rlebras/mosaic-leaderboard-1
|
d36713ba59a6a28f6f5db5b09e40149a3349c78f
|
[
"Apache-2.0"
] | 5
|
2019-08-05T18:47:36.000Z
|
2021-01-24T05:06:11.000Z
|
import argparse
import json
import os
from typing import List
import numpy as np
# Parse the input file from JSONL to a list of dictionaries.
def read_jsonl_lines(input_file: str) -> List[dict]:
with open(input_file) as f:
lines = f.readlines()
return [json.loads(l.strip()) for l in lines]
def rand_prob_vector(n=4):
v = np.random.uniform(0, 100, size=n)
v = v / np.sum(v)
return v
def main(input_dir, output_file):
# Read the records from the test set.
qa_test_records = read_jsonl_lines(os.path.join(input_dir, 'qa.jsonl'))
qar_test_records = read_jsonl_lines(os.path.join(input_dir, 'qar.jsonl'))
assert len(qa_test_records) == len(qar_test_records)
# Make predictions for each example in the test set.
rows = []
for qa, qar in zip(qa_test_records, qar_test_records):
row = [qa['annot_id']]
answer_probs = rand_prob_vector(len(qa['answer_choices']))
row.extend([str(v) for v in answer_probs])
for _ in answer_probs:
row.extend([str(v) for v in rand_prob_vector(len(qar['rationale_choices']))])
rows.append(row)
# Write the predictions to the output file.
fields = [
"annot_id", "answer_0", "answer_1", "answer_2", "answer_3", "rationale_conditioned_on_a0_0",
"rationale_conditioned_on_a0_1", "rationale_conditioned_on_a0_2",
"rationale_conditioned_on_a0_3", "rationale_conditioned_on_a1_0",
"rationale_conditioned_on_a1_1", "rationale_conditioned_on_a1_2",
"rationale_conditioned_on_a1_3", "rationale_conditioned_on_a2_0",
"rationale_conditioned_on_a2_1", "rationale_conditioned_on_a2_2",
"rationale_conditioned_on_a2_3", "rationale_conditioned_on_a3_0",
"rationale_conditioned_on_a3_1", "rationale_conditioned_on_a3_2",
"rationale_conditioned_on_a3_3"]
with open(output_file, "w") as f:
f.write(",".join(fields))
f.write("\n")
for row in rows:
f.write(",".join(row))
f.write("\n")
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='A random baseline.')
parser.add_argument('--input-dir', type=str, required=True, help='Location of test data',
default=None)
parser.add_argument('--output-file', type=str, required=True, help='Location of predictions',
default=None)
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args.input_dir, args.output_file)
| 34.842105
| 100
| 0.658988
|
cd08f2cdb5fcb184ed2fc4cd84d621c3fa58a8ea
| 2,031
|
py
|
Python
|
easy_user_input/easy_user_input.py
|
generic-user1/easy-user-input
|
ea293ac97848b036a3d48fb4e4ab8d9dece30553
|
[
"MIT"
] | null | null | null |
easy_user_input/easy_user_input.py
|
generic-user1/easy-user-input
|
ea293ac97848b036a3d48fb4e4ab8d9dece30553
|
[
"MIT"
] | null | null | null |
easy_user_input/easy_user_input.py
|
generic-user1/easy-user-input
|
ea293ac97848b036a3d48fb4e4ab8d9dece30553
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#easy_user_input.easy_user_input - shortcut for easy_user_input.eui
#this module maps every function from the 'easy_user_input.eui'
#onto itself, allowing backwards compatibility with code using the longer name
#without having to maintain multiple copies of the same file.
#NOTE 1: this is a temporary feature to ease the transition into the new name
#it will not be maintained and it will be removed in the next major version
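# Example (illustrative, assuming easy_user_input.eui is importable): importing from
# the old path still works but emits the rename warning before delegating to eui:
#   from easy_user_input.easy_user_input import inputYesNo
#   inputYesNo("Continue?")  # warns, then calls easy_user_input.eui.inputYesNo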
from typing import Optional, Tuple, Union
def inputYesNo(promptText: str = "Choose yes or no", default: bool = None):
from easy_user_input.eui import inputYesNo
from warnings import warn
warn("easy_user_input.easy_user_input has been renamed to easy_user_input.eui and this shortcut will be removed in the future.")
return inputYesNo(promptText, default)
def inputChoice(
    choices: Tuple[Union[str, Tuple[str, str]], ...],
    promptText: str = "Please select an option",
    default: Optional[int] = None
) -> int:
from easy_user_input.eui import inputChoice
from warnings import warn
warn("easy_user_input.easy_user_input has been renamed to easy_user_input.eui and this shortcut will be removed in the future.")
return inputChoice(choices,promptText,default)
def inputStrictString(promptText: str, allowedChars: Optional[str] = None, default: Optional[str] = None) -> str:
from easy_user_input.eui import inputStrictString
from warnings import warn
warn("easy_user_input.easy_user_input has been renamed to easy_user_input.eui and this shortcut will be removed in the future.")
return inputStrictString(promptText, allowedChars, default)
def inputPath(
promptText: str = "Please input a valid path",
existsBehavior: str = "reject",
    default: Optional[str] = None
) -> str:
from easy_user_input.eui import inputPath
from warnings import warn
warn("easy_user_input.easy_user_input has been renamed to easy_user_input.eui and this shortcut will be removed in the future.")
return inputPath(promptText,existsBehavior,default)
| 44.152174
| 132
| 0.760709
|
cbfaafa5135febe520f8efd6f9a0c0a7ddb708ab
| 1,780
|
py
|
Python
|
instaclone/models.py
|
Lenus254/InstaClone
|
b008974bee486cd8ed5cc66e2dd67426f7545064
|
[
"MIT"
] | null | null | null |
instaclone/models.py
|
Lenus254/InstaClone
|
b008974bee486cd8ed5cc66e2dd67426f7545064
|
[
"MIT"
] | null | null | null |
instaclone/models.py
|
Lenus254/InstaClone
|
b008974bee486cd8ed5cc66e2dd67426f7545064
|
[
"MIT"
] | null | null | null |
from email.mime import image
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
profile_pic = CloudinaryField('image',default='images/default.jpg', null=True)
bio = models.CharField(max_length = 200)
def __str__(self):
return f'{self.user.username}'
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
class Post(models.Model):
pic = CloudinaryField('image',default='images/default.jpg' )
caption = models.CharField(blank=True,max_length = 200)
profile = models.ForeignKey(Profile,on_delete=models.CASCADE)
like = models.IntegerField(default=0)
def __str__(self):
return f'{self.profile.user.username}'
def save_post(self):
self.save()
@property
def image_url(self):
if self.pic and hasattr(self.pic, 'url'):
return self.pic.url
class Comments(models.Model):
post = models.IntegerField(default=0)
username = models.CharField(blank=True,max_length = 100)
comment = models.TextField()
date = models.DateTimeField(auto_now_add=True)
count = models.IntegerField(default=0)
def __str__(self):
return f'{self.username}'
def save_comment(self):
self.save()
class Following(models.Model):
username = models.CharField(blank=True,max_length = 100)
followed = models.CharField(blank=True,max_length = 200)
image = CloudinaryField('image' )
def __str__(self):
return f'{self.username}'
def save_follower(self):
self.save()
| 27.8125
| 82
| 0.666854
|
12d0cb1226bd4fe8fcb1d36efe79ad4a545d8197
| 15,861
|
py
|
Python
|
runway/commands/base.py
|
GarisonLotus/runway
|
c371d952dc2500d4686f9f1359487d494c2136cd
|
[
"Apache-2.0"
] | null | null | null |
runway/commands/base.py
|
GarisonLotus/runway
|
c371d952dc2500d4686f9f1359487d494c2136cd
|
[
"Apache-2.0"
] | null | null | null |
runway/commands/base.py
|
GarisonLotus/runway
|
c371d952dc2500d4686f9f1359487d494c2136cd
|
[
"Apache-2.0"
] | null | null | null |
"""runway base module."""
from __future__ import print_function
from subprocess import check_call, check_output
import glob
import logging
import os
import shutil
import sys
import cfn_flip
import yaml
# from stacker.util import parse_cloudformation_template
# parse_cloudformation_template wraps yaml_parse; it would be better to call it
# from util but that would require sys.path shenanigans here
from ..embedded.stacker.awscli_yamlhelper import yaml_parse as parse_cloudformation_template # noqa
from ..util import (
change_dir, ensure_file_is_executable, get_embedded_lib_path,
ignore_exit_code_0, use_embedded_pkgs, which
)
from .. import __version__ as version
LOGGER = logging.getLogger('runway')
class Base(object):
"""Base class for deployer classes."""
def __init__(self, options, env_root=None, runway_config_dir=None):
"""Initialize base class."""
self.options = options
if env_root is None:
self.env_root = os.getcwd()
else:
self.env_root = env_root
if runway_config_dir is None:
self.runway_config_path = os.path.join(
self.env_root,
'runway.yml'
)
else:
self.runway_config_path = os.path.join(
runway_config_dir,
'runway.yml'
)
self._runway_config = None
def get_env_dirs(self):
"""Return list of directories in env_root."""
repo_dirs = next(os.walk(self.env_root))[1]
if '.git' in repo_dirs:
repo_dirs.remove('.git') # not relevant for any repo operations
return repo_dirs
def get_python_files_at_env_root(self):
"""Return list of python files in env_root."""
return glob.glob(os.path.join(self.env_root, '*.py'))
def get_yaml_files_at_env_root(self):
"""Return list of yaml files in env_root."""
yaml_files = glob.glob(
os.path.join(self.env_root, '*.yaml')
)
yml_files = glob.glob(
os.path.join(self.env_root, '*.yml')
)
return yaml_files + yml_files
def lint(self, base_dir=None, dirs_to_scan=None):
"""Call code linters."""
from flake8.main import application as flake8_app
from yamllint.cli import run as yamllint_run
if base_dir is None:
base_dir = self.env_root
if dirs_to_scan is None:
dirs_to_scan = self.get_env_dirs()
if os.path.isfile(os.path.join(base_dir, '.flake8')):
# config file in env will be picked up automatically
flake8_config = []
else:
# no config file in env; use runway defaults
flake8_config = [
('--append-config=' + os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), # noqa
'templates',
'.flake8'
))
]
if os.path.isfile(os.path.join(base_dir, '.yamllint.yml')):
yamllint_config = os.path.join(base_dir, '.yamllint.yml')
else:
yamllint_config = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'templates',
'.yamllint.yml'
)
with change_dir(base_dir):
with ignore_exit_code_0():
LOGGER.info('Starting Flake8 linting...')
flake8_run = flake8_app.Application()
flake8_run.run(
flake8_config + dirs_to_scan + self.get_python_files_at_env_root() # noqa pylint: disable=line-too-long
)
flake8_run.exit()
with ignore_exit_code_0():
LOGGER.info('Flake8 linting complete.')
LOGGER.info('Starting yamllint...')
yamllint_run(
["--config-file=%s" % yamllint_config] + dirs_to_scan + self.get_yaml_files_at_env_root() # noqa pylint: disable=line-too-long
)
LOGGER.info('yamllint complete.')
def get_cookbook_dirs(self, base_dir=None):
"""Find cookbook directories."""
if base_dir is None:
base_dir = self.env_root
cookbook_dirs = []
dirs_to_skip = set(['.git'])
for root, dirs, files in os.walk(base_dir): # pylint: disable=W0612
dirs[:] = [d for d in dirs if d not in dirs_to_skip]
for name in files:
if name == 'metadata.rb':
if 'cookbook' in os.path.basename(os.path.dirname(root)):
cookbook_dirs.append(root)
return cookbook_dirs
def cookbook_tests(self, base_dir=None):
"""Run cookbook tests."""
if base_dir is None:
base_dir = self.env_root
cookbook_dirs = self.get_cookbook_dirs(base_dir)
if cookbook_dirs:
if which('foodcritic') is None or which('cookstyle') is None:
LOGGER.error('"foodcritic" and/or "cookstyle" not found -- '
'please ensure ChefDK is installed.')
sys.exit(1)
for path in cookbook_dirs:
check_call(['foodcritic', '-f any', path])
check_call(['cookstyle', '-P', path])
def python_tests(self, base_dir=None, pylint_rc_file=None): # noqa pylint: disable=too-many-branches,too-many-locals
"""Run python tests."""
from pylint.lint import Run as PylintRun
if base_dir is None:
base_dir = self.env_root
if pylint_rc_file is None:
if os.path.isfile(os.path.join(base_dir, '.pylintrc')):
pylint_config = [
"--rcfile=%s" % os.path.join(base_dir, '.pylintrc')
]
else:
# Only reporting on errors ('-E') overrides any ignored errors
# set in .pylintrc, so it is only being used here when a
# pylint configuration file is not being used.
pylint_config = ['-E']
# Check all python files in repo
dirs_to_skip = set(['.git',
'node_modules',
'.serverless'])
nonblueprint_files = []
blueprint_files = []
for root, dirs, files in os.walk(base_dir):
dirs[:] = [d for d in dirs if d not in dirs_to_skip]
for name in files:
filepath = os.path.join(root, name)
if name[-3:] == '.py' and (
root.endswith('blueprints') and
not filepath.endswith('__init__.py')):
blueprint_files.append(filepath)
elif name[-3:] == '.py':
nonblueprint_files.append(filepath)
if nonblueprint_files + blueprint_files:
LOGGER.info("Checking python files with pylint (\"No config file "
"found...\" messages can be ignored)")
with use_embedded_pkgs(): # for embedded stacker
with ignore_exit_code_0():
LOGGER.debug("Executing pylint with the following options: \"%s\"", # noqa
' '.join(pylint_config + nonblueprint_files + blueprint_files)) # noqa pylint: disable=line-too-long
PylintRun(pylint_config + nonblueprint_files + blueprint_files) # noqa
LOGGER.info('pylint complete.')
for filepath in blueprint_files:
# Blueprints should output their template when executed
ensure_file_is_executable(filepath)
try:
shell_out_env = os.environ.copy()
if 'PYTHONPATH' in shell_out_env:
shell_out_env['PYTHONPATH'] = (
"%s:%s" % (get_embedded_lib_path(),
shell_out_env['PYTHONPATH'])
)
else:
shell_out_env['PYTHONPATH'] = get_embedded_lib_path()
cfn_template = check_output(
[sys.executable, filepath],
env=shell_out_env
).decode()
if not cfn_template:
raise ValueError('Template output should not be empty!') # noqa
parse_cloudformation_template(cfn_template)
except: # noqa - Bare except fine in this context
print("Error while checking %s for valid "
"YAML/JSON output" % filepath)
raise
def test(self):
"""Execute tests."""
self.lint()
self.cookbook_tests()
self.python_tests()
def path_only_contains_dirs(self, path):
"""Return boolean on whether a path only contains directories."""
pathlistdir = os.listdir(path)
if pathlistdir == []:
return True
if any(os.path.isfile(os.path.join(path, i)) for i in pathlistdir):
return False
return all(self.path_only_contains_dirs(os.path.join(path, i)) for i in pathlistdir) # noqa
def get_empty_dirs(self, path):
"""Return a list of empty directories in path."""
empty_dirs = []
for i in os.listdir(path):
child_path = os.path.join(path, i)
if i == '.git' or os.path.isfile(child_path) or os.path.islink(child_path): # noqa
continue
if self.path_only_contains_dirs(child_path):
empty_dirs.append(i)
return empty_dirs
def generate_sample_sls_module(self, module_dir=None):
"""Generate skeleton Serverless sample module."""
if module_dir is None:
module_dir = os.path.join(self.env_root, 'sampleapp.sls')
self.generate_sample_module(module_dir)
for i in ['config-dev-us-east-1.json', 'handler.py', 'package.json',
'serverless.yml']:
shutil.copyfile(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
'templates',
'serverless',
i),
os.path.join(module_dir, i),
)
LOGGER.info("Sample Serverless module created at %s",
module_dir)
def generate_sample_cdk_module(self, module_dir=None):
"""Generate skeleton CDK sample module."""
if module_dir is None:
module_dir = os.path.join(self.env_root, 'sampleapp.cdk')
self.generate_sample_module(module_dir)
for i in ['cdk.json', 'index.ts', 'package.json', 'tsconfig.json']:
shutil.copyfile(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
'templates',
'cdk',
i),
os.path.join(module_dir, i),
)
LOGGER.info("Sample CDK module created at %s", module_dir)
def generate_sample_cfn_module(self, module_dir=None):
"""Generate skeleton CloudFormation sample module."""
if module_dir is None:
module_dir = os.path.join(self.env_root, 'sampleapp.cfn')
self.generate_sample_module(module_dir)
for i in ['stacks.yaml', 'dev-us-east-1.env']:
shutil.copyfile(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
'templates',
'cfn',
i),
os.path.join(module_dir, i)
)
os.mkdir(os.path.join(module_dir, 'templates'))
with open(os.path.join(module_dir,
'templates',
'tf_state.yml'), 'w') as stream:
stream.write(
cfn_flip.flip(
check_output(
[sys.executable,
os.path.join(os.path.dirname(os.path.dirname(__file__)), # noqa
'templates',
'stacker',
'tfstate_blueprints',
'tf_state.py')]
)
)
)
LOGGER.info("Sample CloudFormation module created at %s",
module_dir)
def generate_sample_stacker_module(self, module_dir=None):
"""Generate skeleton Stacker sample module."""
if module_dir is None:
module_dir = os.path.join(self.env_root,
'runway-sample-tfstate.cfn')
self.generate_sample_module(module_dir)
for i in ['stacks.yaml', 'dev-us-east-1.env']:
shutil.copyfile(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
'templates',
'stacker',
i),
os.path.join(module_dir, i)
)
os.mkdir(os.path.join(module_dir, 'tfstate_blueprints'))
for i in ['__init__.py', 'tf_state.py']:
shutil.copyfile(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
'templates',
'stacker',
'tfstate_blueprints',
i),
os.path.join(module_dir, 'tfstate_blueprints', i)
)
os.chmod( # make blueprint executable
os.path.join(module_dir, 'tfstate_blueprints', 'tf_state.py'),
os.stat(os.path.join(module_dir,
'tfstate_blueprints',
'tf_state.py')).st_mode | 0o0111
)
LOGGER.info("Sample Stacker module created at %s",
module_dir)
def generate_sample_tf_module(self, module_dir=None):
"""Generate skeleton Terraform sample module."""
if module_dir is None:
module_dir = os.path.join(self.env_root, 'sampleapp.tf')
self.generate_sample_module(module_dir)
for i in ['.terraform-version', 'backend-us-east-1.tfvars',
'dev-us-east-1.tfvars', 'main.tf']:
shutil.copyfile(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
'templates',
'terraform',
i),
os.path.join(module_dir, i),
)
LOGGER.info("Sample Terraform app created at %s",
module_dir)
def parse_runway_config(self):
"""Read and parse runway.yml."""
if not os.path.isfile(self.runway_config_path):
LOGGER.error("Runway config file was not found (looking for "
"%s)",
self.runway_config_path)
sys.exit(1)
with open(self.runway_config_path) as data_file:
return yaml.safe_load(data_file)
@property
def runway_config(self):
"""Return parsed runway.yml."""
if not self._runway_config:
self._runway_config = self.parse_runway_config()
return self._runway_config
@staticmethod
def version():
"""Show current package version."""
print(version)
@staticmethod
def generate_sample_module(module_dir):
"""Generate skeleton sample module."""
if os.path.isdir(module_dir):
LOGGER.error("Error generating sample module -- directory %s "
"already exists!",
module_dir)
sys.exit(1)
os.mkdir(module_dir)
| 40.669231
| 147
| 0.535149
|