content stringlengths 5 1.05M |
|---|
import os
from dataclasses import dataclass
from types import ModuleType
from typing import Any, Dict, Optional
from basis import DataspaceCfg, Environment, GraphCfg
from dcp.storage.database.utils import get_tmp_sqlite_db_url
from dcp.storage.file_system import get_tmp_local_file_url
from dcp.utils.common import rand_str
def ensure_var(
    varname: str, interactive: bool = True, default: Any = None
) -> Optional[str]:
    """Resolve a configuration value from the environment.

    Returns the value of environment variable ``varname`` if set; otherwise,
    when ``interactive`` is true, prompts the user for it. Falls back to
    ``default`` when nothing was provided.

    Args:
        varname: Name of the environment variable to read.
        interactive: If true, prompt on stdin when the variable is unset.
        default: Value returned when no other value is available.
    """
    var = os.environ.get(varname)
    if var is not None:
        return var
    if interactive:
        # NOTE: input() never returns None -- an empty entry yields "" --
        # so the previous `if var is None` check could never apply the
        # default. Test for emptiness instead.
        var = input(f"Enter {varname} [{default}]: ")
    if not var:
        # Also covers the non-interactive path, where the default was
        # previously ignored entirely.
        var = default
    return var
@dataclass
class TestImporter:
    """End-to-end smoke test for a single importer function.

    Runs ``function_key`` as a one-node graph against temporary sqlite and
    local-file storages, then asserts on the produced records.

    Attributes:
        function_key: Key of the importer function to execute.
        module: Optional module to register on the environment first.
        params: Static parameters passed to the function.
        params_from_env: Mapping of param name -> environment variable name;
            resolved at run time via ``ensure_var``.
        expected_records_cnt: If set, assert at least this many records.
        expected_records_field: If set, assert the field exists in the
            first record.
    """

    function_key: str
    # None defaults require Optional[...] annotations (previously these were
    # annotated as the bare types, which is incorrect typing).
    module: Optional[ModuleType] = None
    params: Optional[Dict] = None
    params_from_env: Optional[Dict] = None
    expected_records_cnt: Optional[int] = None
    expected_records_field: Optional[str] = None

    def get_params(self, interactive: bool) -> Dict:
        """Merge static params with values resolved from the environment.

        Environment-derived values override static ones of the same name;
        unresolved (None) values are skipped.
        """
        params = dict(self.params or {})
        for name, envvar in (self.params_from_env or {}).items():
            var = ensure_var(envvar, interactive)
            if var is not None:
                params[name] = var
        return params

    def run(self, interactive: bool = False):
        """Execute the importer once and assert on its output records."""
        storage = get_tmp_sqlite_db_url()
        file_storage = get_tmp_local_file_url()
        env = Environment(
            DataspaceCfg(metadata_storage="sqlite://", storages=[storage, file_storage])
        )
        if self.module is not None:
            env.add_module(self.module)
        # Initial graph: a single node keyed with a random suffix so repeated
        # runs do not collide.
        n = GraphCfg(
            key=self.function_key + rand_str(6),
            function=self.function_key,
            params=self.get_params(interactive),
        )
        g = GraphCfg(nodes=[n])
        results = env.produce(n.key, g, execution_timelimit_seconds=1)
        records = results[0].stdout().as_records()
        if self.expected_records_cnt is not None:
            assert len(records) >= self.expected_records_cnt
        if self.expected_records_field is not None:
            assert self.expected_records_field in records[0]

    def __call__(self):
        self.run(interactive=False)
|
from neuron import h
class TransformMC1:
    """Replaces the 3D morphology of a NEURON cell with hard-coded coordinates.

    ``define_coords`` fills ``self.section_coords`` with per-section point
    lists (keyed by full section name, e.g. ``'MC1[0].apic[22]'``), and
    ``set_coords``/``set_all`` push those points into the live NEURON
    sections via the ``pt3d*`` API. Use the ``apply``/``apply_on`` static
    methods as one-shot entry points.
    """

    def __init__(self):
        # Create a section lookup by section name
        # Note: this assumes each section has a unique name
        self.name2section = { sec.name(): sec for sec in h.allsec() }
        # This will store the new section coordinates:
        # section name -> {"x": [...], "y": [...], "z": [...], "diam": [...]}
        self.section_coords = { }

    def set_coords(self, sec_name):
        """Replace the 3D points of one section with its stored coordinates.

        ``sec_name`` must be a key of both ``self.name2section`` and
        ``self.section_coords``; raises KeyError otherwise.
        """
        # Lookup the section
        nrn_section = self.name2section[sec_name]
        # Lookup its new coords
        new_coords = self.section_coords[sec_name]
        # Use 3D points as section L and diam sources
        h.pt3dconst(1, sec=nrn_section)
        # Clear the existing points - and allocate room for the incoming points
        h.pt3dclear(len(new_coords["diam"]), sec=nrn_section)
        # Use vectorization to add the points to section
        xvec = h.Vector(new_coords["x"])
        yvec = h.Vector(new_coords["y"])
        zvec = h.Vector(new_coords["z"])
        dvec = h.Vector(new_coords["diam"])
        h.pt3dadd(xvec, yvec, zvec, dvec, sec=nrn_section)

    def set_all(self):
        """Apply the stored coordinates to every section in section_coords."""
        for sec_name in self.section_coords.keys():
            self.set_coords(sec_name)

    @staticmethod
    def apply_on(prefix):
        """One-shot: load coordinates for *prefix* and apply them all."""
        t = TransformMC1()
        t.define_coords(prefix)
        t.set_all()

    @staticmethod
    def apply():
        """One-shot: apply coordinates using the default 'MC1[0]' prefix."""
        t = TransformMC1()
        t.define_coords()
        t.set_all()

    def define_coords(self, prefix = 'MC1[0]'):
        """Populate self.section_coords with the replacement morphology.

        ``prefix`` is the section-name prefix of the target cell; a trailing
        '.' is appended unless the prefix is empty. The coordinate data below
        is a fixed, hand-exported morphology (x/y/z in microns plus per-point
        diameters).
        """
        if prefix != '':
            prefix += '.'
        self.section_coords = {
            prefix + 'apic[22]': {
                'x':[-478.310,-473.910,-479.128,-480.820,-481.712,-482.697,-488.235,-494.884],
                'y':[700.308,692.069,683.629,679.457,675.050,671.268,663.419,659.395],
                'z':[-489.135,-490.183,-493.178,-497.312,-500.440,-504.077,-505.843,-509.395],
                'diam':[1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076]
            },
            prefix + 'dend[12]': {
                'x':[-332.074,-335.122,-337.470,-334.026,-335.372,-336.116,-335.046,-334.488,-333.104,-334.898,-334.526,-333.471,-331.931,-333.435,-333.683,-333.639,-334.643,-335.863,-338.532,-340.624,-342.617,-345.516,-349.429,-351.803,-353.352,-353.786,-356.489,-359.387,-357.993,-359.280,-359.595,-361.638,-364.265,-363.180,-365.583,-365.255,-366.614,-368.594,-368.828,-371.963,-375.561,-378.475,-382.256,-386.725,-391.489,-396.120,-398.301,-400.798,-405.904,-405.091,-405.403,-403.801,-401.655,-399.122,-399.227,-399.849,-400.018,-399.957],
                'y':[760.940,765.029,767.616,773.804,778.662,783.085,787.996,792.020,797.337,801.739,805.895,812.741,816.808,820.572,823.917,825.460,827.151,830.198,835.009,836.609,838.050,837.166,833.939,833.167,832.384,832.197,831.009,828.087,827.374,825.159,823.448,819.929,818.596,819.008,816.476,813.456,812.405,811.690,808.844,808.125,806.484,806.452,806.560,803.896,803.033,806.363,809.949,811.117,810.305,805.466,801.455,798.608,796.801,793.850,794.996,797.127,797.138,795.392],
                'z':[-458.932,-454.293,-448.068,-443.909,-440.725,-436.734,-434.620,-429.662,-424.960,-421.570,-417.050,-415.355,-410.325,-405.849,-399.242,-393.811,-387.575,-382.475,-377.788,-371.883,-367.236,-363.026,-358.474,-353.873,-347.125,-341.159,-335.050,-328.369,-323.196,-314.724,-309.432,-304.323,-299.437,-293.984,-289.501,-284.243,-279.263,-274.667,-269.060,-264.167,-256.882,-252.325,-248.576,-243.451,-238.386,-232.730,-225.914,-221.276,-218.627,-215.114,-211.890,-207.826,-201.612,-194.902,-188.686,-183.452,-178.174,-172.950],
                'diam':[2.517,2.517,2.153,1.614,1.614,1.440,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,0.902,0.902,0.902,0.902,0.902]
            },
            prefix + 'dend[8]': {
                'x':[-390.475,-394.402,-399.664,-405.171,-411.162,-417.146,-423.462,-423.877,-425.844,-424.928,-422.396,-420.759,-417.856,-416.577,-413.955,-412.866,-412.101,-412.026,-413.202,-413.813,-415.319,-417.481,-418.966,-421.350,-423.513,-427.707,-430.626,-432.907,-435.617,-439.260,-442.806,-445.759,-448.499,-451.792,-454.884,-459.519,-462.975,-467.437,-471.808,-475.821,-477.723,-479.201,-478.039,-481.903,-479.041,-473.953,-470.646,-469.893,-467.105,-464.619,-464.460,-462.773,-461.973,-460.515,-460.587,-462.688,-464.739,-463.564,-465.215,-467.413,-469.472,-470.021,-468.989,-470.034,-473.437,-474.278,-476.715,-477.355,-476.903,-476.979,-477.684,-478.552],
                'y':[790.922,798.135,801.341,805.548,809.674,809.277,810.112,815.330,818.027,821.501,826.741,831.666,834.689,839.028,843.256,849.066,853.879,862.510,869.394,875.233,880.112,884.271,890.879,895.558,900.953,904.359,909.398,914.443,919.655,924.324,930.170,935.085,939.706,945.265,948.747,951.458,956.813,960.740,965.269,968.591,973.243,979.969,984.990,990.185,995.408,999.621,1003.302,1007.887,1011.464,1016.616,1022.266,1026.352,1031.292,1036.125,1041.495,1047.572,1054.175,1059.418,1067.043,1075.022,1082.888,1089.152,1096.322,1102.874,1107.845,1113.629,1119.802,1127.956,1136.440,1143.206,1149.015,1155.129],
                'z':[-480.436,-481.276,-480.765,-480.828,-479.838,-476.852,-471.718,-473.412,-468.220,-463.380,-464.067,-462.034,-457.401,-454.487,-452.170,-449.851,-448.425,-446.597,-445.537,-443.945,-438.714,-436.950,-434.216,-433.990,-433.607,-433.776,-433.253,-433.970,-433.472,-433.421,-434.530,-435.203,-434.432,-433.891,-436.091,-439.061,-440.165,-441.373,-442.703,-442.483,-441.738,-440.985,-439.410,-440.662,-437.753,-437.007,-433.463,-429.781,-425.800,-423.769,-419.334,-414.382,-411.104,-408.430,-406.919,-406.074,-406.328,-404.605,-402.102,-401.483,-401.894,-400.921,-401.539,-401.669,-402.545,-402.325,-403.137,-402.674,-401.103,-397.404,-396.103,-395.693],
                'diam':[2.691,1.440,1.440,1.076,1.076,1.076,1.076,1.614,1.614,1.614,1.440,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250]
            },
            prefix + 'dend[9]': {
                'x':[-478.552,-477.555,-478.545,-480.681,-484.841,-489.349,-497.127,-503.697,-509.159,-513.828,-515.357,-519.750,-521.096,-522.680,-524.031,-522.912,-524.781,-525.192,-524.800,-526.322,-528.651],
                'y':[1155.129,1160.310,1165.736,1172.098,1175.723,1178.852,1178.892,1182.462,1184.581,1186.737,1192.022,1197.714,1205.017,1210.095,1217.282,1224.136,1228.479,1235.805,1241.493,1246.920,1253.357],
                'z':[-395.693,-392.323,-390.244,-389.636,-390.816,-391.264,-391.834,-389.545,-389.677,-388.921,-388.641,-389.862,-392.130,-392.894,-393.604,-393.380,-388.019,-384.285,-382.522,-381.991,-381.307],
                'diam':[1.250,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364,0.364]
            },
            prefix + 'apic[6]': {
                'x':[-515.027,-521.342,-525.816],
                'y':[737.309,735.293,732.106],
                'z':[-499.252,-502.890,-505.069],
                'diam':[3.055,2.327,2.327]
            },
            prefix + 'apic[0]': {
                'x':[-339.843,-346.067,-351.058,-357.406,-363.357,-369.045,-375.506],
                'y':[761.964,763.947,766.023,767.853,768.708,767.271,769.855],
                'z':[-472.154,-472.109,-473.659,-474.003,-476.665,-478.824,-477.518],
                'diam':[7.898,6.458,5.920,5.208,5.208,3.941,4.669]
            },
            prefix + 'apic[18]': {
                'x':[-539.227,-541.874,-541.006],
                'y':[701.369,696.662,689.541],
                'z':[-517.068,-520.801,-524.030],
                'diam':[1.076,0.712,0.712]
            },
            prefix + 'dend[0]': {
                'x':[-336.265,-337.963,-343.549,-346.598,-345.087,-344.980,-345.237,-347.148,-349.967,-351.052,-351.342,-354.338],
                'y':[757.228,756.012,756.459,756.695,756.371,757.987,760.357,760.138,760.635,761.541,762.063,764.449],
                'z':[-476.711,-482.393,-487.033,-493.643,-500.130,-505.236,-511.779,-520.266,-527.587,-532.807,-538.837,-543.610],
                'diam':[4.131,2.865,2.865,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327]
            },
            prefix + 'dend[1]': {
                'x':[-354.338,-358.094,-361.587,-362.196,-362.924,-361.775,-363.680,-364.124,-366.347,-366.918,-366.104,-367.061,-368.465,-368.200,-369.437,-369.568,-371.334,-371.839,-372.172,-372.883,-376.326,-378.782,-380.091,-381.748,-382.164,-384.185,-386.557,-387.574,-388.094,-390.059,-387.287,-388.266,-387.520,-385.552,-384.566,-383.571,-379.779,-377.898,-377.911,-379.154,-381.102,-380.490,-380.121,-383.783,-384.515,-384.698,-388.393,-393.839,-397.464,-398.228,-396.544,-398.612,-396.234,-397.668,-395.011,-394.539,-392.143,-390.564,-388.433,-388.927,-386.500,-388.819,-389.820,-388.683,-386.609,-383.811,-379.850,-381.662,-378.718,-376.449,-372.824,-371.498,-369.308,-367.801,-361.427,-356.565,-351.572,-346.634,-342.479,-336.503,-332.282,-327.880,-324.039,-318.323,-312.003,-307.683,-303.090,-298.666,-295.103,-292.032,-289.811,-287.930,-286.114,-283.776,-281.309,-280.224,-277.028,-272.262,-271.981,-271.869,-270.124,-271.689,-275.247,-276.450,-275.941,-278.613,-278.506,-277.280,-273.519,-270.046,-265.904,-262.503,-259.288,-255.906,-252.399,-247.025,-241.976,-238.496,-233.564,-230.603,-228.438,-225.596,-221.312,-215.477,-210.706],
                'y':[764.449,770.724,775.491,777.484,778.889,779.901,780.168,781.008,782.114,782.606,782.879,782.674,783.700,785.125,786.535,788.622,790.096,789.757,790.634,790.023,791.387,792.572,793.841,794.216,794.250,797.827,800.310,802.086,803.860,806.891,808.074,810.241,811.398,812.387,813.269,813.876,815.405,817.911,818.768,822.369,825.502,829.120,833.199,835.862,839.910,843.275,847.724,852.373,854.183,859.893,865.813,870.861,874.831,878.886,884.922,890.006,893.306,895.583,898.782,901.857,905.865,908.744,912.898,916.538,919.525,922.290,924.731,927.517,929.480,932.931,936.846,940.925,944.331,948.560,948.672,951.419,955.088,959.955,962.925,964.995,966.958,967.316,967.337,965.640,965.361,967.444,970.082,971.011,973.290,975.804,978.544,982.536,984.808,988.363,992.330,996.459,997.963,997.826,999.216,1000.565,1002.935,1003.282,1007.912,1011.932,1013.107,1014.941,1013.529,1009.733,1007.495,1005.932,1006.645,1005.616,1006.440,1006.016,1005.364,1004.620,1003.044,1000.338,999.922,995.738,991.467,991.056,989.427,985.883,984.413],
                'z':[-543.610,-550.406,-551.353,-558.800,-565.421,-575.176,-580.729,-586.629,-591.625,-596.583,-601.731,-608.681,-613.554,-619.957,-625.407,-629.992,-638.200,-644.340,-649.708,-655.745,-659.151,-663.628,-669.142,-675.001,-680.805,-686.221,-691.271,-696.917,-702.251,-706.282,-725.916,-730.532,-737.813,-750.052,-761.574,-769.590,-786.490,-800.917,-809.947,-814.431,-819.119,-823.424,-827.614,-830.582,-834.024,-837.732,-840.921,-843.972,-847.250,-850.215,-853.629,-856.316,-859.570,-864.043,-866.538,-869.099,-872.471,-876.842,-881.137,-886.854,-892.044,-895.541,-898.786,-903.335,-908.972,-916.105,-919.002,-923.271,-928.645,-934.695,-938.966,-942.231,-945.190,-950.463,-950.455,-952.855,-954.807,-956.893,-958.110,-959.681,-963.919,-967.277,-971.174,-973.487,-978.144,-981.795,-983.337,-986.272,-989.056,-992.248,-996.942,-1001.178,-1005.604,-1008.982,-1012.429,-1019.376,-1023.218,-1026.876,-1032.302,-1038.541,-1043.755,-1050.361,-1051.629,-1056.868,-1063.226,-1068.811,-1074.535,-1080.325,-1085.766,-1089.099,-1092.560,-1100.249,-1104.871,-1109.588,-1114.794,-1117.665,-1115.257,-1118.517,-1121.526,-1124.658,-1127.251,-1131.725,-1134.081,-1134.611,-1137.162],
                'diam':[2.327,1.789,1.789,1.789,1.789,1.789,2.327,2.327,2.153,2.153,2.153,2.153,2.153,2.153,2.153,2.153,2.153,2.153,2.153,2.153,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.789,1.789,1.789,1.979,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.789,1.614,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.076,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,1.076,1.076,1.076,1.076,1.076,1.076,1.076]
            },
            prefix + 'apic[16]': {
                'x':[-530.361,-533.910,-540.763,-546.446],
                'y':[720.547,724.071,724.346,725.914],
                'z':[-512.543,-516.632,-518.541,-520.265],
                'diam':[1.076,1.076,1.076,1.076]
            },
            prefix + 'apic[1]': {
                'x':[-375.506,-378.695,-386.283,-391.133,-397.016,-402.843,-410.018,-416.624,-423.041,-428.370,-433.772,-439.534,-444.849,-452.807,-457.691,-462.346,-468.116,-472.627,-479.574,-484.534],
                'y':[769.855,767.895,764.506,762.773,762.015,759.465,758.486,756.097,754.228,751.879,749.107,745.875,743.911,739.912,736.930,734.384,733.883,733.327,733.019,731.758],
                'z':[-477.518,-481.120,-482.387,-484.646,-484.741,-484.526,-486.373,-486.416,-487.863,-489.990,-491.637,-492.993,-494.080,-493.159,-493.339,-495.133,-492.292,-490.069,-489.018,-490.102],
                'diam':[4.669,4.131,3.593,3.055,3.055,3.055,3.403,4.479,4.479,4.479,4.479,4.479,4.479,4.305,4.305,4.305,4.305,4.305,4.305,4.305]
            },
            prefix + 'axon': {
                'x':[-334.143,-336.973,-336.895,-336.172],
                'y':[753.161,747.220,739.752,734.825],
                'z':[-467.986,-465.427,-461.929,-460.650],
                'diam':[2.517,2.517,2.327,2.327]
            },
            prefix + 'dend[13]': {
                'x':[-332.074,-321.158,-315.643,-313.868,-296.935,-289.703],
                'y':[760.940,759.237,759.513,760.594,758.727,757.838],
                'z':[-458.932,-461.537,-462.128,-455.225,-457.217,-455.679],
                'diam':[2.517,1.250,1.250,1.250,1.076,1.076]
            },
            prefix + 'dend[7]': {
                'x':[-378.909,-373.228,-370.413,-365.892,-363.587,-362.079,-361.781,-357.832,-355.377,-351.929,-347.559,-342.624,-337.458,-328.093],
                'y':[1180.806,1187.333,1193.030,1199.661,1206.096,1212.982,1219.707,1223.531,1227.181,1231.845,1239.159,1247.697,1251.279,1255.221],
                'z':[-681.811,-684.507,-685.094,-685.554,-685.479,-687.608,-687.548,-686.648,-690.526,-697.750,-702.858,-705.089,-705.763,-718.295],
                'diam':[1.979,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614]
            },
            prefix + 'apic[26]': {
                'x':[-501.271,-502.082,-504.372,-507.412,-509.073,-509.124,-509.189,-508.860],
                'y':[718.724,713.906,709.478,702.928,697.012,690.212,683.860,676.745],
                'z':[-509.074,-510.435,-516.547,-519.357,-521.451,-522.843,-522.332,-520.460],
                'diam':[1.076,1.250,1.250,1.076,1.076,1.076,1.076,1.076]
            },
            prefix + 'apic[12]': {
                'x':[-504.780,-509.430,-512.883,-517.227,-518.988,-521.271,-524.017],
                'y':[734.618,727.051,723.638,720.781,717.455,713.491,710.301],
                'z':[-491.919,-493.390,-495.264,-496.654,-502.683,-505.627,-509.218],
                'diam':[4.305,3.055,2.865,2.865,2.865,2.327,1.979]
            },
            prefix + 'soma': {
                'x':[-334.597,-334.905,-335.212],
                'y':[753.946,758.606,763.266],
                'z':[-466.908,-471.251,-475.594],
                'diam':[12.755,12.755,12.755]
            },
            prefix + 'apic[25]': {
                'x':[-501.271,-504.101,-510.554,-515.969,-521.736,-528.584,-533.687,-540.055],
                'y':[718.724,718.086,714.439,712.818,712.215,713.746,713.928,713.347],
                'z':[-509.074,-513.734,-516.916,-521.053,-524.211,-526.547,-527.641,-528.745],
                'diam':[1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076]
            },
            prefix + 'apic[7]': {
                'x':[-525.816,-531.943,-537.738,-546.092,-549.452,-552.296],
                'y':[732.106,734.236,735.808,742.473,746.135,750.897],
                'z':[-505.069,-510.392,-512.645,-512.836,-517.976,-522.391],
                'diam':[2.327,0.902,0.902,0.902,0.902,0.902]
            },
            prefix + 'apic[17]': {
                'x':[-529.073,-534.721,-539.227],
                'y':[708.488,703.546,701.369],
                'z':[-511.975,-515.550,-517.068],
                'diam':[1.614,1.076,1.076]
            },
            prefix + 'apic[5]': {
                'x':[-515.027,-516.400,-515.512,-518.515],
                'y':[737.309,742.713,750.261,752.597],
                'z':[-499.252,-501.463,-499.129,-495.251],
                'diam':[3.055,1.250,1.250,1.250]
            },
            prefix + 'dend[10]': {
                'x':[-478.552,-480.069,-480.630,-482.752,-484.683,-484.208,-485.443,-489.196,-493.320,-495.903,-498.671,-502.730,-506.900,-507.709,-511.104,-512.570,-514.586,-517.615,-520.847,-522.544,-525.453,-527.028,-527.854,-524.890,-523.311,-522.077,-519.841,-517.689],
                'y':[1155.129,1165.676,1171.301,1178.136,1185.806,1190.912,1197.197,1203.632,1207.166,1211.554,1216.384,1224.275,1230.468,1238.243,1246.450,1253.035,1258.012,1265.934,1271.494,1277.241,1286.334,1291.990,1299.174,1304.822,1308.635,1314.810,1319.421,1324.950],
                'z':[-395.693,-392.396,-389.099,-388.026,-387.093,-386.155,-386.117,-386.875,-388.516,-390.722,-391.229,-392.322,-394.063,-393.202,-394.620,-395.394,-395.209,-394.206,-394.869,-394.112,-393.505,-392.276,-391.847,-390.243,-386.782,-386.737,-386.329,-384.748],
                'diam':[1.250,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902]
            },
            prefix + 'apic[2]': {
                'x':[-484.534,-490.107,-495.470,-501.749],
                'y':[731.758,734.349,737.066,737.771],
                'z':[-490.102,-492.443,-494.052,-494.520],
                'diam':[4.305,3.941,4.305,4.305]
            },
            prefix + 'apic[14]': {
                'x':[-529.073,-530.161,-530.361],
                'y':[708.488,714.965,720.547],
                'z':[-511.975,-513.432,-512.543],
                'diam':[1.614,1.076,1.076]
            },
            prefix + 'dend[3]': {
                'x':[-336.589,-341.902,-342.304,-344.050,-346.743,-348.858,-350.320,-351.016,-355.986,-358.478,-359.843,-361.604,-363.207,-364.659,-368.001,-368.298,-368.911,-370.477,-373.056,-376.461,-376.136,-378.540,-381.362,-383.238,-385.722,-388.188,-389.339,-390.700,-389.033,-387.341,-386.127,-383.611,-383.792,-382.310,-382.390,-381.018,-382.869,-384.701,-384.169,-382.150,-378.348,-375.522,-373.443,-371.581,-369.859,-370.905,-367.857,-367.573,-366.218,-362.972,-359.341,-344.590,-345.399,-346.107,-350.173,-353.285,-355.382,-356.483,-354.030,-356.805,-355.317,-357.923,-359.438,-357.653,-354.741,-354.887,-355.322,-355.720,-354.505,-353.481,-351.202,-349.950,-349.910,-349.675,-348.941,-350.449,-351.123,-349.083,-346.880,-344.249,-343.581,-343.519,-344.199,-340.890,-337.709,-334.868,-330.892,-329.667,-327.535,-325.139,-324.197,-321.557,-318.557,-315.120,-314.855,-316.762,-317.450,-319.553,-321.061,-323.935,-322.733,-322.252,-318.872,-317.585,-318.493,-319.260,-322.021,-325.105,-327.515,-326.212,-325.570,-327.424,-329.128],
                'y':[752.141,746.321,741.014,734.343,731.071,726.517,723.353,717.227,712.573,707.939,702.998,702.769,702.388,700.296,700.494,697.405,698.435,696.563,693.066,691.779,688.221,688.743,689.793,686.813,686.643,683.565,680.802,675.690,671.213,667.342,661.185,655.971,652.225,648.744,644.708,640.927,638.237,635.050,629.030,624.698,619.765,616.162,614.175,609.478,606.443,604.128,600.080,599.357,597.309,594.066,590.961,577.790,574.409,573.227,569.656,568.283,565.217,562.072,559.115,556.938,557.047,552.988,549.391,545.228,537.892,533.386,528.517,522.835,516.572,508.344,502.385,497.583,490.650,485.108,477.822,470.342,465.298,459.270,452.925,447.943,443.184,437.978,432.717,428.048,423.779,418.973,414.761,411.075,406.935,404.344,399.639,395.258,390.806,386.763,379.773,375.428,369.602,363.119,357.075,353.137,348.860,344.083,338.717,333.411,327.916,323.658,318.628,315.193,312.547,309.088,303.717,302.445,301.345],
                'z':[-472.174,-475.355,-480.126,-484.195,-489.331,-494.121,-497.746,-499.389,-502.833,-504.930,-512.608,-517.338,-522.226,-529.022,-534.517,-540.763,-545.839,-552.248,-556.893,-561.342,-565.444,-570.110,-574.403,-579.142,-583.940,-587.704,-592.426,-597.540,-601.153,-603.873,-609.092,-613.498,-617.587,-622.116,-627.507,-633.260,-639.145,-643.328,-647.987,-654.044,-658.082,-662.010,-666.815,-671.825,-675.652,-683.980,-696.342,-701.460,-707.915,-715.782,-725.861,-759.782,-763.594,-769.635,-775.686,-781.130,-785.310,-790.159,-793.690,-797.575,-804.249,-805.791,-809.046,-813.121,-813.765,-816.229,-818.746,-820.417,-819.216,-817.438,-816.792,-814.671,-812.947,-811.950,-813.371,-814.134,-813.850,-814.560,-814.039,-815.340,-818.207,-819.498,-821.687,-822.911,-824.390,-822.472,-828.485,-832.647,-835.306,-839.046,-841.568,-843.379,-842.851,-844.184,-846.170,-848.675,-848.406,-849.754,-852.445,-855.953,-858.981,-860.762,-861.056,-858.438,-856.810,-859.991,-862.758,-866.091,-869.634,-873.850,-875.394,-880.711,-885.607],
                'diam':[2.517,2.153,1.979,1.789,1.789,1.789,1.789,1.614,1.614,1.440,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.789,1.789,1.789,1.789,1.789,1.789,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,1.250,1.250,1.250,1.250,1.076,1.076,1.076,1.076,1.076,1.076,1.076,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.712,0.712]
            },
            prefix + 'apic[4]': {
                'x':[-504.780,-508.974,-515.027],
                'y':[734.618,736.030,737.309],
                'z':[-491.919,-495.191,-499.252],
                'diam':[4.305,3.055,3.055]
            },
            prefix + 'apic[13]': {
                'x':[-524.017,-529.073],
                'y':[710.301,708.489],
                'z':[-509.218,-511.976],
                'diam':[1.979,1.614]
            },
            prefix + 'apic[23]': {
                'x':[-478.310,-480.882,-478.807,-477.921,-478.452],
                'y':[700.308,698.938,695.449,687.449,682.780],
                'z':[-489.135,-493.519,-497.874,-502.783,-507.198],
                'diam':[1.076,1.076,1.076,1.076,1.076]
            },
            prefix + 'apic[3]': {
                'x':[-501.749,-504.780],
                'y':[737.771,734.618],
                'z':[-494.520,-491.919],
                'diam':[4.305,4.305]
            },
            prefix + 'dend[2]': {
                'x':[-354.338,-357.128,-360.450,-363.953,-368.558,-373.550,-375.585,-377.924,-381.582,-385.135,-390.377,-393.212,-394.709,-395.086,-394.928,-391.569,-387.758,-383.724,-382.446,-380.358,-379.538,-379.510,-380.333,-377.969,-377.198,-376.008,-373.169,-373.053,-373.981,-373.901,-373.414,-374.059,-374.340,-369.815,-373.219,-372.019,-370.023,-367.707,-367.588,-367.162,-366.152,-365.247,-365.782,-365.810,-365.006,-365.119,-365.643,-366.393,-368.365,-368.639,-364.012,-362.837,-363.344,-362.951,-362.464,-363.351,-363.636,-364.836,-366.898,-368.514,-366.763,-369.298,-370.458,-372.996,-375.325,-375.863,-379.022,-382.262,-382.750,-383.284,-380.564,-376.461,-374.719,-369.403,-365.119,-363.085,-358.908,-352.556,-353.495,-351.527,-350.397,-353.199,-355.251,-358.766,-363.035,-364.668,-364.876,-359.895,-357.368,-355.195,-353.927,-353.335,-355.112,-355.825,-355.596,-356.250,-358.349,-361.840,-361.267,-360.990,-359.968,-356.761,-353.832,-349.871,-347.432,-347.928,-346.767,-344.858,-342.205,-339.316,-337.575,-335.864,-331.930,-330.901,-325.813,-320.567,-316.456,-311.151,-304.723,-300.016],
                'y':[764.449,760.249,757.431,752.499,749.647,743.674,736.963,731.628,728.190,725.603,725.390,723.334,721.229,718.979,719.135,719.345,719.741,720.829,721.980,720.728,722.529,721.769,720.061,718.970,717.668,715.802,714.723,711.925,707.221,704.763,701.509,696.839,692.873,690.466,686.052,680.366,678.349,677.379,675.369,677.021,674.726,672.735,669.499,664.198,659.014,656.034,649.288,645.228,640.271,636.017,632.676,627.809,622.912,616.676,611.424,606.910,601.983,595.672,589.348,586.769,580.440,576.769,571.256,566.366,562.488,558.721,559.204,556.582,554.330,552.768,548.659,548.505,545.530,541.651,539.629,536.621,534.494,530.083,524.789,520.167,516.208,511.932,506.245,503.891,504.279,502.394,501.488,499.963,497.570,492.692,487.301,482.722,478.890,474.361,472.480,467.352,463.209,462.000,457.963,452.001,447.502,446.727,447.202,447.191,449.972,449.121,444.728,442.703,438.520,434.894,431.731,434.764,432.028,427.863,425.583,424.209,422.524,422.541,424.043,425.785],
                'z':[-543.610,-547.397,-554.138,-556.909,-559.275,-559.771,-561.586,-565.193,-568.390,-571.090,-576.212,-581.926,-587.263,-592.027,-598.722,-605.379,-608.666,-612.386,-617.630,-626.455,-632.434,-637.886,-643.384,-649.443,-655.392,-663.864,-670.293,-675.275,-678.195,-682.827,-688.020,-691.827,-696.668,-754.075,-756.656,-758.230,-763.048,-768.361,-774.148,-780.799,-785.979,-793.518,-798.382,-802.789,-807.250,-813.043,-814.017,-817.282,-820.871,-825.055,-828.542,-831.184,-834.332,-838.057,-841.481,-845.005,-848.230,-851.854,-853.027,-857.757,-861.353,-866.985,-868.102,-871.820,-874.887,-880.396,-885.698,-890.337,-895.906,-901.429,-905.643,-909.393,-913.675,-914.613,-919.343,-922.847,-926.028,-928.860,-933.226,-935.217,-938.972,-940.853,-943.186,-948.335,-952.555,-957.505,-963.313,-965.359,-970.134,-972.163,-974.289,-979.874,-984.351,-989.020,-994.552,-996.691,-1002.138,-1007.310,-1010.343,-1010.722,-1013.624,-1017.925,-1024.919,-1032.135,-1037.982,-1044.917,-1048.111,-1053.837,-1057.346,-1060.840,-1065.861,-1071.054,-1076.443,-1079.348,-1080.628,-1085.002,-1087.841,-1088.971,-1089.594,-1091.919],
                'diam':[2.327,1.979,1.789,1.789,1.789,1.614,1.614,1.614,1.614,1.440,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.789,1.789,1.789,1.789,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.614,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.440,1.250,1.614,1.614,1.614,1.614,1.440,1.440,1.440,1.440,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.250,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076,1.076]
            },
            prefix + 'dend[11]': {
                'x':[-332.389,-332.074],
                'y':[758.995,760.940],
                'z':[-465.383,-458.932],
                'diam':[2.865,2.517]
            },
            prefix + 'apic[9]': {
                'x':[-525.816,-527.596],
                'y':[732.106,725.494],
                'z':[-505.069,-504.422],
                'diam':[2.327,1.979]
            },
            prefix + 'apic[19]': {
                'x':[-539.227,-546.106,-550.624,-556.157],
                'y':[701.369,710.038,716.733,721.673],
                'z':[-517.068,-520.931,-520.437,-520.698],
                'diam':[1.076,0.712,0.712,0.712]
            },
            prefix + 'apic[15]': {
                'x':[-530.361,-526.493],
                'y':[720.547,724.804],
                'z':[-512.543,-509.798],
                'diam':[1.076,1.076]
            },
            prefix + 'apic[20]': {
                'x':[-524.017,-522.920,-521.888],
                'y':[710.301,703.155,697.655],
                'z':[-509.218,-513.718,-514.177],
                'diam':[1.979,1.440,1.440]
            },
            prefix + 'apic[10]': {
                'x':[-527.596,-531.418,-533.904,-535.420,-539.815,-543.304,-547.482,-552.613],
                'y':[725.494,718.233,713.208,706.430,701.599,699.076,696.733,693.462],
                'z':[-504.422,-508.256,-508.995,-514.265,-516.356,-519.014,-521.826,-520.442],
                'diam':[1.979,0.538,0.538,0.538,0.538,0.538,0.538,0.364]
            },
            prefix + 'apic[11]': {
                'x':[-527.596,-527.859,-528.627,-532.772],
                'y':[725.494,722.007,717.655,711.800],
                'z':[-504.422,-500.211,-497.459,-494.881],
                'diam':[1.979,1.440,1.440,1.440]
            },
            prefix + 'dend[5]': {
                'x':[-390.475,-388.312,-390.379,-392.826,-394.880,-397.620,-399.292,-401.077,-400.976,-401.245,-401.296,-400.424,-400.031,-402.457,-398.453,-397.144,-398.483,-400.722,-402.495,-405.613,-405.921,-406.191,-408.168,-410.520,-412.537,-415.473,-418.882,-422.138,-424.170,-428.081,-431.348,-438.446,-443.809,-444.293,-439.884,-435.900,-429.900,-425.778,-426.379,-426.436,-425.220,-425.336,-424.636,-424.955,-426.442,-423.284,-422.643,-418.893,-417.299,-415.634,-415.182,-416.399,-413.901,-410.674,-408.944,-407.269,-405.320,-401.866,-399.459,-398.533,-395.398,-394.074,-393.300,-396.391,-397.673,-398.327,-396.660,-394.497,-392.168,-389.630,-388.880,-386.500,-383.440,-378.909],
                'y':[790.922,794.530,801.913,806.555,811.178,820.422,826.948,833.005,838.718,843.896,849.322,855.419,861.171,866.162,868.853,873.384,875.784,879.756,886.096,890.848,897.764,902.708,906.952,910.310,915.961,918.297,921.009,927.092,930.176,936.269,939.826,947.491,950.558,954.155,958.209,963.010,968.487,972.227,977.865,983.498,991.355,995.496,1000.999,1006.388,1012.048,1017.537,1021.916,1027.022,1033.775,1040.213,1044.817,1048.858,1052.723,1058.180,1063.011,1068.316,1073.676,1080.693,1088.734,1094.585,1100.310,1111.176,1116.704,1124.182,1130.113,1135.479,1141.450,1146.631,1156.846,1160.813,1166.739,1171.580,1177.189,1180.806],
                'z':[-480.436,-488.070,-492.698,-494.556,-496.545,-498.910,-501.933,-504.418,-506.801,-508.824,-511.797,-513.333,-516.185,-519.699,-528.427,-530.387,-535.509,-538.144,-543.986,-545.238,-546.876,-548.623,-554.816,-559.036,-564.871,-568.491,-571.248,-577.612,-582.260,-587.420,-590.879,-595.162,-594.666,-599.791,-603.803,-607.124,-618.284,-626.954,-629.364,-634.370,-637.621,-641.586,-643.992,-645.837,-650.005,-651.689,-655.004,-657.910,-661.333,-664.404,-666.869,-669.697,-671.771,-671.897,-672.344,-672.253,-672.624,-675.265,-676.553,-677.961,-679.519,-681.687,-682.047,-684.004,-685.205,-685.852,-686.517,-684.733,-684.667,-682.904,-682.304,-683.353,-684.499,-681.811],
                'diam':[2.691,2.327,2.327,2.327,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,1.789,2.865,2.865,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.327,2.153,2.153,2.153,2.153,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979,1.979]
            },
            prefix + 'dend[6]': {
                'x':[-378.909,-378.619,-378.559,-380.549,-381.536,-383.208,-385.196,-384.194,-382.475],
                'y':[1180.806,1187.058,1193.652,1200.357,1206.409,1211.165,1217.116,1224.081,1231.409],
                'z':[-681.811,-681.509,-682.777,-680.750,-682.297,-680.578,-681.378,-685.787,-685.867],
                'diam':[1.979,1.250,1.250,0.902,0.902,0.902,0.902,0.902,0.902]
            },
            prefix + 'apic[24]': {
                'x':[-484.534,-488.002,-492.845,-494.593,-497.454,-501.271],
                'y':[731.758,728.106,724.770,722.603,719.961,718.724],
                'z':[-490.102,-492.074,-496.614,-501.321,-504.923,-509.074],
                'diam':[4.305,1.440,1.440,1.440,1.440,1.076]
            },
            prefix + 'apic[21]': {
                'x':[-501.749,-497.544,-493.685,-490.383,-486.721,-484.163,-480.201,-478.310],
                'y':[737.771,733.189,729.741,722.660,717.330,712.159,706.487,700.308],
                'z':[-494.520,-492.841,-491.674,-488.576,-488.143,-489.231,-488.845,-489.135],
                'diam':[4.305,1.614,1.250,1.250,1.250,1.250,1.250,1.076]
            },
            prefix + 'dend[4]': {
                'x':[-375.506,-379.739,-380.439,-383.906,-390.475],
                'y':[769.855,774.046,778.558,785.461,790.922],
                'z':[-477.518,-479.623,-482.732,-484.504,-480.436],
                'diam':[4.669,3.403,3.403,3.055,2.691]
            },
            prefix + 'apic[8]': {
                'x':[-525.816,-529.711,-533.669,-537.614,-543.397,-546.150,-549.542,-548.527,-547.847,-547.572,-547.381],
                'y':[732.106,729.225,726.106,722.247,718.313,712.665,706.624,700.751,695.727,691.145,686.462],
                'z':[-505.069,-507.035,-506.823,-507.615,-507.827,-506.072,-504.548,-500.286,-499.008,-501.822,-503.689],
                'diam':[2.327,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902,0.902]
            },
        }
|
# -*- coding: utf-8 -*-
from numpy import arange as npArange
from numpy import log as npLog
from numpy import sqrt as npSqrt
from pandas import DataFrame, Series, Timedelta
from ._core import verify_series
from ._time import total_time
from ._math import linear_regression
from pandas_ta.performance import drawdown, log_return, percent_return
def cagr(close: Series) -> float:
    """Compounded Annual Growth Rate of a price series."""
    close = verify_series(close)
    first, last = close.iloc[0], close.iloc[-1]
    # Annualize the total growth by the elapsed time of the series.
    return (last / first) ** (1.0 / total_time(close)) - 1
def calmar_ratio(close: Series, method: str = "percent", years: int = 3, log: bool = False) -> float:
    """Calmar Ratio: CAGR divided by the Max Drawdown, 'typically' computed
    over the trailing three years of data.

    Note: the ``log`` flag is accepted for API symmetry with the other
    ratios but is not used by this computation.
    """
    close = verify_series(close)
    cutoff = close.index[-1] - Timedelta(days=365.25 * years)
    recent = close[close.index > cutoff]
    return cagr(recent) / max_drawdown(recent, method=method)
def downside_deviation(returns: Series, benchmark_rate: float = 0.0, log: bool = False, tf: str = "years") -> float:
    """Downside Deviation (the denominator of the Sortino Ratio).

    The benchmark rate is assumed to be annualized: it is de-annualized to
    the data's periodicity before computing the deviation, and the result
    is annualized back with the same factor.
    """
    returns = verify_series(returns)
    periods_per_year = returns.shape[0] / total_time(returns, tf)
    per_period_benchmark = (1 + benchmark_rate) ** (1.0 / periods_per_year) - 1
    # Only returns falling short of the benchmark contribute.
    shortfall = per_period_benchmark - returns
    sum_of_squares = (shortfall[shortfall > 0] ** 2).sum()
    deviation = npSqrt(sum_of_squares / (returns.shape[0] - 1))
    return deviation * npSqrt(periods_per_year)
def jensens_alpha(returns: Series, benchmark_returns: Series) -> float:
    """Jensen's 'Alpha' of a return series versus a benchmark.

    Fix: interpolation is done on a copy instead of ``inplace=True`` so the
    caller's benchmark Series is no longer mutated as a side effect.

    Args:
        returns: asset return series.
        benchmark_returns: benchmark return series (gaps are interpolated).

    Returns:
        The regression intercept ("a") of returns on benchmark returns.
    """
    returns = verify_series(returns)
    benchmark_returns = verify_series(benchmark_returns)
    # Fill gaps without mutating the caller's data.
    benchmark_returns = benchmark_returns.interpolate()
    return linear_regression(benchmark_returns, returns)["a"]
def log_max_drawdown(close: Series) -> float:
    """Log Max Drawdown of a series: total log return minus the log-method
    Max Drawdown.

    Fix: the local variable previously shadowed the module-level
    ``log_return`` function imported from ``pandas_ta.performance``;
    renamed to avoid the collision.
    """
    close = verify_series(close)
    total_log_return = npLog(close.iloc[-1]) - npLog(close.iloc[0])
    return total_log_return - max_drawdown(close, method="log")
def max_drawdown(close: Series, method: str = None, all: bool = False) -> float:
    """Maximum Drawdown from close. Defaults to the 'dollar' measure.

    Args:
        close: price series.
        method: "dollar", "percent" or "log"; anything else falls back to
            "dollar".
        all: when True, return a dict with all three measures instead.
    """
    close = verify_series(close)
    dd = drawdown(close).max()
    measures = {
        "dollar": dd.iloc[0],
        "percent": dd.iloc[1],
        "log": dd.iloc[2],
    }
    if all:
        return measures
    if isinstance(method, str) and method in measures:
        return measures[method]
    return measures["dollar"]
def pure_profit_score(close: Series) -> float:
    """Pure Profit Score of a series: the linear-regression correlation "r"
    of the series against a zero baseline series, scaled by the CAGR.

    Fix: removed the function-local ``from sklearn.linear_model import
    LinearRegression`` — it was never used (``linear_regression`` from
    ``._math`` does the work) and forced an unnecessary hard dependency.
    """
    close = verify_series(close)
    close_index = Series(0, index=close.reset_index().index)
    r = linear_regression(close_index, close)["r"]
    return r * cagr(close)
def sharpe_ratio(close: Series, benchmark_rate: float = 0.0, log: bool = False) -> float:
    """Sharpe Ratio of a series: (CAGR - benchmark_rate) / volatility.

    Fix: the previous code computed a returns Series and called
    ``volatility(close, returns, log=log)``, which passed that Series
    positionally into volatility's ``tf`` (timeframe) parameter.
    ``volatility`` derives the same returns internally from ``close``,
    so simply let it do so.
    """
    close = verify_series(close)
    result = cagr(close) - benchmark_rate
    result /= volatility(close, log=log)
    return result
def sortino_ratio(close: Series, benchmark_rate: float = 0.0, log: bool = False) -> float:
    """Sortino Ratio of a series: excess CAGR over the Downside Deviation."""
    close = verify_series(close)
    if log:
        returns = log_return(close=close)
    else:
        returns = percent_return(close=close)
    excess = cagr(close) - benchmark_rate
    return excess / downside_deviation(returns)
def volatility(close: Series, tf: str = "years", returns: bool = False, **kwargs) -> float:
    """Annualized volatility of a series. Default timeframe: 'years'.

    Args:
        close: price series.
        tf: timeframe used to annualize (e.g. "years").
        returns: pass a precomputed returns Series to skip recomputation;
            any non-Series value (the default False) means "derive the
            returns from ``close``".
        log: use log returns instead of percent returns when deriving.
        nearest_day (kwarg): round the annualization factor up to a whole
            number of periods when ``tf`` is "years".

    Fix: the ``returns`` parameter previously raised
    ``ValueError: The truth value of a Series is ambiguous`` whenever a
    Series was actually supplied (``if not returns``); it is now tested
    with ``isinstance`` so both the boolean default and a real Series work.
    """
    log = kwargs.pop("log", False)
    close = verify_series(close)
    if not isinstance(returns, Series):
        returns = percent_return(close=close) if not log else log_return(close=close)
    factor = returns.shape[0] / total_time(returns, tf)
    if kwargs.pop("nearest_day", False) and tf.lower() == "years":
        factor = int(factor + 1)
    return returns.std() * npSqrt(factor)
__author__ = 'Zhanelya'
import web
from localsys.environment import get_start_time
from localsys.environment import context
from localsys import storage
import datetime
# Names made available inside every web.py template rendered below.
render_globals = {
    'datetime': datetime,
    'get_start_time': get_start_time,
    'user_id': context.user_id,      # id of the user in the current request context
    'username': context.username,
    'path': storage.path             # storage root used by templates to build links
}
# Template renderer rooted at views/, with the globals above injected.
render = web.template.render('views/', globals=render_globals)
class spa:
    """web.py handler that serves the single-page-app skeleton template."""
    def GET(self):
        # Renders views/skeleton_spa; client-side code takes over from there.
        return render.skeleton_spa()
|
# 징검다리
# 이분탐색을 이용한 풀이
# https://programmers.co.kr/learn/courses/30/lessons/43236
def solution(distance, rocks, n):
    """Stepping stones (programmers.co.kr 43236), solved by binary search.

    Remove up to ``n`` rocks so that the minimum gap between consecutive
    points (start 0, the remaining rocks, and the far bank at ``distance``)
    is as large as possible; return that maximized minimum gap.

    Fixes versus the original:
      * The final gap (last kept rock -> ``distance``) is now included in
        the feasibility check; it was previously ignored, which
        overestimated the answer whenever that gap was the smallest.
      * ``rocks`` is no longer sorted in place, so the caller's list is
        not mutated.
    """
    rocks = sorted(rocks)
    answer = 0
    left, right = 1, distance
    while left <= right:
        mid = (left + right) // 2   # candidate minimum gap
        prev = 0                    # last kept position (start bank)
        removed = 0
        # Greedily remove any point closer than `mid` to the previous kept one.
        for point in rocks + [distance]:
            if point - prev < mid:
                removed += 1
                if removed > n:
                    break
            else:
                prev = point
        if removed > n:
            right = mid - 1   # mid is infeasible: try smaller gaps
        else:
            answer = mid      # mid is feasible: try larger gaps
            left = mid + 1
    return answer
print(solution(25,[2, 14, 11, 21, 17],2))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
# Command-line entry point: delegate everything to rpy's CLI dispatcher.
# The import is deferred into the guard so importing this module has no side effects.
if __name__ == '__main__':
    from rpy.cli.dispatch import execute_from_command_line
    execute_from_command_line()
"""collections_indexes_with_flush.py: These tests validate indexer behavior for collection during bucket flush
__author__ = "Hemant Rajput"
__maintainer = "Hemant Rajput"
__email__ = "Hemant.Rajput@couchbase.com"
__git_user__ = "hrajput89"
__created_on__ = "08/09/20 12:31 pm"
"""
from concurrent.futures import ThreadPoolExecutor
from couchbase_helper.query_definitions import QueryDefinition
from .base_gsi import BaseSecondaryIndexingTests
class CollectionsIndexesWithFlush(BaseSecondaryIndexingTests):
    """Validates secondary-index (GSI) behaviour for collections while the
    bucket is being flushed: rollback counts, doc counts seen by queries,
    and recovery when a KV node disappears mid-flush."""

    def setUp(self):
        """Recreate the standard test bucket from scratch for every test."""
        super(CollectionsIndexesWithFlush, self).setUp()
        self.log.info("============== CollectionsIndexBasics setup has started ==============")
        self.rest.delete_all_buckets()
        self.bucket_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                        replicas=self.num_replicas, bucket_type=self.bucket_type,
                                                        enable_replica_index=self.enable_replica_index,
                                                        eviction_policy=self.eviction_policy, lww=self.lww)
        self.cluster.create_standard_bucket(name=self.test_bucket, port=11222,
                                            bucket_params=self.bucket_params)
        self.buckets = self.rest.get_buckets()
        self.log.info("============== CollectionsIndexBasics setup has completed ==============")

    def tearDown(self):
        self.log.info("============== CollectionsIndexBasics tearDown has started ==============")
        super(CollectionsIndexesWithFlush, self).tearDown()
        self.log.info("============== CollectionsIndexBasics tearDown has completed ==============")

    def suite_tearDown(self):
        pass

    def suite_setUp(self):
        pass

    def test_index_status_with_bucket_flush(self):
        """Flush the bucket repeatedly and verify index rollbacks and counts."""
        num_of_docs_per_collection = 10 ** 5
        self.prepare_collection_for_indexing(num_of_docs_per_collection=num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]
        index_gen = QueryDefinition(index_name='idx', index_fields=['age', 'city', 'country'])
        query = index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=False)
        self.run_cbq_query(query=query)
        self.wait_until_indexes_online()
        select_query = f'select count(age) from {collection_namespace} where age >= 0'
        result = self.run_cbq_query(query=select_query)['results'][0]['$1']
        self.assertEqual(result, num_of_docs_per_collection, "Doc count not matching")
        # Checking indexer status after bucket flush
        try:
            num_rollback = self.rest.get_num_rollback_stat(bucket=self.test_bucket)
            self.log.info(f"No. of rollbacks: {num_rollback}")
            self.cluster.bucket_flush(server=self.master, bucket=self.test_bucket, timeout=600)
            self.sleep(120, "Giving some time to indexer to update indexes after flush")
            index_info = self.rest.get_indexer_metadata()['status']
            self.log.info(index_info)
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, 0, "Doc count not matching")
            rollback_after_flush = self.rest.get_num_rollback_stat(bucket=self.test_bucket)
            self.sleep(10)
            self.log.info(rollback_after_flush)
            self.assertEqual(rollback_after_flush, num_rollback + 2, "Flush didn't send rollback to Zero to Indexer")
            num_rollback = rollback_after_flush
            # Checking indexer status after with incremental build and then flush
            with ThreadPoolExecutor() as executor:
                task1 = executor.submit(self.data_ops_javasdk_loader_in_batches, sdk_data_loader=self.gen_create,
                                        batch_size=2 * 10 ** 4)
                self.sleep(15, "There is 10sec delay in doc loader")
                task2 = executor.submit(self.run_cbq_query, query=select_query)
                task3 = self.cluster.async_bucket_flush(server=self.master, bucket=self.test_bucket)
                result = task2.result()['results'][0]['$1']
                self.assertTrue(result > 0, "Indexer not indexed newly inserted docs")
                self.log.info(task3.result())
                tasks = task1.result()
                for task in tasks:
                    out = task.result()
                    self.log.info(out)
            self.sleep(15, "Giving some time to indexer to update indexes after flush")
            rollback_after_flush = self.rest.get_num_rollback_stat(bucket=self.test_bucket)
            self.log.info(rollback_after_flush)
            self.assertEqual(rollback_after_flush, num_rollback + 2, "Flush didn't send rollback to Zero to Indexer")
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, 0, "Doc count not matching")
            # Flushing already Flushed bucket
            result = self.cluster.bucket_flush(server=self.master, bucket=self.test_bucket)
            self.log.info(result)
            self.sleep(15, "Giving some time to indexer to update indexes after flush")
            rollback_after_flush = self.rest.get_num_rollback_stat(bucket=self.test_bucket)
            self.log.info(rollback_after_flush)
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, 0, "Doc count not matching")
            drop_index_query = index_gen.generate_index_drop_query(namespace=collection_namespace)
            self.run_cbq_query(query=drop_index_query)
        except Exception as err:
            self.fail(err)

    def test_index_status_with_multiple_collection_with_bucket_flush(self):
        """Flush a bucket holding two indexed collections; all counts drop to 0."""
        num_of_docs_per_collection = 10 ** 5
        self.prepare_collection_for_indexing(num_of_docs_per_collection=num_of_docs_per_collection, num_collections=2)
        index_gen_list = []
        for collection_namespace in self.namespaces:
            index_gen = QueryDefinition(index_name='idx', index_fields=['age', 'city', 'country'])
            index_gen_list.append(index_gen)
            query = index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=False)
            self.run_cbq_query(query=query)
            self.wait_until_indexes_online()
            select_query = f'select count(age) from {collection_namespace} where age >= 0'
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, num_of_docs_per_collection, "Doc count not matching")
        # Checking indexer status after bucket flush
        try:
            num_rollback = self.rest.get_num_rollback_stat(bucket=self.test_bucket)
            self.log.info(f"num_rollback before flush:{num_rollback}")
            task = self.cluster.async_bucket_flush(server=self.master, bucket=self.test_bucket)
            result = task.result(timeout=200)
            self.log.info(result)
            self.sleep(15, "Giving some time to indexer to update indexes after flush")
            rollback = self.rest.get_num_rollback_stat(bucket=self.test_bucket)
            self.log.info(f"num_rollback after flush:{rollback}")
            # self.assertEqual(rollback, num_rollback+1)
            for collection_namespace in self.namespaces:
                select_query = f'select count(age) from {collection_namespace} where age >= 0'
                result = self.run_cbq_query(query=select_query)['results'][0]['$1']
                self.assertEqual(result, 0, "Doc count not matching")
        except Exception as err:
            self.fail(err)

    def test_index_status_with_flush_during_index_building(self):
        """Flush while an index build is running; the built index sees 0 docs."""
        num_of_docs_per_collection = 10 ** 5
        self.prepare_collection_for_indexing(num_of_docs_per_collection=num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]
        index_gen = QueryDefinition(index_name='idx', index_fields=['age', 'city', 'country'])
        query = index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=True)
        self.run_cbq_query(query=query)
        task = self.cluster.async_bucket_flush(server=self.master, bucket=self.test_bucket)
        result = task.result()
        self.log.info(result)
        self.sleep(15, "Giving some time to indexer to update indexes after flush")
        select_query = f'select count(age) from {collection_namespace} where age >= 0'
        try:
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, num_of_docs_per_collection, "Doc count not matching")
        except Exception as err:
            self.log.info(err)
        tasks = self.data_ops_javasdk_loader_in_batches(sdk_data_loader=self.gen_create, batch_size=2 * 10 ** 4)
        for task in tasks:
            task.result()
        # building the index and flushing bucket in parallel
        build_query = index_gen.generate_build_query(namespace=collection_namespace)
        with ThreadPoolExecutor() as executor:
            task1 = executor.submit(self.run_cbq_query, query=build_query)
            task2 = executor.submit(self.cluster.async_bucket_flush, server=self.master, bucket=self.test_bucket)
            out = task2.result()
            self.log.info(out)
            out = task1.result()
            self.log.info(out)
        self.sleep(15, "Giving some time to indexer to update indexes after flush")
        self.wait_until_indexes_online(defer_build=True)
        self.sleep(5)
        result = self.run_cbq_query(query=select_query)['results'][0]['$1']
        self.assertEqual(result, 0, "Doc count not matching")

    def test_index_status_with_node_disconnect_during_flush(self):
        """Take a KV node down mid-flush; index should retain/recover docs."""
        data_nodes = self.get_kv_nodes()
        self.assertTrue(len(data_nodes) >= 2)
        num_of_docs_per_collection = 10 ** 5
        self.prepare_collection_for_indexing(num_of_docs_per_collection=num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]
        index_gen = QueryDefinition(index_name='idx', index_fields=['age', 'city', 'country'])
        query = index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=False)
        self.run_cbq_query(query=query)
        self.wait_until_indexes_online()
        select_query = f'select count(age) from {collection_namespace} where age >= 0'
        result = self.run_cbq_query(query=select_query)['results'][0]['$1']
        self.assertEqual(result, num_of_docs_per_collection, "Doc count not matching")
        try:
            with ThreadPoolExecutor() as executor:
                task1 = executor.submit(self.cluster.async_bucket_flush, server=self.master, bucket=self.test_bucket)
                self.sleep(1)
                # Fix: pass the callable and its argument to submit().
                # The original `executor.submit(self.stop_server(data_nodes[1]))`
                # ran stop_server synchronously and submitted its None return
                # value as the "callable", so task2.result() always raised
                # TypeError instead of exercising the disconnect-during-flush path.
                task2 = executor.submit(self.stop_server, data_nodes[1])
                out2 = task2.result()
                self.log.info(out2)
                out1 = task1.result()
                self.log.info(out1)
            self.sleep(5, "Wait for few secs before bringing node back on")
            self.start_server(data_nodes[1])
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, 0, "Doc count not matching")
        except Exception as err:
            self.log.info(err)
            # Recovery path: bring the node back and expect the flush to have failed.
            self.start_server(data_nodes[1])
            self.sleep(10)
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.log.info(f"Doc count in collection with flush failed due to node disconnect: {result}")
            self.assertTrue(result > 0, "Doc count not matching")
|
# coding=utf-8
'''
This script generates one (or multiple) .sh file that'll handle the generation of training examples (processed .csv files ultimately).
'''
import math
import os.path
import sys
from enum import Enum
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import utilities
# Prefix prepended to data directories in generated commands ('' = current dir).
PARENT_DIR = ''
# Where the generated .sh job scripts are written.
OUTPUT_PATH_FOR_GENERATOR_SCRIPTS = 'data-generation-scripts/'
# Command (trailing space kept so arguments can be concatenated directly).
RAW_INSTANCE_GENERATOR_COMMAND = 'python raw_problem_instance_generator.py '
RAW_DATA_DIRECTORY = 'data-raw/'
# Command that parses raw .in/.out instances into processed .csv files.
PARSER_SCRIPT_COMMAND = 'python parser_raw_to_csv.py '
PROCESSED_DATA_DIRECTORY = 'data-processed/'
# Amount of jobs in which the whole task will be divided.
# If the data generation is not serial, this should be an even number, so each generation + parsing task is
# contained in a single job.
JOB_AMOUNT = 1  # TODO make this not matter when serial data generation is used.
AMOUNT_OF_TEST_PROBLEM_INSTANCES_TO_GENERATE = 0
AMOUNT_OF_TRAINING_PROBLEM_INSTANCES_TO_GENERATE = 10
SUPPORTED_PROBLEM_SIZES = {}  # Loaded at runtime (task amount -> machine amount).
# Whether to generate data serially (so one job will basically generate all of the .in and .out pairs for
# all types of problems FIRST, while the rest of the jobs can be executed in parallel later) or not.
SERIAL_RAW_DATA_GENERATION = True
# List of lists where:
# Element at index 0 is the task heterogeneity (possible values: 0, 1)
# Element at index 1 is the machine heterogeneity (possible values: 0, 1)
# Element at index 2 is the consistency type (possible values: 0, 1, 2)
SUPPORTED_PROBLEM_TYPES = [
    [0, 0, 0]
]
MACHINE_AMOUNT = 16
MAX_TASK_AMOUNT = 1024
# Problems need strictly more tasks than machines, hence the +1.
MIN_TASK_AMOUNT = MACHINE_AMOUNT + 1
class InstanceTypes(Enum):
    """Kind of problem instance being generated: training vs. held-out test."""
    Training = 1
    Test = 2
def main():
    """Build every generation job script: first the raw-instance job, then
    the raw->csv parsing jobs (order matters: sizes must be populated first)."""
    populate_supported_problem_sizes()
    generate_raw_data_generation_script()
    generate_processed_data_generation_script()
    print('Job scripts created at ' + OUTPUT_PATH_FOR_GENERATOR_SCRIPTS)
def generate_processed_data_generation_script():
    """Write job-<i>.sh scripts that turn raw instances into processed .csv files.

    Commands for every (problem type, problem size, instance type)
    combination are collected and then split evenly across JOB_AMOUNT
    script files.

    Fix: the job files are now written inside ``with`` blocks so the file
    handles are closed even if a write fails.
    """
    commands_to_append_to_generator_script = []
    utilities.generate_dir(OUTPUT_PATH_FOR_GENERATOR_SCRIPTS)
    for problem_type in SUPPORTED_PROBLEM_TYPES:
        task_heterogeneity = problem_type[0]
        machine_heterogeneity = problem_type[1]
        consistency_type = problem_type[2]
        # For each problem type or dimension
        for task_amount in SUPPORTED_PROBLEM_SIZES.keys():
            machines = SUPPORTED_PROBLEM_SIZES[task_amount]
            for instance_type in InstanceTypes:
                directory_suffix = get_directory_suffix_for_instance_type(instance_type)
                amount_of_instances_to_generate = get_amount_of_instances_to_generate_for_instance_type(instance_type)
                append_commands_to_generate_processed_instances(amount_of_instances_to_generate,
                                                                commands_to_append_to_generator_script,
                                                                consistency_type, directory_suffix,
                                                                machine_heterogeneity, machines, task_amount,
                                                                task_heterogeneity)
    # Adjust command amount per job to actually generate JOB_AMOUNT jobs
    commands_per_job = int(math.ceil(len(commands_to_append_to_generator_script) / float(JOB_AMOUNT)))
    for index, job in enumerate(
            utilities.split_list_in_chunks(commands_to_append_to_generator_script, commands_per_job)):
        with open(OUTPUT_PATH_FOR_GENERATOR_SCRIPTS + 'job-' + str(index) + '.sh', 'w') as job_file:
            for command in job:
                job_file.write(command + '\n')
def get_directory_suffix_for_instance_type(instance_type):
    """Directory suffix ('/test/' or '/training/') for the given instance type."""
    suffixes = {
        InstanceTypes.Test: '/test/',
        InstanceTypes.Training: '/training/',
    }
    return suffixes.get(instance_type)
def get_amount_of_instances_to_generate_for_instance_type(instance_type):
    """How many problem instances to generate for the given instance type."""
    if instance_type == InstanceTypes.Training:
        return AMOUNT_OF_TRAINING_PROBLEM_INSTANCES_TO_GENERATE
    if instance_type == InstanceTypes.Test:
        return AMOUNT_OF_TEST_PROBLEM_INSTANCES_TO_GENERATE
def populate_supported_problem_sizes():
    """Fill SUPPORTED_PROBLEM_SIZES: every supported task amount maps to
    the fixed MACHINE_AMOUNT of machines."""
    SUPPORTED_PROBLEM_SIZES.update(
        (task_amount, MACHINE_AMOUNT)
        for task_amount in range(MIN_TASK_AMOUNT, MAX_TASK_AMOUNT + 1)
    )
def generate_raw_data_generation_script():
    """Write the single job script that generates all raw .in/.out instances.

    Consistency fix: reuses get_directory_suffix_for_instance_type() and
    get_amount_of_instances_to_generate_for_instance_type() instead of
    duplicating their if/elif logic inline, so the two script generators
    cannot drift apart.
    """
    commands_to_append_to_generator_script = []
    # Generate jobs directory
    utilities.generate_dir(OUTPUT_PATH_FOR_GENERATOR_SCRIPTS)
    if SERIAL_RAW_DATA_GENERATION:
        print('Working with serial raw data generation')
        # Create serial job that generates training examples (doesn't handle parsing)
        # TODO Make ranges configurable (maybe I just want to generate 000 and not 001 and so on).
        for problem_type in SUPPORTED_PROBLEM_TYPES:
            task_heterogeneity = problem_type[0]
            machine_heterogeneity = problem_type[1]
            consistency_type = problem_type[2]
            # For each problem type or dimension
            for task_amount in SUPPORTED_PROBLEM_SIZES.keys():
                machines = SUPPORTED_PROBLEM_SIZES[task_amount]
                for instance_type in InstanceTypes:
                    dir_suffix = get_directory_suffix_for_instance_type(instance_type)
                    amount_of_instances_to_generate = \
                        get_amount_of_instances_to_generate_for_instance_type(instance_type)
                    append_commands_to_generate_raw_instances(amount_of_instances_to_generate,
                                                              commands_to_append_to_generator_script, consistency_type,
                                                              dir_suffix, machine_heterogeneity, task_amount, machines,
                                                              task_heterogeneity)
    output_data_generation_script(commands_to_append_to_generator_script)
def append_commands_to_generate_raw_instances(amount_of_instances_to_generate, commands_to_append_to_generator_script,
                                              consistency_type, directory_suffix, machine_heterogeneity, task_amount,
                                              machines, task_heterogeneity):
    """Append one raw-instance-generation command to the command list and
    create its output directory. Does nothing when no instances are requested."""
    if amount_of_instances_to_generate <= 0:
        return
    # sub_dir identifies the problem instance: <tasks>x<machines>-<THet><MHet><consistency><suffix>
    sub_dir = '{0}x{1}-{2}{3}{4}{5}'.format(task_amount, machines, task_heterogeneity,
                                            machine_heterogeneity, consistency_type, directory_suffix)
    directory = RAW_DATA_DIRECTORY + sub_dir
    command = RAW_INSTANCE_GENERATOR_COMMAND + ' '.join([
        str(task_amount), str(machines),
        str(task_heterogeneity), str(machine_heterogeneity),
        str(consistency_type), str(amount_of_instances_to_generate),
        directory,
    ])
    commands_to_append_to_generator_script.append(command)
    # Output directory is generated for later use
    utilities.generate_dir(directory)
def output_data_generation_script(commands_to_append_to_generator_script):
    """Write all raw-data-generation commands into one job script.

    Fix: the script file is now written inside a ``with`` block so the
    handle is closed even if a write fails.
    """
    script_path = OUTPUT_PATH_FOR_GENERATOR_SCRIPTS + 'job-generate-raw-data' + '.sh'
    with open(script_path, 'w') as script_file:
        for command in commands_to_append_to_generator_script:
            script_file.write(command + '\n')
    print('Raw data generation script generated in ' + OUTPUT_PATH_FOR_GENERATOR_SCRIPTS)
def append_commands_to_generate_processed_instances(amount_of_instances_to_generate,
                                                    commands_to_append_to_generator_script, consistency_type,
                                                    directory_suffix, machine_heterogeneity, machines, task_amount,
                                                    task_heterogeneity):
    """Append one raw->csv parsing command to the command list and create its
    output directory. Does nothing when no instances are requested."""
    if amount_of_instances_to_generate <= 0:
        return
    # sub_dir identifies the problem instance: <tasks>x<machines>-<THet><MHet><consistency><suffix>
    sub_dir = '{0}x{1}-{2}{3}{4}{5}'.format(task_amount, machines, task_heterogeneity,
                                            machine_heterogeneity, consistency_type, directory_suffix)
    # TODO split commands in N arrays (maybe using chunks utility) if not using serial generation.
    directory = PROCESSED_DATA_DIRECTORY + sub_dir
    command = (PARSER_SCRIPT_COMMAND + str(amount_of_instances_to_generate) + ' ' + PARENT_DIR
               + RAW_DATA_DIRECTORY + sub_dir + ' ' + PARENT_DIR + PROCESSED_DATA_DIRECTORY + sub_dir)
    commands_to_append_to_generator_script.append(command)
    # Output directory is generated for later use
    utilities.generate_dir(directory)
# Script entry point: build all data-generation job scripts.
if __name__ == '__main__':
    main()
|
import pytest
import schemathesis
@pytest.fixture(autouse=True)
def unregister_global():
    """After every test, clear any globally registered schemathesis auth so
    tests cannot leak authentication state into each other."""
    yield
    schemathesis.auth.unregister()
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import unittest
import fedlearner
import test_nn_trainer
import numpy as np
import unittest
import threading
import random
import os
import time
import logging
from multiprocessing import Process
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from queue import PriorityQueue
import enum
from tensorflow.core.example.feature_pb2 import FloatList, Features, Feature, \
Int64List, BytesList
from tensorflow.core.example.example_pb2 import Example
import numpy as np
from fedlearner.data_join import (
data_block_manager, common,
data_block_visitor, raw_data_manifest_manager
)
from fedlearner.common import (
db_client, common_pb2 as common_pb,
data_join_service_pb2 as dj_pb,
trainer_master_service_pb2 as tm_pb
)
from fedlearner.data_join.data_block_manager import DataBlockBuilder
from fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem
from fedlearner.trainer_master.leader_tm import LeaderTrainerMaster
from fedlearner.trainer_master.follower_tm import FollowerTrainerMaster
class TestDataSource(object):
    """Builds an on-disk fedlearner data source (leader or follower role) and
    lets tests append TF-Record data blocks to its partitions."""

    def __init__(self, base_path, name, role, partition_num=1,
                 start_time=0, end_time=100000):
        # Map the textual role to its numeric value (0 = leader, 1 = follower).
        if role == 'leader':
            role = 0
        elif role == 'follower':
            role = 1
        else:
            raise ValueError("Unknown role %s"%role)
        data_source = common_pb.DataSource()
        data_source.data_source_meta.name = name
        data_source.data_source_meta.partition_num = partition_num
        data_source.data_source_meta.start_time = start_time
        data_source.data_source_meta.end_time = end_time
        data_source.output_base_dir = "{}/{}_{}/data_source/".format(
            base_path, data_source.data_source_meta.name, role)
        data_source.role = role
        # Start from a clean directory so repeated runs don't see stale blocks.
        if gfile.Exists(data_source.output_base_dir):
            gfile.DeleteRecursively(data_source.output_base_dir)
        self._data_source = data_source
        self._kv_store = db_client.DBClient("etcd", True)
        common.commit_data_source(self._kv_store, self._data_source)
        self._dbms = []
        for i in range(partition_num):
            # Mark each partition's example-join state as finished so data
            # blocks added later can be served immediately.
            manifest_manager = raw_data_manifest_manager.RawDataManifestManager(
                self._kv_store, self._data_source)
            manifest_manager._finish_partition('join_example_rep',
                dj_pb.JoinExampleState.UnJoined, dj_pb.JoinExampleState.Joined,
                -1, i)
            self._dbms.append(
                data_block_manager.DataBlockManager(self._data_source, i))

    def add_data_block(self, partition_id, x, y):
        """Append one TF-Record data block to `partition_id`.

        Args:
            partition_id: index into the partition managers built in __init__.
            x: 2-D array of features; one Example is written per row.
            y: optional 1-D array of integer labels (skipped when None).

        Returns:
            Whatever DataBlockBuilder.finish_data_block() returns for the
            completed block.
        """
        dbm = self._dbms[partition_id]
        builder = DataBlockBuilder(
            common.data_source_data_block_dir(self._data_source),
            self._data_source.data_source_meta.name, partition_id,
            dbm.get_dumped_data_block_count(),
            dj_pb.WriterOptions(output_writer="TF_RECORD"), None)
        builder.set_data_block_manager(dbm)
        for i in range(x.shape[0]):
            feat = {}
            # The row index doubles as the example id and the event time.
            exam_id = '{}'.format(i).encode()
            feat['example_id'] = Feature(
                bytes_list=BytesList(value=[exam_id]))
            feat['event_time'] = Feature(
                int64_list = Int64List(value=[i])
            )
            feat['x'] = Feature(float_list=FloatList(value=list(x[i])))
            if y is not None:
                feat['y'] = Feature(int64_list=Int64List(value=[y[i]]))
            example = Example(features=Features(feature=feat))
            builder.append_item(TfExampleItem(example.SerializeToString()), i, 0)
        return builder.finish_data_block()
class TestOnlineTraining(unittest.TestCase):
    """End-to-end check that leader/follower trainer masters serve newly
    added data blocks, in order, during online training."""

    def test_online_training(self):
        # Leader side: one initial data block, then a master serving it on 50051.
        leader_ds = TestDataSource('./output', 'test_ds', 'leader')
        leader_ds.add_data_block(0, np.zeros((100, 10)), np.zeros((100,), dtype=np.int32))
        # NOTE(review): accessed via the full package path rather than the
        # imported LeaderTrainerMaster name — assumes fedlearner exposes
        # these submodules as attributes; confirm against the package __init__.
        leader_tm = fedlearner.trainer_master.leader_tm.LeaderTrainerMaster(
            'leader_test', 'test_ds', None, None, True, False, 1)
        leader_thread = threading.Thread(target=leader_tm.run, args=(50051,))
        leader_thread.daemon = True
        leader_thread.start()
        # Follower side mirrors the leader on port 50052.
        follower_ds = TestDataSource('./output', 'test_ds', 'follower')
        follower_ds.add_data_block(0, np.zeros((100, 10)), np.zeros((100,), dtype=np.int32))
        follower_tm = fedlearner.trainer_master.follower_tm.FollowerTrainerMaster(
            'follower_test', 'test_ds', True)
        follower_thread = threading.Thread(target=follower_tm.run, args=(50052,))
        follower_thread.daemon = True
        follower_thread.start()
        # Leader client: the first block, and a block added after startup,
        # must be served in creation order.
        leader_tmc = fedlearner.trainer.trainer_master_client.TrainerMasterClient(
            'localhost:50051', 'leader', 0)
        leader_tmc.restore_data_block_checkpoint('leader_test', [])
        block1 = leader_tmc.request_data_block().block_id
        self.assertEqual(block1, 'test_ds.partition_0000.00000000.0-99')
        leader_ds.add_data_block(0, np.zeros((100, 10)), np.zeros((100,), dtype=np.int32))
        block2 = leader_tmc.request_data_block().block_id
        self.assertEqual(block2, 'test_ds.partition_0000.00000001.0-99')
        # Follower client requests the same block ids by name and must get them.
        follower_tmc = fedlearner.trainer.trainer_master_client.TrainerMasterClient(
            'localhost:50052', 'follower', 0)
        follower_tmc.restore_data_block_checkpoint('follower_test', [])
        self.assertEqual(block1, follower_tmc.request_data_block(block1).block_id)
        follower_ds.add_data_block(0, np.zeros((100, 10)), np.zeros((100,), dtype=np.int32))
        self.assertEqual(block2, follower_tmc.request_data_block(block2).block_id)
if __name__ == '__main__':
    # Verbose logging helps debug the trainer-master interactions.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
from __future__ import absolute_import
from google.appengine.api import urlfetch
class Downloader(object):
    """Thin wrapper around App Engine's urlfetch service."""

    def html(self, url, timeout=5):
        """Fetch `url` and return the raw response body.

        Args:
            url: the URL to download.
            timeout: request deadline in seconds (urlfetch `deadline`).
        """
        result = urlfetch.fetch(url=url, deadline=timeout)
        return result.content
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.job.regularizer_conf_pb2 as regularizer_conf_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("regularizers.l1_l2")
def l1_l2_regularizer(
l1: float = 0.01, l2: float = 0.01
) -> regularizer_conf_util.RegularizerConf:
"""This operator creates a L1 and L2 weight regularizer.
Args:
l1 (float, optional): The L1 regularization coefficient. Defaults to 0.01.
l2 (float, optional): The L2 regularization coefficient. Defaults to 0.01.
Returns:
regularizer_conf_util.RegularizerConf: A regularizer that can be used in other layers or operators.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def conv2d_l1_l2_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
regularizer = flow.regularizers.l1_l2(l1=0.001, l2=0.001)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
name="Conv2d"
)
return conv2d
x = np.random.randn(1, 256, 32, 32).astype(np.float32)
out = conv2d_l1_l2_Job(x)
"""
regularizer = regularizer_conf_util.RegularizerConf()
setattr(regularizer.l1_l2_conf, "l1", l1)
setattr(regularizer.l1_l2_conf, "l2", l2)
return regularizer
@oneflow_export("regularizers.l1")
def l1_regularizer(l: float = 0.01) -> regularizer_conf_util.RegularizerConf:
"""This operator creates a L1 weight regularizer.
Args:
l (float, optional): The L1 regularization coefficient. Defaults to 0.01.
Returns:
regularizer_conf_util.RegularizerConf: A regularizer that can be used in other layers or operators.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def conv2d_l1_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
regularizer = flow.regularizers.l1(l=0.001)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
name="Conv2d"
)
return conv2d
x = np.random.randn(1, 256, 32, 32).astype(np.float32)
out = conv2d_l1_Job(x)
"""
return l1_l2_regularizer(l1=l, l2=0.0)
@oneflow_export("regularizers.l2")
def l2_regularizer(l: float = 0.01) -> regularizer_conf_util.RegularizerConf:
"""This operator creates a L2 weight regularizer.
Args:
l (float, optional): The L2 regularization coefficient. Defaults to 0.01.
Returns:
regularizer_conf_util.RegularizerConf: A regularizer that can be used in other layers or operators.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def conv2d_l2_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
regularizer = flow.regularizers.l2(l=0.001)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
name="Conv2d"
)
return conv2d
x = np.random.randn(1, 256, 32, 32).astype(np.float32)
out = conv2d_l2_Job(x)
"""
return l1_l2_regularizer(l1=0.0, l2=l)
|
#!/usr/bin/python3
# -*- coding : UTF-8 -*-
"""
Name : Pyrix/Exceptions\n
Author : Abhi-1U <https://github.com/Abhi-1U>\n
Description : Exceptions are implemented here \n
Encoding : UTF-8\n
Version :0.7.19\n
Build :0.7.19/21-12-2020
"""
from pyrix.exception.pyrixexceptions import (
binaryMatrixException,
bitWiseOnMatrix,
divisionErrorException,
incompaitableTypeException,
nonInvertibleException,
) |
# Read a temperature in Celsius from stdin and print it in Fahrenheit.
# Fix: corrected the typo "teperatura" -> "temperatura" in the prompt.
C = float(input('Informe a temperatura em °C: '))
F = ((9 * C) / 5) + 32  # F = 9C/5 + 32
print('A temperatura de {}°C corresponde a {}°F.'.format(C, F))
|
"""Functions to visualize human poses"""
#import matplotlib.pyplot as plt
import data_utils
import numpy as np
import h5py
import os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def show3Dpose(channels, ax, lcolor="#3498db", rcolor="#e74c3c", add_labels=False): # blue, orange
    """
    Visualize a 3d skeleton

    Args
      channels: 96x1 vector. The pose to plot.
      ax: matplotlib 3d axis to draw on
      lcolor: color for left part of the body
      rcolor: color for right part of the body
      add_labels: whether to add coordinate labels
    Returns
      Nothing. Draws on ax.
    """
    # 32 H3.6M joints x 3 coordinates = 96 expected values.
    assert channels.size == len(data_utils.H36M_NAMES)*3, "channels should have 96 entries, it has %d instead" % channels.size
    vals = np.reshape( channels, (len(data_utils.H36M_NAMES), -1) )

    # Bone endpoints (joint indices); LR marks which bones get lcolor (True).
    I = np.array([1,2,3,1,7,8,1, 13,14,15,14,18,19,14,26,27])-1 # start points
    J = np.array([2,3,4,7,8,9,13,14,15,16,18,19,20,26,27,28])-1 # end points
    LR = np.array([1,1,1,0,0,0,0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)

    # Make connection matrix
    for i in np.arange( len(I) ):
        x, y, z = [np.array( [vals[I[i], j], vals[J[i], j]] ) for j in range(3)]
        ax.plot(x, y, z, lw=2, c=lcolor if LR[i] else rcolor)
        #ax.text(x,y,z,i)

    RADIUS = 750 # space around the subject
    # Center the viewing cube on the root joint (joint 0).
    xroot, yroot, zroot = vals[0,0], vals[0,1], vals[0,2]
    ax.set_xlim3d([-RADIUS+xroot, RADIUS+xroot])
    ax.set_zlim3d([-RADIUS+zroot, RADIUS+zroot])
    ax.set_ylim3d([-RADIUS+yroot, RADIUS+yroot])

    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")

    # Get rid of the ticks and tick labels
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_zticks([])
    ax.get_xaxis().set_ticklabels([])
    ax.get_yaxis().set_ticklabels([])
    ax.set_zticklabels([])
    # NOTE(review): set_aspect('equal') raises NotImplementedError on 3D axes
    # in several matplotlib versions -- confirm against the pinned version.
    ax.set_aspect('equal')

    # Get rid of the panes (actually, make them white)
    white = (1.0, 1.0, 1.0, 0.0)
    # NOTE(review): ax.w_xaxis et al. are deprecated and removed in
    # matplotlib >= 3.8 (use ax.xaxis) -- confirm the matplotlib version.
    ax.w_xaxis.set_pane_color(white)
    ax.w_yaxis.set_pane_color(white)
    # Keep z pane

    # Get rid of the lines in 3d
    ax.w_xaxis.line.set_color(white)
    ax.w_yaxis.line.set_color(white)
    ax.w_zaxis.line.set_color(white)
def show2Dpose(channels, ax, lcolor="#3498db", rcolor="#e74c3c", add_labels=False):
    """
    Visualize a 2d skeleton

    Args
      channels: 64x1 vector. The pose to plot.
      ax: matplotlib axis to draw on
      lcolor: color for left part of the body
      rcolor: color for right part of the body
      add_labels: whether to add coordinate labels
    Returns
      Nothing. Draws on ax.
    """
    # 32 H3.6M joints x 2 coordinates = 64 expected values.
    assert channels.size == len(data_utils.H36M_NAMES)*2, "channels should have 64 entries, it has %d instead" % channels.size
    vals = np.reshape( channels, (len(data_utils.H36M_NAMES), -1) )

    # Bone endpoints and left/right colour flags.
    # NOTE(review): these 15-entry tables differ from the 16-entry tables in
    # show3Dpose (e.g. no 15->16 bone) -- confirm this is intentional.
    I = np.array([1,2,3,1,7,8,1, 13,14,14,18,19,14,26,27])-1 # start points
    J = np.array([2,3,4,7,8,9,13,14,16,18,19,20,26,27,28])-1 # end points
    LR = np.array([1,1,1,0,0,0,0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)

    # Make connection matrix
    for i in np.arange( len(I) ):
        x, y = [np.array( [vals[I[i], j], vals[J[i], j]] ) for j in range(2)]
        ax.plot(x, y, lw=2, c=lcolor if LR[i] else rcolor)

    # Get rid of the ticks
    ax.set_xticks([])
    ax.set_yticks([])

    # Get rid of tick labels
    ax.get_xaxis().set_ticklabels([])
    ax.get_yaxis().set_ticklabels([])

    RADIUS = 350 # space around the subject
    # Center the view window on the root joint (joint 0).
    xroot, yroot = vals[0,0], vals[0,1]
    ax.set_xlim([-RADIUS+xroot, RADIUS+xroot])
    ax.set_ylim([-RADIUS+yroot, RADIUS+yroot])
    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("z")

    ax.set_aspect('equal')
def connect_points2():
    """Return the joint-connection table for the 16-joint skeleton.

    Row ``j`` lists up to four joint indices connected to joint ``j``;
    unused slots are padded with -1.
    """
    adjacency = [
        [6, 1, -1, -1],    # 0
        [2, -1, -1, -1],   # 1
        [-1, -1, -1, -1],  # 2
        [6, 4, -1, -1],    # 3
        [3, 5, -1, -1],    # 4
        [-1, -1, -1, -1],  # 5
        [0, 3, 7, -1],     # 6
        [8, 10, 13, 6],    # 7
        [9, 7, -1, -1],    # 8
        [-1, -1, -1, -1],  # 9
        [7, 11, -1, -1],   # 10
        [12, -1, -1, -1],  # 11
        [-1, -1, -1, -1],  # 12
        [7, 14, -1, -1],   # 13
        [13, 15, -1, -1],  # 14
        [-1, -1, -1, -1],  # 15
    ]
    return np.array(adjacency)
def connect_points32():
    """Return the joint-connection table for the 32-joint skeleton.

    Row ``j`` lists up to four joint indices connected to joint ``j``;
    unused slots are padded with -1.
    """
    adjacency = [
        [6, 1, 12, -1],    # 0
        [2, 0, -1, -1],    # 1
        [1, 3, -1, -1],    # 2
        [2, -1, -1, -1],   # 3
        [-1, -1, -1, -1],  # 4
        [-1, -1, -1, -1],  # 5
        [0, 7, -1, -1],    # 6
        [6, 8, -1, -1],    # 7
        [7, -1, -1, -1],   # 8
        [-1, -1, -1, -1],  # 9
        [-1, -1, -1, -1],  # 10
        [-1, -1, -1, -1],  # 11
        [0, 13, -1, -1],   # 12
        [12, 17, 25, 14],  # 13
        [13, 15, -1, -1],  # 14
        [-1, -1, -1, -1],  # 15
        [-1, -1, -1, -1],  # 16
        [18, 13, -1, -1],  # 17
        [19, -1, -1, -1],  # 18
        [-1, -1, -1, -1],  # 19
        [-1, -1, -1, -1],  # 20
        [-1, -1, -1, -1],  # 21
        [-1, -1, -1, -1],  # 22
        [-1, -1, -1, -1],  # 23
        [-1, -1, -1, -1],  # 24
        [13, 26, -1, -1],  # 25
        [27, 25, -1, -1],  # 26
        [-1, -1, -1, -1],  # 27
        [-1, -1, -1, -1],  # 28
        [-1, -1, -1, -1],  # 29
        [-1, -1, -1, -1],  # 30
        [-1, -1, -1, -1],  # 31
    ]
    return np.array(adjacency)
def plotNoisySkeleton(skeleton1, skeleton2, changed):
    """Overlay two 3d skeletons: skeleton1 in green, skeleton2 in red.

    Args
      skeleton1: flat array of 3d joint coordinates (reference pose).
      skeleton2: flat array of 3d joint coordinates (perturbed pose).
      changed: iterable of joint indices that were perturbed; shown in title.
    Returns
      Nothing. Opens a matplotlib window via plt.show().
    """
    fig = plt.figure()
    connect = True
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlim3d(-750, 750)
    ax.set_ylim3d(-750, 750)
    ax.set_zlim3d(0, 2000)
    col = ['g', 'r']
    count = 0
    # Fix: the connection table is constant, but the original rebuilt it with
    # connect_points32() once per joint per skeleton; hoist it out of the loops.
    c = connect_points32()
    for skeleton in [skeleton1, skeleton2]:
        data = np.reshape(skeleton, (-1, 3))
        x = data[:, 0]
        y = data[:, 1]
        z = data[:, 2]
        #ax.scatter(x, y, z, c='b')
        for i in range(data.shape[0]):
            #ax.text(x[i], y[i], z[i], i)
            if connect == True:
                # draw a segment from joint i to each of its children
                for child in c[i, :]:
                    if child == -1:
                        continue
                    # otherwise fetch that point from data
                    x_c, y_c, z_c = data[child, :]
                    ax.plot([x[i], x_c], [y[i], y_c], [z[i], z_c], c=col[count])
        count = 1  # second skeleton switches to red
    plt.xlabel('X')
    plt.ylabel('Y')
    changed = ','.join(str(x) for x in changed)
    plt.title('Joints changed :' + changed)
    plt.show()
def plotFlip(models, connect=False, c=None):
    """Just plots 3dpoints and what connector to use.

    Args
      models: iterable of flat 3d joint arrays, one per pose.
      connect: if True, draw bones using connection table ``c``.
      c: joint-connection table; defaults to connect_points2().
    """
    # Fix: the original used ``c=connect_points2()`` as the default, which is
    # evaluated once at import time and shares one mutable ndarray across all
    # calls; resolve the default at call time instead (same values).
    if c is None:
        c = connect_points2()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlim3d(-1000, 1000)
    ax.set_ylim3d(-1000, 1000)
    ax.set_zlim3d(0, 2000)
    for model in models:
        data = np.reshape(model, (-1, 3))
        x = data[:, 0]
        y = data[:, 1]
        z = data[:, 2]
        #ax.scatter(x, y, z, c='r')
        for i in range(data.shape[0]):
            #ax.text(x[i], y[i], z[i], i)
            if connect == True:
                # draw a segment from joint i to each of its children
                for child in c[i, :]:
                    if child == -1:
                        continue
                    # otherwise fetch that point from data
                    x_c, y_c, z_c = data[child, :]
                    ax.plot([x[i], x_c], [y[i], y_c], [z[i], z_c])
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.show()
def plot(arrayOf3Djoints, connect=False, c=None):
    """Just plots 3dpoints and what connector to use.

    Args
      arrayOf3Djoints: flat array of 3d joint coordinates for one pose.
      connect: if True, draw bones using connection table ``c``.
      c: joint-connection table; defaults to connect_points2().
    """
    # Fix: the original used ``c=connect_points2()`` as the default, which is
    # evaluated once at import time and shares one mutable ndarray across all
    # calls; resolve the default at call time instead (same values).
    if c is None:
        c = connect_points2()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlim3d(-1000, 1000)
    ax.set_ylim3d(-1000, 1000)
    ax.set_zlim3d(0, 2000)
    data = np.reshape(arrayOf3Djoints, (-1, 3))
    x = data[:, 0]
    y = data[:, 1]
    z = data[:, 2]
    #ax.scatter(x, y, z, c='r')
    for i in range(data.shape[0]):
        #ax.text(x[i], y[i], z[i], i)
        if connect == True:
            # draw a segment from joint i to each of its children
            for child in c[i, :]:
                if child == -1:
                    continue
                # otherwise fetch that point from data
                x_c, y_c, z_c = data[child, :]
                ax.plot([x[i], x_c], [y[i], y_c], [z[i], z_c])
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.show()
def plotTrail(trail, trailRot):
    """Show trailing plots of points in different frames after rotation."""
    # Only the first 50 values are kept; reshape into (x, y) pairs.
    trail = np.reshape(trail[:50], (-1, 2))
    trailRot = np.reshape(trailRot[:50], (-1, 2))
    fig, ax = plt.subplots()
    # Original trail: red markers joined by a line.
    ax.scatter(trail[:, 0], trail[:, 1], c='r')
    ax.plot(trail[:, 0], trail[:, 1])
    # for i in range(trail.shape[0]):
    #     ax.text(trail[:,0][i], trail[:,1][i],i)
    # Rotated trail: blue markers joined by a line.
    ax.scatter(trailRot[:, 0], trailRot[:, 1], c='b')
    ax.plot(trailRot[:, 0], trailRot[:, 1])
    # for i in range(trailRot.shape[0]):
    #     ax.text(trailRot[:,0][i], trailRot[:,1][i],i)
    plt.show()
def plotTrail2(trail, trailRot, str, str2):
    """Plot the root-node trajectories of two pose sequences for comparison.

    Args:
        trail: flat sequence of 3d joint values for the first sequence.
        trailRot: flat sequence of 3d joint values for the second sequence.
        str: legend label for the first trajectory.
            NOTE(review): shadows the builtin ``str``; renaming would break
            keyword callers, so it is only flagged here.
        str2: legend label for the second trajectory.
    """
    fig, ax = plt.subplots()
    aofp = np.array(trail, dtype=float)
    aofp = np.reshape(aofp, (-1, 3)).T
    aofp2 = np.array(trailRot, dtype=float)
    aofp2 = np.reshape(aofp2, (-1, 3)).T
    # should be every 32nd node
    # find the root node, every 32,33,34th value
    rootNodes = aofp[:, 0::32]
    ax.plot(rootNodes[0], rootNodes[1], c='r')
    rootNodes2 = aofp2[:, 0::32]
    ax.plot(rootNodes2[0], rootNodes2[1], c='b')
    plt.gca().legend((str,str2))
    plt.show()
|
import os.path
from django.test import TestCase
from dojo.tools.trufflehog.parser import TruffleHogParser
from dojo.models import Test
def sample_path(file_name):
    """Return the path of a TruffleHog scan fixture in the unittest tree."""
    base = "dojo/unittests/scans/trufflehog"
    return os.path.join(base, file_name)
class TestTruffleHogParser(TestCase):
    """Unit tests for TruffleHogParser."""

    def test_many_vulns(self):
        """Parse the many_vulns.json fixture and spot-check the findings."""
        # Fix: the original leaked the open file handle; close it with a
        # context manager once the parser has consumed it.
        with open(sample_path("many_vulns.json")) as test_file:
            parser = TruffleHogParser()
            findings = parser.get_findings(test_file, Test())
        self.assertEqual(len(findings), 18)
        finding = findings[0]
        self.assertEqual("Medium", finding.severity)
        self.assertEqual(798, finding.cwe)
        self.assertEqual('test_all.py', finding.file_path)
|
from .imagenet import imagenet_iterator
from .cifar10 import cifar10_iterator
from .cifar100 import cifar100_iterator
|
import h5py
import numpy as np
import os
import gzip
import sys
sys.path.append("../../..")
from topview.common import get_big_data_path
def openh5(cloud_path, label_path):
    """Load an ITOP point-cloud h5 file and its matching labels h5 file.

    Args:
        cloud_path: path to the ITOP_*_point_cloud.h5 file.
        label_path: path to the ITOP_*_labels.h5 file.

    Returns:
        A list of per-frame dicts with keys "id", "is_valid",
        "real_world_coordinates", "segmentation" and "cloud". Only frames
        flagged valid are kept, and each cloud is reduced to body points.
    """
    cloud_file = h5py.File(cloud_path, 'r')
    cloud_ids, clouds = cloud_file.get('id'), cloud_file.get('data')
    cloud_ids, clouds = np.asarray(cloud_ids), np.asarray(clouds)
    label_file = h5py.File(label_path, 'r')
    label_ids, is_valid, real_world_coordinates, segmentation = \
        label_file.get('id'), label_file.get('is_valid'), label_file.get('real_world_coordinates'), label_file.get('segmentation')
    label_ids, is_valid, real_world_coordinates, segmentation = \
        np.asarray(label_ids), np.asarray(is_valid), np.asarray(real_world_coordinates), np.asarray(segmentation),

    def byte_arr_to_str_arr(byte_arr):
        # h5py returns the ids as bytes; decode to plain strings.
        return [el.decode("utf-8") for el in byte_arr]

    cloud_ids, label_ids = byte_arr_to_str_arr(cloud_ids), byte_arr_to_str_arr(label_ids)
    # check if all elements are equal (both files must describe the same frames)
    assert(cloud_ids == label_ids)
    arrays = [label_ids, is_valid, real_world_coordinates, segmentation, clouds]
    frames = [
        {
            "id": x[0],
            "is_valid": x[1],
            "real_world_coordinates": x[2],
            "segmentation": x[3],
            "cloud": x[4]
        } for x in zip(*arrays)
    ]
    # Drop frames that the label file marks as invalid.
    frames = list(filter(lambda frame: frame["is_valid"] == 1, frames))

    def transform_id_to_correct_format(frame):
        # Insert "00" after the third character of the id (id scheme used
        # downstream by read_itop's person-id renumbering).
        frame["id"] = frame["id"][:3] + "00" + frame["id"][3:]
        return frame

    frames = list(map(transform_id_to_correct_format, frames))

    def filter_point_clouds(frame):
        '''Only retained those point clouds, that are ok after segmentation'''
        # Keep only pixels with a segmentation label (!= -1).
        body_coords = np.where(frame["segmentation"] != -1)
        # Clouds are stored per pixel of a 240x320 depth image -- assumes a
        # fixed camera resolution; TODO confirm for other ITOP variants.
        frame["cloud"] = frame["cloud"].reshape(240, 320, 3)[body_coords].reshape(-1, 3)
        return frame

    frames = list(map(filter_point_clouds, frames))
    return frames
# train: 16 people; test: 4 people
# mode: train / test
def itop_file_path(data_dir, viewpoint, mode, type):
    """Build the path of a raw ITOP h5 file.

    Args:
        data_dir: dataset root directory.
        viewpoint: camera viewpoint, e.g. "top" or "side".
        mode: dataset split, "train" or "test".
        type: file kind, e.g. "point_cloud" or "labels".
    """
    filename = f"ITOP_{viewpoint}_{mode}_{type}.h5"
    return f"{data_dir}/raw/{filename}"
def read_itop(viewpoint, mode):
    """Load all valid ITOP frames for one viewpoint/split, renumbering ids.

    Args:
        viewpoint: "top" or "side".
        mode: "train" (16 people) or "test" (4 people).

    Returns:
        List of frame dicts (see openh5) with person ids renumbered so train
        people occupy '00'-'15' and test people '16'-'19'.
    """
    DATA_DIR = f"{get_big_data_path()}/ITOP"
    cloud_path = itop_file_path(DATA_DIR, viewpoint, mode, "point_cloud")
    label_path = itop_file_path(DATA_DIR, viewpoint, mode, "labels")
    frames = openh5(cloud_path, label_path)
    print("read_itop", viewpoint)

    def get_frame_person_id(frame):
        # The first two characters of a frame id encode the person.
        return frame["id"][:2]

    print("person ids", set(map(get_frame_person_id, frames)))
    # at the beginning those were indices of people from train / test
    # {'06', '09', '10', '07', '19', '18', '12', '08', '11', '15', '16', '17', '04', '05', '13', '14'}
    # {'00', '03', '01', '02'}
    # following changes changed it to
    # {'00' - '15'}
    # {'16' - '19'}

    def transform_test_id(frame):
        # Shift test person ids 00-03 up to 16-19.
        frame_id = frame["id"]
        frame_id = str(int(frame_id[:2]) + 16) + frame_id[2:]
        frame["id"] = frame_id
        return frame

    def transform_train_id(frame):
        # Shift train person ids 04-19 down to 00-15 (kept zero-padded).
        frame_id = frame["id"]
        frame_id = f"{(int(frame_id[:2]) - 4):02d}{frame_id[2:]}"
        frame["id"] = frame_id
        return frame

    if mode == "test":
        frames = list(map(transform_test_id, frames))
    elif mode == "train":
        frames = list(map(transform_train_id, frames))
    return frames
# Export every valid ITOP "side" frame's point cloud and joint coordinates
# as individual gzip-compressed .npy files.
#train_frames = read_itop("top", "train")
#test_frames = read_itop("top", "test")
train_frames = read_itop("side", "train")
test_frames = read_itop("side", "test")

# Hoisted out of the loop: the data directory does not change per frame.
DATA_DIR = f"{get_big_data_path()}/ITOP"
for frame in train_frames + test_frames:
    cloud = frame["cloud"]
    joints = frame["real_world_coordinates"]
    frame_id = frame["id"]
    #cloud_path = f"{DATA_DIR}/clouds_depth/{frame_id}.npy.gz"
    #joints_path = f"{DATA_DIR}/joints_side/{frame_id}.npy.gz"
    cloud_path = f"{DATA_DIR}/clouds_depth_side/{frame_id}.npy.gz"
    joints_path = f"{DATA_DIR}/joints_side/{frame_id}.npy.gz"
    print("cloud_path, joints_path", cloud_path, joints_path)
    # Fix: use context managers so the gzip handles are closed (and the
    # compressed stream finalized) even if np.save raises.
    with gzip.GzipFile(cloud_path, "w") as f:
        np.save(file=f, arr=cloud)
    with gzip.GzipFile(joints_path, "w") as f:
        np.save(file=f, arr=joints)
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs the 'ar' command after removing its output file first.
This script is invoked like:
python gcc_ar_wrapper.py --ar=$AR --output=$OUT $OP $INPUTS
to do the equivalent of:
rm -f $OUT && $AR $OP $OUT $INPUTS
"""
import argparse
import os
import subprocess
import sys
# When running on a Windows host and using a toolchain whose tools are
# actually wrapper scripts (i.e. .bat files on Windows) rather than binary
# executables, the "command" to run has to be prefixed with this magic.
# The GN toolchain definitions take care of that for when GN/Ninja is
# running the tool directly. When that command is passed in to this
# script, it appears as a unitary string but needs to be split up so that
# just 'cmd' is the actual command given to Python's subprocess module.
BAT_PREFIX = 'cmd /c call '


def CommandToRun(command):
    """Normalize a command whose first element is a .bat wrapper invocation.

    When the first element is the whole 'cmd /c call <tool>' string, split
    that element into its four words so subprocess receives 'cmd' as the
    executable; otherwise return the command unchanged.
    """
    head = command[0]
    if not head.startswith(BAT_PREFIX):
        return command
    return head.split(None, 3) + command[1:]
def main():
    """Parse arguments, remove the stale output archive, then run ar.

    Returns:
        The exit status of the ar subprocess.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--ar',
                        required=True,
                        help='The ar binary to run',
                        metavar='PATH')
    parser.add_argument('--output',
                        required=True,
                        help='Output archive file',
                        metavar='ARCHIVE')
    parser.add_argument('--plugin',
                        help='Load plugin')
    parser.add_argument('operation',
                        help='Operation on the archive')
    parser.add_argument('inputs', nargs='+',
                        help='Input files')
    args = parser.parse_args()

    # Assemble: $AR $OP [--plugin P] $OUT $INPUTS
    command = [args.ar, args.operation]
    if args.plugin is not None:
        command += ['--plugin', args.plugin]
    command.append(args.output)
    command += args.inputs

    # Remove the output file first.
    # Fix: the original compared against os.errno.ENOENT, but 'os.errno' was
    # never a public attribute and is gone in Python 3.7+, so a missing file
    # raised AttributeError instead of being ignored. FileNotFoundError is
    # exactly the ENOENT case.
    try:
        os.remove(args.output)
    except FileNotFoundError:
        pass

    # Now just run the ar command.
    return subprocess.call(CommandToRun(command))


if __name__ == "__main__":
    sys.exit(main())
|
from gazette.spiders.base import FecamGazetteSpider
class ScMafraSpider(FecamGazetteSpider):
    """Scrapes official gazettes for the municipality of Mafra (SC)."""
    name = "sc_mafra"
    # FECAM search filter selecting this municipality's entity.
    FECAM_QUERY = "cod_entidade:157"
    # Territory code for the municipality -- presumably the IBGE code; verify.
    TERRITORY_ID = "4210100"
|
from sys import platform
# RPM spec skeleton for packages installed from a pre-built "reference"
# repository: the %post scriptlet links/rsyncs the package content from the
# reference root into the install area and relocates hard-coded paths.
REFERENCE_PACKAGE_POST = """
%%prep
%%build
%%install
%%postun
rm -rf "$RPM_INSTALL_PREFIX/%(pkgdir)s"
%%description
No description
%%files
%%post
PKG="%(pkgdir)s"
REF="%(refroot)s"
DIR="%(cmsroot)s"
if [ ! -e "$REF/$PKG" ] ; then exit 0 ; fi
mkdir -p "$DIR/$PKG"
SCRAM_PROJECT=false
if [ -d "$REF/$PKG/.SCRAM" ] ; then SCRAM_PROJECT=true ; fi
for d in $(find "$REF/$PKG" -maxdepth 1 -mindepth 1 | sed 's|.*/||') ; do
if [ "$d" = "etc" ] && [ -e "$REF/$PKG/etc/profile.d" ] ; then
mkdir "$DIR/$PKG/$d"
for sd in $(find "$REF/$PKG/$d" -maxdepth 1 -mindepth 1 | sed 's|.*/||') ; do
if [ "$sd" = "profile.d" ] && [ -e "$REF/$PKG/$d/$sd/init.sh" ] ; then
rsync -a "$REF/$PKG/$d/$sd/" "$DIR/$PKG/$d/$sd/"
elif [ "$sd" = "scram.d" ] ; then
rsync -a "$REF/$PKG/$d/$sd/" "$DIR/$PKG/$d/$sd/"
else
ln -s "$REF/$PKG/$d/$sd" "$DIR/$PKG/$d/$sd"
fi
done
elif [ "$d" = "tools" ] && [ -d "$REF/$PKG/$d/selected" ] ; then
rsync -a "$REF/$PKG/$d/" "$DIR/$PKG/$d/"
elif [ "$SCRAM_PROJECT" = "true" ] && [ "$d" = ".SCRAM" ] ; then
rsync -a "$REF/$PKG/$d/" "$DIR/$PKG/$d/"
elif [ "$SCRAM_PROJECT" = "true" ] && [ "$d" = "config" ] ; then
rsync -a "$REF/$PKG/$d/" "$DIR/$PKG/$d/"
else
ln -s $REF/$PKG/$d $DIR/$PKG/$d
fi
done
find $DIR/$PKG -type f | xargs --no-run-if-empty chmod +w
if [ "$SCRAM_PROJECT" = "true" ] ; then
chmod -R +w "$DIR/$PKG/.SCRAM" "$DIR/$PKG/config"
$DIR/$PKG/config/SCRAM/projectAreaRename.pl "$REF" "$DIR" %(cmsplatf)s "$DIR/$PKG"
fi
find $DIR/$PKG -type f | xargs --no-run-if-empty grep -Il '%(refroot)s' | xargs -n1 --no-run-if-empty perl -p -i -e "s|\\Q%(refroot)s\\E|$DIR|g"
"""
# Macros for creating git repo out of sources and applying patches
PATCH_SOURCE_MACROS = """
%define package_init_source if [ ! -d .git ] ; then git init && git add . && git commit -a -m 'init repo' && _PKGTOOLS_PKG_BASE_DIR=`/bin/pwd` && PKGTOOLS_PATCH_NUM=0 ; fi
%define package_commit_patch let PKGTOOLS_PATCH_NUM=$PKGTOOLS_PATCH_NUM+1 && git add . && [ $(git diff --name-only HEAD | wc -l) -gt 0 ] && git commit -a -m "applying patch ${PKGTOOLS_PATCH_NUM}"
%define package_final_source if [ "X${_PKGTOOLS_PKG_BASE_DIR}" != "X" ] ; then mv ${_PKGTOOLS_PKG_BASE_DIR}/.git %{_builddir}/pkgtools-pkg-src-move2git ; fi
"""
# FIXME: write a more valuable description
# Default bodies for each spec-file section, used when a package definition
# does not override them.
DEFAULT_SECTIONS = {"": """
""",
    "%%description": """
No description
""",
    "%prep": """
%%setup -n %n-%realversion
""",
    "%build": """
%initenv
./configure --prefix=%i
make
""",
    "%install": """
%initenv
make install
""",
    "%pre": """
if [ X"$(id -u)" = X0 ]; then
if [ ! -f /etc/cms-root-install-allowed ]; then
echo "*** CMS SOFTWARE INSTALLATION ABORTED ***"
echo "CMS software cannot be installed as the super-user."
echo "(We recommend reading a unix security guide)."
exit 1
fi
fi
""",
    "%post": """
if [ "X$CMS_INSTALL_PREFIX" = "X" ] ; then CMS_INSTALL_PREFIX=$RPM_INSTALL_PREFIX; export CMS_INSTALL_PREFIX; fi
%{relocateConfig}etc/profile.d/init.sh
%{relocateConfig}etc/profile.d/init.csh
""",
    "%preun": """
""",
    "%postun": """
if [ "X$CMS_INSTALL_PREFIX" = "X" ] ; then CMS_INSTALL_PREFIX=$RPM_INSTALL_PREFIX; export CMS_INSTALL_PREFIX; fi
""",
    "%files": """
%{i}/
%dir %{instroot}/
%dir %{instroot}/%{cmsplatf}/
%dir %{instroot}/%{cmsplatf}/%{pkgcategory}/
%dir %{instroot}/%{cmsplatf}/%{pkgcategory}/%{pkgname}/
"""}
# Shell snippets used to detect the real version of each supported compiler.
COMPILER_DETECTION = { "gcc": "gcc -v 2>&1 | grep version | sed -e \'s|.*\\([0-9][.][0-9][.][0-9]\\).*|\\1|\'",
    "icc": "echo no detection callback for icc."}
# Preambles. %dynamic_path_var is defined in rpm-preamble.
INITENV_PREAMBLE = [
    ("CMD_SH", "if", "[ -f %i/etc/profile.d/dependencies-setup.sh ]; then . %i/etc/profile.d/dependencies-setup.sh; fi"),
    ("CMD_CSH", "if", "( -f %i/etc/profile.d/dependencies-setup.csh ) source %i/etc/profile.d/dependencies-setup.csh; endif"),
    ("SETV", "%(uppername)s_ROOT", "%i"),
    ("SETV", "%(uppername)s_VERSION", "%v"),
    ("SETV", "%(uppername)s_REVISION", "%pkgrevision"),
    ("SETV", "%(uppername)s_CATEGORY", "%pkgcategory"),
    ("+PATH", "PATH", "%i/bin"),
    ("+PATH", "%%{dynamic_path_var}", "%i/lib")]
# Shell/csh guard fragments wrapped around runpath-related environment setup.
RUNPATH_ENV = {
    "START_SH": '',
    "START_CSH": 'if (! $?_CMSBUILD_BUILD_ENV_) setenv _CMSBUILD_BUILD_ENV_ ""\n',
    "END_SH": '',
    "END_CSH": '',
    "PRE_SH": 'if [ "${_CMSBUILD_BUILD_ENV_}" != "" ] ; then\n',
    "POST_SH": 'fi\n',
    "PRE_CSH": 'if ( ${%_CMSBUILD_BUILD_ENV_} != 0 ) then\n',
    "POST_CSH": 'endif\n'
}
# Per-section preamble strings prepended to the corresponding spec sections.
DEFAULT_PREAMBLE = """
"""
# On macOS, disable automatic dependency extraction.
if platform == 'darwin' : DEFAULT_PREAMBLE = """
AutoReqProv: no
"""
DEFAULT_DESCRIPTION_PREAMBLE = """
"""
DEFAULT_PREP_PREAMBLE = """
%initenv
[ -d %i ] && chmod -R u+w %i
rm -fr %i
"""
DEFAULT_BUILD_PREAMBLE = """
%initenv
"""
# NOTE(review): "PREABLE" is a typo for "PREAMBLE", but renaming this module
# constant would break external users; flagged only.
DEFAULT_INSTALL_PREABLE = """
mkdir -p %i
mkdir -p %_rpmdir
mkdir -p %_srcrpmdir
%initenv
"""
# Refuse to install as root.
DEFAULT_PRE_PREAMBLE = """
if [ X"$(id -u)" = X0 ]; then
echo "*** CMS SOFTWARE INSTALLATION ABORTED ***"
echo "CMS software cannot be installed as the super-user."
echo "(We recommend reading a unix security guide)."
exit 1
fi
"""
DEFAULT_POST_PREAMBLE = """
if [ "X$CMS_INSTALL_PREFIX" = "X" ] ; then CMS_INSTALL_PREFIX=$RPM_INSTALL_PREFIX; export CMS_INSTALL_PREFIX; fi
%{relocateConfig}etc/profile.d/init.sh
%{relocateConfig}etc/profile.d/init.csh
"""
DEFAULT_PREUN_PREAMBLE = """
"""
DEFAULT_POSTUN_PREAMBLE = """
if [ "X$CMS_INSTALL_PREFIX" = "X" ] ; then CMS_INSTALL_PREFIX=$RPM_INSTALL_PREFIX; export CMS_INSTALL_PREFIX; fi
"""
DEFAULT_FILES_PREAMBLE = """
%%defattr(-, root, root)
"""
# Optional runpath macros, expanded only when defined.
DEFAULT_RPATH_PREAMBLE_INSTALL = "\n%{?runpath_install:%runpath_install}\n"
DEFAULT_RPATH_PREAMBLE_POST = "\n%{?runpath_post:%runpath_post}\n"
# Templates rendering one INITENV_PREAMBLE-style command into bourne shell.
COMMANDS_SH = {"SETV": """%(var)s="%(value)s"\n""",
    "SET": """export %(var)s="%(value)s";\n""",
    "+PATH": """[ ! -d %(value)s ] || export %(var)s="%(value)s${%(var)s:+:$%(var)s}";\n""",
    "UNSET": """unset %(var)s || true\n""",
    "CMD": """%(var)s %(value)s\n""",
    "CMD_SH": """%(var)s %(value)s\n""",
    "CMD_CSH": "",
    "ALIAS": """alias %(var)s="%(value)s"\n""",
    "ALIAS_CSH": "",
    "ALIAS_SH": """alias %(var)s="%(value)s"\n"""}
# Same templates rendered into csh syntax.
COMMANDS_CSH = {"SETV": """set %(var)s="%(value)s"\n""",
    "SET": """setenv %(var)s "%(value)s"\n""",
    "+PATH": """if ( -d %(value)s ) then\n"""
    """ if ( ${?%(var)s} ) then\n"""
    """ setenv %(var)s "%(value)s:$%(var)s"\n"""
    """ else\n"""
    """ setenv %(var)s "%(value)s"\n"""
    """ endif\n"""
    """endif\n""",
    "UNSET": """unset %(var)s || true\n""",
    "CMD": """%(var)s %(value)s\n""",
    "CMD_SH": "",
    "CMD_CSH": """%(var)s %(value)s\n""",
    "ALIAS": """alias %(var)s "%(value)s"\n""",
    "ALIAS_SH": "",
    "ALIAS_CSH":"""alias %(var)s "%(value)s"\n"""}
# Macro that rewrites reference-repository paths inside installed files.
SPEC_REFERENCE_REPO = """
%%define relocateReference find %%{instroot} -type f | xargs --no-run-if-empty grep -Il '%(reference_repo)s' | xargs -n1 --no-run-if-empty perl -p -i -e "s|\\\\Q%(reference_repo)s\\\\E|%%{cmsroot}|g;"
"""
# Spec-file header: macro definitions plus the RPM preamble tags, filled via
# %-interpolation from the package description.
SPEC_HEADER = """
%%define pkgname %(name)s
%%define pkgversion %(version)s
%%define pkgcategory %(group)s
%%define cmsroot %(workDir)s
%%define instroot %(workDir)s/%(tempDirPrefix)s/BUILDROOT/%(checksum)s%(installDir)s
%%define realversion %(realVersion)s
%%define gccver %(compilerRealVersion)s
%%define compilerRealVersion %(compilerRealVersion)s
%%define pkgrevision %(pkgRevision)s
%%define pkgreqs %(pkgreqs)s
%%define directpkgreqs %(directpkgreqs)s
%%define specchecksum %(checksum)s
%%define cmscompiler %(compilerName)s
%%define cmsbuildApiVersion 1
%%define installroot %(installDir)s
%%define tempprefix %(tempDirPrefix)s
Name: %(group)s+%(name)s+%(version)s
Group: %(group)s
Version: %(rpmVersion)s
Release: %(pkgRevision)s
License: "As required by the orginal provider of the software."
Summary: %(summary)s SpecChecksum:%(checksum)s
%(requiresStatement)s
Packager: CMS <hn-cms-sw-develtools@cern.ch>
Distribution: CMS
Vendor: CMS
Provides: %(group)s+%(name)s+%(version)s
Obsoletes: %(group)s+%(name)s+%(version)s
Prefix: %(installDir)s
"""
# Cleanup appended to %install: drops pkgconfig/archive files, strips or
# removes opted-in paths, and fixes macOS install names.
DEFAULT_INSTALL_POSTAMBLE="""
# Avoid pkgconfig dependency. Notice you still need to keep the rm statement
# to support architectures not being build with cmsBuild > V00-19-XX
%if "%{?keep_pkgconfig:set}" != "set"
if [ -d "%i/lib/pkgconfig" ]; then rm -rf %i/lib/pkgconfig; fi
%endif
# Do not package libtool and archive libraries, unless required.
%if "%{?keep_archives:set}" != "set"
# Don't need archive libraries.
rm -f %i/lib/*.{l,}a
%endif
# Strip executable / paths which were specified in the strip_files macro.
%if "%{?strip_files:set}" == "set"
for x in %strip_files
do
if [ -e $x ]
then
find $x -type f -perm -a+x -exec %strip {} \;
fi
done
%endif
# remove files / directories which were specified by the drop_files macro.
%if "%{?drop_files:set}" == "set"
for x in %drop_files
do
if [ -e $x ]; then rm -rf $x; fi
done
%endif
case %{cmsplatf} in
osx* )
for x in `find %{i} -type f -perm -u+x | grep -v -e "[.]pyc"`;
do
if [ "X`file --mime $x | sed -e 's| ||g' | cut -d: -f2 | cut -d\; -f1`" = Xapplication/octet-stream ]
then
chmod +w $x
old_install_name=`otool -D $x | tail -1 | sed -e's|:$||'`
new_install_name=`basename $old_install_name`
install_name_tool -change $old_install_name $new_install_name -id $new_install_name $x
# Make sure also dependencies do not have an hardcoded path.
for dep in `otool -L $x | sed -e"s|[^\\t\\s ]*%{instroot}|%{instroot}|" | grep -e '^/' | sed -e's|(.*||'`
do
install_name_tool -change $dep `basename $dep` $x
done
chmod -w $x
fi
done
;;
* )
;;
esac
"""
DEFAULT_PREP_POSTAMBLE="""
"""
DEFAULT_BUILD_POSTAMBLE="""
# make sure that at least an empty file list does exist
touch %_builddir/files
"""
|
from decimal import Decimal
from bag_transfer.mixins.authmixins import (ManagingArchivistMixin,
OrgReadViewMixin)
from bag_transfer.mixins.formatmixins import JSONResponseMixin
from bag_transfer.mixins.viewmixins import PageTitleMixin
from bag_transfer.models import BagItProfile, Organization
from django.contrib import messages
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views.generic import (CreateView, DetailView, TemplateView,
UpdateView)
from .form import (AcceptBagItVersionFormset, AcceptSerializationFormset,
BagItProfileBagInfoFormset, BagItProfileForm,
ManifestsAllowedFormset, ManifestsRequiredFormset,
TagFilesRequiredFormset, TagManifestsRequiredFormset)
class BagItProfileManageView(PageTitleMixin):
    """Shared create/edit behaviour for BagIt Profiles and their formsets.

    Subclassed by BagItProfileCreateView / BagItProfileUpdateView, which mix
    in Django's CreateView / UpdateView to supply ``self.object`` handling.
    """
    template_name = "bagit_profiles/manage.html"
    model = BagItProfile
    form_class = BagItProfileForm

    def get_page_title(self, context):
        # "Edit" when bound to an existing profile, "Create" otherwise.
        return "Edit BagIt Profile" if self.object else "Create BagIt Profile"

    def get_context_data(self, **kwargs):
        """Add the profile form, its seven child formsets, and the org."""
        context = super().get_context_data(**kwargs)
        if self.object:
            form = BagItProfileForm(instance=self.object)
            organization = self.object.organization
        else:
            # Creating: the owning organization comes from the ?org= query
            # parameter; the source organization is the current user's.
            source_organization = self.request.user.organization
            organization = get_object_or_404(Organization, pk=self.request.GET.get("org"))
            form = BagItProfileForm(
                initial={
                    "source_organization": source_organization,
                    "contact_email": "archive@rockarch.org",
                    "organization": organization})
        context["form"] = form
        # Each formset edits one repeating section of the profile; on create
        # self.object is None, yielding empty formsets.
        context["bag_info_formset"] = BagItProfileBagInfoFormset(instance=self.object, prefix="bag_info")
        context["manifests_allowed_formset"] = ManifestsAllowedFormset(instance=self.object, prefix="manifests_allowed")
        context["manifests_formset"] = ManifestsRequiredFormset(instance=self.object, prefix="manifests")
        context["serialization_formset"] = AcceptSerializationFormset(instance=self.object, prefix="serialization")
        context["version_formset"] = AcceptBagItVersionFormset(instance=self.object, prefix="version")
        context["tag_manifests_formset"] = TagManifestsRequiredFormset(instance=self.object, prefix="tag_manifests")
        context["tag_files_formset"] = TagFilesRequiredFormset(instance=self.object, prefix="tag_files")
        context["organization"] = organization
        return context

    def get_success_url(self):
        return reverse("bagit-profiles:detail", kwargs={"pk": self.object.pk})

    def form_valid(self, form):
        """Saves associated formsets."""
        bagit_profile = form.save()
        # Rebind every formset to the POST data and the just-saved profile.
        bag_info_formset = BagItProfileBagInfoFormset(
            self.request.POST, instance=bagit_profile, prefix="bag_info")
        manifests_allowed_formset = ManifestsAllowedFormset(
            self.request.POST, instance=bagit_profile, prefix="manifests_allowed")
        manifests_formset = ManifestsRequiredFormset(
            self.request.POST, instance=bagit_profile, prefix="manifests")
        serialization_formset = AcceptSerializationFormset(
            self.request.POST, instance=bagit_profile, prefix="serialization")
        version_formset = AcceptBagItVersionFormset(
            self.request.POST, instance=bagit_profile, prefix="version")
        tag_manifests_formset = TagManifestsRequiredFormset(
            self.request.POST, instance=bagit_profile, prefix="tag_manifests")
        tag_files_formset = TagFilesRequiredFormset(
            self.request.POST, instance=bagit_profile, prefix="tag_files")
        forms_to_save = [
            bag_info_formset,
            manifests_allowed_formset,
            manifests_formset,
            serialization_formset,
            version_formset,
            tag_manifests_formset,
            tag_files_formset,
        ]
        # Any invalid formset aborts the whole save with a generic error.
        for formset in forms_to_save:
            if not formset.is_valid():
                messages.error(
                    self.request,
                    "There was a problem with your submission. Please correct the error(s) below and try again.")
                return super().form_invalid(form)
            else:
                formset.save()
        # Bump the profile version and refresh its public identifier URL;
        # persisted by form.save() inside super().form_valid(form).
        bagit_profile.version = bagit_profile.version + Decimal(1)
        bagit_profile.bagit_profile_identifier = self.request.build_absolute_uri(
            reverse(
                "bagitprofile-detail",
                kwargs={"pk": bagit_profile.id, "format": "json"},
            )
        )
        messages.success(self.request, "BagIt Profile saved")
        return super().form_valid(form)

    def form_invalid(self, form):
        messages.error(
            self.request,
            "There was a problem with your submission. Please correct the error(s) below and try again.")
        return super().form_invalid(form)
class BagItProfileCreateView(BagItProfileManageView, CreateView):
    # Create flavour: Django's CreateView supplies self.object handling.
    pass
class BagItProfileUpdateView(BagItProfileManageView, UpdateView):
    # Edit flavour: Django's UpdateView loads self.object from the URL pk.
    pass
class BagItProfileDetailView(PageTitleMixin, OrgReadViewMixin, DetailView):
    """Read-only display of a single BagIt Profile."""
    template_name = "bagit_profiles/detail.html"
    page_title = "BagIt Profile"
    model = BagItProfile
class BagItProfileAPIAdminView(ManagingArchivistMixin, JSONResponseMixin, TemplateView):
    """AJAX-only admin endpoint for destructive BagItProfile actions."""

    def render_to_response(self, context, **kwargs):
        """Perform the requested action and return a JSON success flag.

        Raises:
            Http404: for non-AJAX requests.
        """
        # Fix: HttpRequest.is_ajax() was deprecated in Django 3.1 and removed
        # in Django 4.0; check the X-Requested-With header directly (the
        # documented equivalent, available since Django 2.2).
        if self.request.headers.get("x-requested-with") != "XMLHttpRequest":
            raise Http404
        resp = {"success": 0}
        if "action" in self.kwargs:
            profile = get_object_or_404(BagItProfile, pk=self.kwargs.get("pk"))
            if self.kwargs["action"] == "delete":
                profile.delete()
                resp["success"] = 1
        return self.render_to_json_response(resp, **kwargs)
|
from modulos.controlador import Controlador
class Portafolio(Controlador):
    """Controller for the portfolio pages; each method renders one view."""

    def __init__(self, data, headers=None):
        Controlador.__init__(self, data, headers)
        self.vista = "portafolio"
        # Fix: compare against None with 'is', not '==' (PEP 8; avoids
        # surprises with objects overriding __eq__). Same result for None.
        if data["metodo"] is None and data["action"] is None:
            self.servir()
        self.modelo = data["model"]["paginas"]

    def acerca(self):
        """Render the 'acerca' (about) view."""
        self.add_vista("acerca")
        self.servir()

    def test(self):
        """Render the 'test' view."""
        self.add_vista("test")
        self.servir()

    def detalles(self):
        """Render the 'Detalle' (details) view."""
        self.add_vista("Detalle")
        self.servir()

    def divi(self):
        """Render the 'divi' view."""
        self.add_vista("divi")
        self.servir()

    def divi_front(self):
        """Render the 'divi-front' view."""
        self.add_vista("divi-front")
        self.servir()
|
from backends.software_package import SoftwarePackage
from definitions.definition import SoftwareDefinition
from providers.DebRepositoryProvider import DebRepositoryProvider
class GitlabEnterpriseEdition(SoftwareDefinition):
    # NOTE(review): the class name says GitLab EE, but the definition below
    # describes the Jitsi Meet web package -- likely a copy-paste leftover.
    # Renaming would change the public interface, so it is only flagged here.
    # Package identity as published by the vendor.
    software_package = SoftwarePackage(
        name='Jitsi Meet',
        vendor='8x8')
    # Fetch metadata from the official Jitsi Debian repository.
    provider = DebRepositoryProvider(
        software_package=software_package,
        repo_base_url='https://download.jitsi.org/',
        repo_packages_path='stable/Packages',
        repo_package='jitsi-meet-web'
    )
    # Package-root path -> installed filesystem location.
    path_map = {
        '/': '/usr/share/jitsi-meet/',
    }
    # No paths are excluded.
    ignore_paths = None
|
import copy
import math
import os
import torch
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
from SAMAF import SAMAF
from SinhalaSongsDataset import SinhalaSongsDataset
import argparse
# Command-line interface for the trainer.
# Fix: the numeric positionals were parsed as strings (argparse default),
# which breaks integer arithmetic downstream (e.g. range(..., epochs + 1));
# parse them with type=int. Note that 'default=' on a positional argument
# without nargs='?' is inert; the values are kept only as documentation.
parser = argparse.ArgumentParser(
    description="SAMAF mp3 Songs Trainer")
parser.add_argument("features_src", metavar="FEATURES_SRC",
                    help="path to pre-processed npy files")
parser.add_argument("snapshots_src", metavar="SNAPSHOTS_SRC",
                    help="path to save snapshots")
parser.add_argument("device", metavar="DEVICE", help="cuda/cpu", default="cpu")
parser.add_argument("trim_seconds", metavar="TRIM_SECS", type=int,
                    help="audio trim seconds", default=40)
parser.add_argument("batch_size", metavar="BATCH_SIZE", type=int,
                    help="dataset single batch size", default=32)
parser.add_argument("workers", metavar="WORKERS", type=int,
                    help="number of workers", default=4)
parser.add_argument("epochs", metavar="EPOCHS", type=int,
                    help="number of epochs to run", default=10)
parser.add_argument("embedding", metavar="EMBEDDING_DIM", type=int,
                    help="embedding dimension", default=256)
def draw_mfccs(*mfccs):
    """Plot each given MFCC matrix as a spectrogram in its own subplot.

    Args:
        *mfccs: MFCC matrices; each is fed through .transpose(0, 1).numpy(),
            so torch tensors are assumed -- TODO confirm.
    """
    plots = len(mfccs)
    plt.figure()
    for i, mfcc in enumerate(mfccs):
        # subplot indices are 1-based; lay the plots out side by side.
        plt.subplot(1, plots, i+1)
        librosa.display.specshow(mfcc.transpose(0, 1).numpy(), x_axis="time")
        plt.colorbar()
        plt.title("MFCC")
    plt.tight_layout()
def train_model(train_dataset, validation_dataset, epochs, device, embedding_dimension=128, save_path="", start_state=None):
    """Train a SAMAF sequence autoencoder, snapshotting after every epoch.

    Args:
        train_dataset: iterable yielding (label, sequence) training batches.
        validation_dataset: iterable yielding (label, sequence) validation batches.
        epochs: last epoch number to run (inclusive, 1-based).
        device: torch.device to run the model on.
        embedding_dimension: width of the hash embedding produced by SAMAF.
        save_path: directory for per-epoch snapshots and loss plots.
        start_state: optional snapshot dict (same shape as saved below) used
            to resume a previous run.

    Returns:
        (best_model_weights, history): the state_dict with the lowest
        validation loss and the per-epoch train/validation loss lists.
    """

    def mse_loss(pred, true):
        # Reconstruction term: how well the decoder reproduces the input.
        return torch.nn.functional.mse_loss(pred, true)

    def hash_loss(embeddings):
        # Similarity term over all ordered pairs of per-step embeddings in a
        # sequence: [1,2,3] -> [1,1,2,2,3,3] paired with [1,2,3,1,2,3].
        repeated = embeddings.repeat(1, embeddings.shape[1], 1)
        interleaved = embeddings.repeat_interleave(embeddings.shape[1], dim=1)
        cosine = torch.nn.functional.cosine_similarity(
            repeated, interleaved, dim=2)
        cosine = cosine.view(-1, embeddings.shape[1], embeddings.shape[1])
        # Zero out self-similarity on the diagonal, then rescale so the 0.55
        # similarity threshold maps to 1.0 before clipping into [0, 1].
        off_diagonal = (torch.ones(embeddings.shape[1])
                        - torch.eye(embeddings.shape[1])).unsqueeze(0).to(device)
        cosine = cosine * off_diagonal * (1 / 0.55)
        cosine = cosine.clamp(0.0, 1.0)
        # Squared pairwise L2 distances, weighted by clipped similarity.
        l2_norm = torch.linalg.norm(
            embeddings.unsqueeze(1) - embeddings.unsqueeze(2), ord=2, dim=3)
        numerator = torch.sum(cosine * torch.square(l2_norm), dim=(1, 2))
        denominator = torch.count_nonzero(cosine.detach(), dim=(1, 2))
        # Robustness fix: avoid 0/0 -> NaN when no pair clears the threshold.
        denominator = denominator.clamp(min=1)
        return torch.mean(numerator / denominator)

    def bitwise_entropy_loss(embeddings):
        # TODO: Implement
        return 0

    model = SAMAF(embedding_dim=embedding_dimension).to(device)
    optimizer = torch.optim.Adagrad(model.parameters(), lr=1e-2)
    history = dict(train=[], validation=[])
    best_model_weights = copy.deepcopy(model.state_dict())
    best_loss = 100000000.0
    start_epoch = 1
    if start_state:
        model.load_state_dict(start_state["model_state_dict"])
        optimizer.load_state_dict(start_state["optimizer_state_dict"])
        # Snapshots are written *after* an epoch completes, so resume at the
        # following epoch (the previous code re-ran the already-saved epoch).
        start_epoch = start_state["epoch"] + 1
        history = start_state["history"]
        best_model_weights = start_state["best_model_weights"]
        best_loss = start_state["best_loss"]
    for epoch in range(start_epoch, epochs + 1):
        # --- Training pass ---
        train_losses = []
        model = model.train()
        for i, (_, seq_true) in enumerate(train_dataset):
            seq_true = seq_true.to(device)
            optimizer.zero_grad()
            embeddings, seq_pred = model(seq_true)
            loss = (1.0 * mse_loss(seq_pred, seq_true)
                    + 1.0 * hash_loss(embeddings)
                    + 1.0 * bitwise_entropy_loss(embeddings))
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
            if i % 100 == 99:
                print("Epoch {} batch {}: train loss {}".format(
                    epoch, i+1, loss.item()))
        # --- Validation pass (no gradients) ---
        validation_losses = []
        model = model.eval()
        with torch.no_grad():
            for i, (_, seq_true) in enumerate(validation_dataset):
                seq_true = seq_true.to(device)
                # BUG FIX: embeddings were previously discarded here, so
                # hash_loss was computed on stale embeddings from the last
                # *training* batch. Use this validation batch's embeddings.
                embeddings, seq_pred = model(seq_true)
                loss = (1.0 * mse_loss(seq_pred, seq_true)
                        + 1.0 * hash_loss(embeddings)
                        + 1.0 * bitwise_entropy_loss(embeddings))
                validation_losses.append(loss.item())
                if i % 100 == 99:
                    print("Epoch {} batch {}: validation loss {}".format(
                        epoch, i+1, loss.item()))
        train_loss = np.mean(train_losses)
        validation_loss = np.mean(validation_losses)
        history['train'].append(train_loss)
        history['validation'].append(validation_loss)
        print("Epoch {}: train loss {}, validation loss {}".format(
            epoch, train_loss, validation_loss))
        # Persist a fully resumable snapshot for this epoch.
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        torch.save({
            "epoch": epoch,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "train_loss": train_loss,
            "validation_loss": validation_loss,
            "history": history,
            "best_model_weights": best_model_weights,
            "best_loss": best_loss
        }, os.path.join(save_path, "snapshot-{}.pytorch".format(epoch)))
        if validation_loss < best_loss:
            best_loss = validation_loss
            best_model_weights = copy.deepcopy(model.state_dict())
        # Refresh the cumulative loss-curve image for this run.
        x = [*range(1, len(history['train'])+1)]
        plt.clf()
        plt.plot(x, history['train'], label="Train Loss")
        plt.plot(x, history['validation'], label="Validation Loss")
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title("Model Performance upto epoch {}".format(epoch))
        plt.legend()
        plt.savefig(os.path.join(
            save_path, "model-performance-{}.png".format(epoch)))
    return best_model_weights, history
def main():
    """Parse CLI arguments, build the data loaders, and launch training."""
    args = parser.parse_args()
    trim = int(args.trim_seconds)
    batch = int(args.batch_size)
    workers = int(args.workers)
    train_data = SinhalaSongsDataset(
        root_dir=args.features_src, trim_seconds=trim)
    train_loader = DataLoader(
        train_data, batch_size=batch, num_workers=workers, shuffle=True)
    val_data = SinhalaSongsDataset(
        root_dir=args.features_src, trim_seconds=trim, validation=True)
    val_loader = DataLoader(
        val_data, batch_size=batch, num_workers=workers, shuffle=False)
    device = torch.device(args.device)
    best_model, history = train_model(train_loader, val_loader, int(args.epochs),
                                      device, int(args.embedding),
                                      args.snapshots_src)


if __name__ == "__main__":
    main()
|
from components import featureBroker
import datetime
import app
from entities import *
import traceback
class RFSnifferCleanup(featureBroker.Component):
    """Scheduled job that purges aged RFSniffer history rows."""

    config = featureBroker.RequiredFeature('conf_RFSniffer')

    def register(self, scheduler):
        """Register the nightly cleanup; max_instances=1 prevents overlap."""
        scheduler.add_job(self.doCleanup, 'cron', id='RF Sniffer History Cleanup', hour=2, max_instances=1)

    def doCleanup(self):
        """Delete RFSniffer rows older than the configured retention window."""
        # NOTE(review): naive UTC timestamp -- assumes date_created is also
        # stored as naive UTC; confirm before switching to aware datetimes.
        cleanupDate = datetime.datetime.utcnow() - datetime.timedelta(days=int(self.config['cleanup_age_days']))
        dbSession = None
        try:
            dbSession = app.db.session
            dbSession.query(RFSniffer).filter(RFSniffer.date_created < cleanupDate).delete()
            dbSession.commit()
        except Exception:
            # Was a bare `except:`, which also trapped SystemExit and
            # KeyboardInterrupt. Roll back only if a session was obtained
            # (previously an AttributeError if session acquisition failed).
            if dbSession is not None:
                dbSession.rollback()
            traceback.print_exc()
            raise


featureBroker.features.Provide('job', RFSnifferCleanup)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import typing
from tokenize import (
Floatnumber as FLOATNUMBER_RE,
Imagnumber as IMAGNUMBER_RE,
Intnumber as INTNUMBER_RE,
)
from libcst._maybe_sentinel import MaybeSentinel
from libcst._nodes._expression import (
Arg,
Asynchronous,
Attribute,
Await,
BinaryOperation,
BooleanOperation,
Call,
Comparison,
ComparisonTarget,
CompFor,
CompIf,
ConcatenatedString,
Dict,
DictComp,
DictElement,
Element,
Ellipsis,
ExtSlice,
Float,
FormattedString,
FormattedStringExpression,
FormattedStringText,
From,
GeneratorExp,
IfExp,
Imaginary,
Index,
Integer,
Lambda,
LeftCurlyBrace,
LeftParen,
LeftSquareBracket,
List,
ListComp,
Name,
Param,
Parameters,
RightCurlyBrace,
RightParen,
RightSquareBracket,
Set,
SetComp,
Slice,
StarredDictElement,
StarredElement,
Subscript,
Tuple,
UnaryOperation,
Yield,
)
from libcst._nodes._op import (
Add,
And,
AssignEqual,
BaseBinaryOp,
BaseBooleanOp,
BaseCompOp,
BitAnd,
BitInvert,
BitOr,
BitXor,
Colon,
Comma,
Divide,
Dot,
Equal,
FloorDivide,
GreaterThan,
GreaterThanEqual,
In,
Is,
IsNot,
LeftShift,
LessThan,
LessThanEqual,
MatrixMultiply,
Minus,
Modulo,
Multiply,
Not,
NotEqual,
NotIn,
Or,
Plus,
Power,
RightShift,
Subtract,
)
from libcst._nodes._whitespace import SimpleWhitespace
from libcst._parser._custom_itertools import grouper
from libcst._parser._production_decorator import with_production
from libcst._parser._types.config import ParserConfig
from libcst._parser._types.partials import (
ArglistPartial,
AttributePartial,
CallPartial,
FormattedStringConversionPartial,
FormattedStringFormatSpecPartial,
SlicePartial,
SubscriptPartial,
WithLeadingWhitespace,
)
from libcst._parser._types.token import Token
from libcst._parser._whitespace_parser import parse_parenthesizable_whitespace
# Lookup tables mapping operator token strings to their CST node classes;
# used by convert_binop / convert_boolop / convert_comp_op below.
BINOP_TOKEN_LUT: typing.Dict[str, typing.Type[BaseBinaryOp]] = {
    "*": Multiply,
    "@": MatrixMultiply,
    "/": Divide,
    "%": Modulo,
    "//": FloorDivide,
    "+": Add,
    "-": Subtract,
    "<<": LeftShift,
    ">>": RightShift,
    "&": BitAnd,
    "^": BitXor,
    "|": BitOr,
}
BOOLOP_TOKEN_LUT: typing.Dict[str, typing.Type[BaseBooleanOp]] = {"and": And, "or": Or}
# '!='/'<>' and the two-token operators ('not in', 'is not') are deliberately
# absent here: convert_comp_op constructs NotEqual/NotIn/IsNot explicitly.
COMPOP_TOKEN_LUT: typing.Dict[str, typing.Type[BaseCompOp]] = {
    "<": LessThan,
    ">": GreaterThan,
    "==": Equal,
    "<=": LessThanEqual,
    ">=": GreaterThanEqual,
    "in": In,
    "is": Is,
}
# N.B. This uses a `testlist | star_expr`, not a `testlist_star_expr` because
# `testlist_star_expr` may not always be representable by a non-partial node, since it's
# only used as part of `expr_stmt`.
@with_production("expression_input", "(testlist | star_expr) ENDMARKER")
def convert_expression_input(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Return the parsed expression, discarding the trailing ENDMARKER."""
    expr, _endmarker = children
    # HACK: UGLY! REMOVE THIS SOON!
    # Unwrap WithLeadingWhitespace if it exists. It shouldn't exist by this
    # point, but testlist isn't fully implemented, and we currently leak
    # these partial objects.
    return expr.value if isinstance(expr, WithLeadingWhitespace) else expr
@with_production("test", "or_test ['if' or_test 'else' test] | lambdef")
def convert_test(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a 'test' production.

    One child (plain or_test / lambdef) passes through unchanged; five
    children form the conditional expression 'body if test else orelse'.
    """
    if len(children) == 1:
        (child,) = children
        return child
    else:
        (body, if_token, test, else_token, orelse) = children
        # The IfExp owns the whitespace around its 'if' and 'else' keywords;
        # the leading whitespace of the whole expression comes from `body`.
        return WithLeadingWhitespace(
            IfExp(
                body=body.value,
                test=test.value,
                orelse=orelse.value,
                whitespace_before_if=parse_parenthesizable_whitespace(
                    config, if_token.whitespace_before
                ),
                whitespace_after_if=parse_parenthesizable_whitespace(
                    config, if_token.whitespace_after
                ),
                whitespace_before_else=parse_parenthesizable_whitespace(
                    config, else_token.whitespace_before
                ),
                whitespace_after_else=parse_parenthesizable_whitespace(
                    config, else_token.whitespace_after
                ),
            ),
            body.whitespace_before,
        )
@with_production("test_nocond", "or_test | lambdef_nocond")
def convert_test_nocond(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Pass through the single alternative of a test_nocond production."""
    (only_child,) = children
    return only_child
@with_production("lambdef", "'lambda' [varargslist] ':' test")
@with_production("lambdef_nocond", "'lambda' [varargslist] ':' test_nocond")
def convert_lambda(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a lambda expression, reassigning pre-colon whitespace ownership.

    The colon initially captures the whitespace on both sides of itself; when
    the lambda has parameters, the syntactically last parameter (checked in
    order **kwargs, keyword-only, *args, defaulted, positional) takes over
    the whitespace before the colon -- unless it already has a trailing
    comma, which owns that whitespace instead.
    """
    lambdatoken, *params, colontoken, test = children
    # Grab the whitespace around the colon. If there are no params, then
    # the colon owns the whitespace before and after it. If there are
    # any params, then the last param owns the whitespace before the colon.
    # We handle the parameter movement below.
    colon = Colon(
        whitespace_before=parse_parenthesizable_whitespace(
            config, colontoken.whitespace_before
        ),
        whitespace_after=parse_parenthesizable_whitespace(
            config, colontoken.whitespace_after
        ),
    )
    # Unpack optional parameters
    if len(params) == 0:
        parameters = Parameters()
        whitespace_after_lambda = MaybeSentinel.DEFAULT
    else:
        (parameters,) = params
        whitespace_after_lambda = parse_parenthesizable_whitespace(
            config, lambdatoken.whitespace_after
        )
        # Handle pre-colon whitespace. Only the last parameter kind present
        # can own it, and only when that parameter has no trailing comma
        # (a comma node owns the whitespace after the parameter instead).
        if parameters.star_kwarg is not None:
            if parameters.star_kwarg.comma == MaybeSentinel.DEFAULT:
                parameters = parameters.with_changes(
                    star_kwarg=parameters.star_kwarg.with_changes(
                        whitespace_after_param=colon.whitespace_before
                    )
                )
        elif parameters.kwonly_params:
            if parameters.kwonly_params[-1].comma == MaybeSentinel.DEFAULT:
                parameters = parameters.with_changes(
                    kwonly_params=(
                        *parameters.kwonly_params[:-1],
                        parameters.kwonly_params[-1].with_changes(
                            whitespace_after_param=colon.whitespace_before
                        ),
                    )
                )
        elif isinstance(parameters.star_arg, Param):
            if parameters.star_arg.comma == MaybeSentinel.DEFAULT:
                parameters = parameters.with_changes(
                    star_arg=parameters.star_arg.with_changes(
                        whitespace_after_param=colon.whitespace_before
                    )
                )
        elif parameters.default_params:
            if parameters.default_params[-1].comma == MaybeSentinel.DEFAULT:
                parameters = parameters.with_changes(
                    default_params=(
                        *parameters.default_params[:-1],
                        parameters.default_params[-1].with_changes(
                            whitespace_after_param=colon.whitespace_before
                        ),
                    )
                )
        elif parameters.params:
            if parameters.params[-1].comma == MaybeSentinel.DEFAULT:
                parameters = parameters.with_changes(
                    params=(
                        *parameters.params[:-1],
                        parameters.params[-1].with_changes(
                            whitespace_after_param=colon.whitespace_before
                        ),
                    )
                )
        # Colon doesn't own its own pre-whitespace now.
        colon = colon.with_changes(whitespace_before=SimpleWhitespace(""))
    # Return a lambda
    return WithLeadingWhitespace(
        Lambda(
            whitespace_after_lambda=whitespace_after_lambda,
            params=parameters,
            body=test.value,
            colon=colon,
        ),
        lambdatoken.whitespace_before,
    )
@with_production("or_test", "and_test ('or' and_test)*")
@with_production("and_test", "not_test ('and' not_test)*")
def convert_boolop(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Left-fold a flat 'expr op expr op ...' run into nested BooleanOperations."""
    first, *rest = children
    if not rest:
        return first
    leading_ws = first.whitespace_before
    result = first.value
    # Every operator at one production level shares precedence, so a simple
    # left-to-right fold yields the correct nesting.
    for op_token, operand in grouper(rest, 2):
        if op_token.string not in BOOLOP_TOKEN_LUT:
            raise Exception(f"Unexpected token '{op_token.string}'!")
        result = BooleanOperation(
            left=result,
            operator=BOOLOP_TOKEN_LUT[op_token.string](
                whitespace_before=parse_parenthesizable_whitespace(
                    config, op_token.whitespace_before
                ),
                whitespace_after=parse_parenthesizable_whitespace(
                    config, op_token.whitespace_after
                ),
            ),
            right=operand.value,
        )
    return WithLeadingWhitespace(result, leading_ws)
@with_production("not_test", "'not' not_test | comparison")
def convert_not_test(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Build a UnaryOperation for 'not x', or pass a bare comparison through."""
    if len(children) == 2:
        not_token, operand = children
        negation = UnaryOperation(
            operator=Not(
                whitespace_after=parse_parenthesizable_whitespace(
                    config, not_token.whitespace_after
                )
            ),
            expression=operand.value,
        )
        return WithLeadingWhitespace(negation, not_token.whitespace_before)
    (only_child,) = children
    return only_child
@with_production("comparison", "expr (comp_op expr)*")
def convert_comparison(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Build a Comparison from 'expr (comp_op expr)*', or pass a lone expr through."""
    if len(children) == 1:
        (only_child,) = children
        return only_child
    left, *pairs = children
    # Each (operator, comparator) pair becomes one ComparisonTarget.
    targets = tuple(
        ComparisonTarget(operator=op, comparator=operand.value)
        for op, operand in grouper(pairs, 2)
    )
    return WithLeadingWhitespace(
        Comparison(left=left.value, comparisons=targets),
        left.whitespace_before,
    )
@with_production(
    "comp_op", "('<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not')"
)
def convert_comp_op(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a comparison operator token (or token pair) into its CST node.

    Single-token operators come from COMPOP_TOKEN_LUT, except '!='/'<>'
    which build NotEqual explicitly so the original spelling is preserved
    via `value`. The two-token forms 'not in' / 'is not' build NotIn/IsNot,
    capturing the whitespace between the two keywords.
    """
    if len(children) == 1:
        (op,) = children
        if op.string in COMPOP_TOKEN_LUT:
            # A regular comparison containing one token
            return COMPOP_TOKEN_LUT[op.string](
                whitespace_before=parse_parenthesizable_whitespace(
                    config, op.whitespace_before
                ),
                whitespace_after=parse_parenthesizable_whitespace(
                    config, op.whitespace_after
                ),
            )
        elif op.string in ["!=", "<>"]:
            # Not equal, which can take two forms in some cases
            return NotEqual(
                whitespace_before=parse_parenthesizable_whitespace(
                    config, op.whitespace_before
                ),
                value=op.string,
                whitespace_after=parse_parenthesizable_whitespace(
                    config, op.whitespace_after
                ),
            )
        else:
            # TODO: Make this a ParserSyntaxError
            raise Exception(f"Unexpected token '{op.string}'!")
    else:
        # A two-token comparison
        leftcomp, rightcomp = children
        if leftcomp.string == "not" and rightcomp.string == "in":
            return NotIn(
                whitespace_before=parse_parenthesizable_whitespace(
                    config, leftcomp.whitespace_before
                ),
                whitespace_between=parse_parenthesizable_whitespace(
                    config, leftcomp.whitespace_after
                ),
                whitespace_after=parse_parenthesizable_whitespace(
                    config, rightcomp.whitespace_after
                ),
            )
        elif leftcomp.string == "is" and rightcomp.string == "not":
            return IsNot(
                whitespace_before=parse_parenthesizable_whitespace(
                    config, leftcomp.whitespace_before
                ),
                whitespace_between=parse_parenthesizable_whitespace(
                    config, leftcomp.whitespace_after
                ),
                whitespace_after=parse_parenthesizable_whitespace(
                    config, rightcomp.whitespace_after
                ),
            )
        else:
            # TODO: Make this a ParserSyntaxError
            raise Exception(f"Unexpected token '{leftcomp.string} {rightcomp.string}'!")
@with_production("star_expr", "'*' expr")
def convert_star_expr(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Wrap '*expr' into a StarredElement carrying the star's leading whitespace."""
    star_tok, inner = children
    element = StarredElement(
        inner.value,
        whitespace_before_value=parse_parenthesizable_whitespace(
            config, inner.whitespace_before
        ),
        # atom is responsible for parenthesis and trailing_whitespace if they
        # exist; testlist_comp, exprlist, dictorsetmaker, etc. are responsible
        # for the comma if present.
    )
    return WithLeadingWhitespace(element, star_tok.whitespace_before)
@with_production("expr", "xor_expr ('|' xor_expr)*")
@with_production("xor_expr", "and_expr ('^' and_expr)*")
@with_production("and_expr", "shift_expr ('&' shift_expr)*")
@with_production("shift_expr", "arith_expr (('<<'|'>>') arith_expr)*")
@with_production("arith_expr", "term (('+'|'-') term)*")
@with_production("term", "factor (('*'|'@'|'/'|'%'|'//') factor)*")
def convert_binop(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Left-fold a flat binary-operator run into nested BinaryOperations."""
    first, *rest = children
    if not rest:
        return first
    leading_ws = first.whitespace_before
    result = first.value
    # All operators at one production level share precedence, so a simple
    # left-to-right fold produces the correct nesting.
    for op_token, operand in grouper(rest, 2):
        if op_token.string not in BINOP_TOKEN_LUT:
            raise Exception(f"Unexpected token '{op_token.string}'!")
        result = BinaryOperation(
            left=result,
            operator=BINOP_TOKEN_LUT[op_token.string](
                whitespace_before=parse_parenthesizable_whitespace(
                    config, op_token.whitespace_before
                ),
                whitespace_after=parse_parenthesizable_whitespace(
                    config, op_token.whitespace_after
                ),
            ),
            right=operand.value,
        )
    return WithLeadingWhitespace(result, leading_ws)
@with_production("factor", "('+'|'-'|'~') factor | power")
def convert_factor(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a unary '+', '-' or '~' application, or pass a bare power through.

    Uses a token->node-class table for consistency with BINOP_TOKEN_LUT /
    BOOLOP_TOKEN_LUT instead of the previous hand-written if/elif chain;
    behavior (including the error raised for unknown tokens) is unchanged.
    """
    if len(children) == 1:
        (child,) = children
        return child
    op, factor = children
    # Map the operator token to its CST node class; an unknown token is a
    # grammar/logic error, matching the other converters.
    op_cls = {"+": Plus, "-": Minus, "~": BitInvert}.get(op.string)
    if op_cls is None:
        raise Exception(f"Unexpected token '{op.string}'!")
    opnode = op_cls(
        whitespace_after=parse_parenthesizable_whitespace(
            config, op.whitespace_after
        )
    )
    return WithLeadingWhitespace(
        UnaryOperation(operator=opnode, expression=factor.value), op.whitespace_before
    )
@with_production("power", "atom_expr ['**' factor]")
def convert_power(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert 'base ** exponent' into a BinaryOperation, or pass through."""
    if len(children) == 1:
        (only_child,) = children
        return only_child
    base, star_star, exponent = children
    operation = BinaryOperation(
        left=base.value,
        operator=Power(
            whitespace_before=parse_parenthesizable_whitespace(
                config, star_star.whitespace_before
            ),
            whitespace_after=parse_parenthesizable_whitespace(
                config, star_star.whitespace_after
            ),
        ),
        right=exponent.value,
    )
    return WithLeadingWhitespace(operation, base.whitespace_before)
@with_production("atom_expr", "atom_expr_await | atom_expr_trailer")
def convert_atom_expr(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Pass through whichever atom_expr alternative was parsed."""
    (node,) = children
    return node
@with_production("atom_expr_await", "'await' atom_expr_trailer")
def convert_atom_expr_await(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Wrap an awaited expression in an Await node."""
    await_tok, awaited = children
    node = Await(
        whitespace_after_await=parse_parenthesizable_whitespace(
            config, await_tok.whitespace_after
        ),
        expression=awaited.value,
    )
    return WithLeadingWhitespace(node, await_tok.whitespace_before)
@with_production("atom_expr_trailer", "atom trailer*")
def convert_atom_expr_trailer(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Fold a chain of call/subscript/attribute trailers onto a base atom.

    Trailers are applied left-to-right, wrapping the accumulated node in
    Subscript/Attribute/Call according to each partial's type; the grammar
    cannot express this with left recursion, hence the explicit loop.
    """
    atom, *trailers = children
    whitespace_before = atom.whitespace_before
    atom = atom.value
    # Need to walk through all trailers from left to right and construct
    # a series of nodes based on each partial type. We can't do this with
    # left recursion due to limits in the parser.
    for trailer in trailers:
        if isinstance(trailer, SubscriptPartial):
            atom = Subscript(
                value=atom,
                whitespace_after_value=parse_parenthesizable_whitespace(
                    config, trailer.whitespace_before
                ),
                lbracket=trailer.lbracket,
                slice=trailer.slice,
                rbracket=trailer.rbracket,
            )
        elif isinstance(trailer, AttributePartial):
            atom = Attribute(value=atom, dot=trailer.dot, attr=trailer.attr)
        elif isinstance(trailer, CallPartial):
            # If the trailing argument doesn't have a comma, then it owns the
            # trailing whitespace before the rpar. Otherwise, the comma owns
            # it.
            if (
                len(trailer.args) > 0
                and trailer.args[-1].comma == MaybeSentinel.DEFAULT
            ):
                args = (
                    *trailer.args[:-1],
                    trailer.args[-1].with_changes(
                        whitespace_after_arg=trailer.rpar.whitespace_before
                    ),
                )
            else:
                args = trailer.args
            atom = Call(
                func=atom,
                whitespace_after_func=parse_parenthesizable_whitespace(
                    config, trailer.lpar.whitespace_before
                ),
                whitespace_before_args=trailer.lpar.value.whitespace_after,
                args=tuple(args),
            )
        else:
            # This is an invalid trailer, so lets give up
            raise Exception("Logic error!")
    return WithLeadingWhitespace(atom, whitespace_before)
@with_production(
    "trailer", "trailer_arglist | trailer_subscriptlist | trailer_attribute"
)
def convert_trailer(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Pass through whichever trailer alternative was parsed."""
    (partial,) = children
    return partial
@with_production("trailer_arglist", "'(' [arglist] ')'")
def convert_trailer_arglist(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a call trailer '([arglist])' into a CallPartial."""
    lpar_tok, *maybe_arglist, rpar_tok = children
    # An absent arglist means a zero-argument call.
    args = maybe_arglist[0].args if maybe_arglist else ()
    left = WithLeadingWhitespace(
        LeftParen(
            whitespace_after=parse_parenthesizable_whitespace(
                config, lpar_tok.whitespace_after
            )
        ),
        lpar_tok.whitespace_before,
    )
    right = RightParen(
        whitespace_before=parse_parenthesizable_whitespace(
            config, rpar_tok.whitespace_before
        )
    )
    return CallPartial(lpar=left, args=args, rpar=right)
@with_production("trailer_subscriptlist", "'[' subscriptlist ']'")
def convert_trailer_subscriptlist(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a '[subscriptlist]' trailer into a SubscriptPartial."""
    lbracket_tok, subscripts, rbracket_tok = children
    left = LeftSquareBracket(
        whitespace_after=parse_parenthesizable_whitespace(
            config, lbracket_tok.whitespace_after
        )
    )
    right = RightSquareBracket(
        whitespace_before=parse_parenthesizable_whitespace(
            config, rbracket_tok.whitespace_before
        )
    )
    return SubscriptPartial(
        lbracket=left,
        slice=subscripts.value,
        rbracket=right,
        whitespace_before=lbracket_tok.whitespace_before,
    )
@with_production("subscriptlist", "subscript (',' subscript)* [',']")
def convert_subscriptlist(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert one or more subscripts inside '[...]'.

    A comma-separated list becomes a list of ExtSlice wrappers (each keeping
    its trailing comma, if any); a lone subscript is already an Index/Slice
    and passes through unchanged.
    """
    if len(children) > 1:
        # This is a list of ExtSlice, so construct as such by grouping every
        # subscript with an optional comma and adding to a list.
        extslices = []
        for slice, comma in grouper(children, 2):
            if comma is None:
                extslices.append(ExtSlice(slice=slice.value))
            else:
                extslices.append(
                    ExtSlice(
                        slice=slice.value,
                        comma=Comma(
                            whitespace_before=parse_parenthesizable_whitespace(
                                config, comma.whitespace_before
                            ),
                            whitespace_after=parse_parenthesizable_whitespace(
                                config, comma.whitespace_after
                            ),
                        ),
                    )
                )
        return WithLeadingWhitespace(extslices, children[0].whitespace_before)
    else:
        # This is an Index or Slice, as parsed in the child.
        (index_or_slice,) = children
        return index_or_slice
@with_production("subscript", "test | [test] ':' [test] [sliceop]")
def convert_subscript(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a single subscript into an Index or Slice.

    A lone non-token child is a simple Index. Otherwise the children are
    normalized into [lower?, ':', upper?] (with an optional trailing
    SlicePartial supplying the second colon and step) and assembled into a
    Slice node.
    """
    if len(children) == 1 and not isinstance(children[0], Token):
        # This is just an index node
        (test,) = children
        return WithLeadingWhitespace(Index(test.value), test.whitespace_before)
    if isinstance(children[-1], SlicePartial):
        # We got a partial slice as the final param. Extract the final
        # bits of the full subscript.
        *others, sliceop = children
        whitespace_before = others[0].whitespace_before
        second_colon = sliceop.second_colon
        step = sliceop.step
    else:
        # We can just parse this below, without taking extras from the
        # partial child.
        others = children
        whitespace_before = others[0].whitespace_before
        second_colon = MaybeSentinel.DEFAULT
        step = None
    # We need to create a partial slice to pass up. So, align so we have
    # a list that's always [Optional[Test], Colon, Optional[Test]].
    if isinstance(others[0], Token):
        # First token is a colon, so insert an empty test on the LHS. We
        # know the RHS is a test since it's not a sliceop.
        slicechildren = [None, *others]
    else:
        # First token is non-colon, so its a test.
        slicechildren = [*others]
    if len(slicechildren) < 3:
        # Now, we have to fill in the RHS. We know its two long
        # at this point if its not already 3.
        slicechildren = [*slicechildren, None]
    lower, first_colon, upper = slicechildren
    return WithLeadingWhitespace(
        Slice(
            lower=lower.value if lower is not None else None,
            first_colon=Colon(
                whitespace_before=parse_parenthesizable_whitespace(
                    config, first_colon.whitespace_before
                ),
                whitespace_after=parse_parenthesizable_whitespace(
                    config, first_colon.whitespace_after
                ),
            ),
            upper=upper.value if upper is not None else None,
            second_colon=second_colon,
            step=step,
        ),
        whitespace_before=whitespace_before,
    )
@with_production("sliceop", "':' [test]")
def convert_sliceop(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert the optional second half of a slice (':' [step]) into a SlicePartial."""
    colon_tok = children[0]
    step = children[1].value if len(children) == 2 else None
    second_colon = Colon(
        whitespace_before=parse_parenthesizable_whitespace(
            config, colon_tok.whitespace_before
        ),
        whitespace_after=parse_parenthesizable_whitespace(
            config, colon_tok.whitespace_after
        ),
    )
    return SlicePartial(second_colon=second_colon, step=step)
@with_production("trailer_attribute", "'.' NAME")
def convert_trailer_attribute(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a '.name' trailer into an AttributePartial."""
    dot_tok, name_tok = children
    dot = Dot(
        whitespace_before=parse_parenthesizable_whitespace(
            config, dot_tok.whitespace_before
        ),
        whitespace_after=parse_parenthesizable_whitespace(
            config, dot_tok.whitespace_after
        ),
    )
    return AttributePartial(dot=dot, attr=Name(name_tok.string))
@with_production(
    "atom",
    "atom_parens | atom_squarebrackets | atom_curlybraces | atom_string | atom_basic | atom_ellipses",
)
def convert_atom(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Pass through whichever atom alternative was parsed."""
    (atom,) = children
    return atom
@with_production("atom_basic", "NAME | NUMBER | 'None' | 'True' | 'False'")
def convert_atom_basic(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a NAME or NUMBER token into the corresponding expression node.

    NUMBER tokens are classified into Integer/Float/Imaginary using the
    tokenize module's number regexes, since the node types are split up.
    """
    (child,) = children
    if child.type.name == "NAME":
        # This also handles 'None', 'True', and 'False' directly, but we
        # keep it in the grammar to be more correct.
        return WithLeadingWhitespace(Name(child.string), child.whitespace_before)
    elif child.type.name == "NUMBER":
        # We must determine what type of number it is since we split node
        # types up this way.
        if re.fullmatch(INTNUMBER_RE, child.string):
            return WithLeadingWhitespace(Integer(child.string), child.whitespace_before)
        elif re.fullmatch(FLOATNUMBER_RE, child.string):
            return WithLeadingWhitespace(Float(child.string), child.whitespace_before)
        elif re.fullmatch(IMAGNUMBER_RE, child.string):
            return WithLeadingWhitespace(
                Imaginary(child.string), child.whitespace_before
            )
        else:
            # BUG FIX: this message was missing its f-prefix, so it printed
            # the literal text "{child.string}" instead of the bad token.
            raise Exception(f"Unparseable number {child.string}")
    else:
        raise Exception(f"Logic error, unexpected token {child.type.name}")
@with_production("atom_squarebrackets", "'[' [testlist_comp_list] ']'")
def convert_atom_squarebrackets(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert '[...]' into a List/ListComp with bracket whitespace attached."""
    lbracket_tok, *body, rbracket_tok = children
    left = LeftSquareBracket(
        whitespace_after=parse_parenthesizable_whitespace(
            config, lbracket_tok.whitespace_after
        )
    )
    right = RightSquareBracket(
        whitespace_before=parse_parenthesizable_whitespace(
            config, rbracket_tok.whitespace_before
        )
    )
    if body:
        # body[0] is a List or ListComp; attach the brackets we just built.
        node = body[0].value.with_changes(lbracket=left, rbracket=right)
    else:
        # '[]' is the empty list.
        node = List((), lbracket=left, rbracket=right)
    return WithLeadingWhitespace(node, lbracket_tok.whitespace_before)
@with_production("atom_curlybraces", "'{' [dictorsetmaker] '}'")
def convert_atom_curlybraces(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert '{...}' into a Dict/Set (or comprehension) with brace whitespace."""
    lbrace_tok, *body, rbrace_tok = children
    left = LeftCurlyBrace(
        whitespace_after=parse_parenthesizable_whitespace(
            config, lbrace_tok.whitespace_after
        )
    )
    right = RightCurlyBrace(
        whitespace_before=parse_parenthesizable_whitespace(
            config, rbrace_tok.whitespace_before
        )
    )
    if body:
        # body[0] already knows whether it is a dict or set variant.
        node = body[0].value.with_changes(lbrace=left, rbrace=right)
    else:
        # '{}' is the empty dict.
        node = Dict((), lbrace=left, rbrace=right)
    return WithLeadingWhitespace(node, lbrace_tok.whitespace_before)
@with_production("atom_parens", "'(' [yield_expr|testlist_comp_tuple] ')'")
def convert_atom_parens(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert '(...)' by parenthesizing the inner node, or build an empty Tuple."""
    lpar_tok, *inner, rpar_tok = children
    lpar = LeftParen(
        whitespace_after=parse_parenthesizable_whitespace(
            config, lpar_tok.whitespace_after
        )
    )
    rpar = RightParen(
        whitespace_before=parse_parenthesizable_whitespace(
            config, rpar_tok.whitespace_before
        )
    )
    if not inner:
        # '()' is the empty tuple.
        return WithLeadingWhitespace(
            Tuple((), lpar=(lpar,), rpar=(rpar,)), lpar_tok.whitespace_before
        )
    # The inner atom is a _BaseParenthesizedNode; push our parens onto the
    # outside of any it already carries.
    wrapped = inner[0].value
    node = wrapped.with_changes(
        lpar=(lpar, *wrapped.lpar), rpar=(*wrapped.rpar, rpar)
    )
    return WithLeadingWhitespace(node, lpar_tok.whitespace_before)
@with_production("atom_ellipses", "'...'")
def convert_atom_ellipses(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert the literal '...' token into an Ellipsis node."""
    (ellipsis_tok,) = children
    return WithLeadingWhitespace(Ellipsis(), ellipsis_tok.whitespace_before)
@with_production("atom_string", "(STRING | fstring) [atom_string]")
def convert_atom_string(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a string atom, folding adjacent literals into ConcatenatedString."""
    if len(children) == 1:
        return children[0]
    head, tail = children
    node = ConcatenatedString(
        left=head.value,
        whitespace_between=parse_parenthesizable_whitespace(
            config, tail.whitespace_before
        ),
        right=tail.value,
    )
    return WithLeadingWhitespace(node, head.whitespace_before)
@with_production("fstring", "FSTRING_START fstring_content* FSTRING_END")
def convert_fstring(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Assemble a FormattedString from its quote tokens and content parts."""
    start_tok, *parts, end_tok = children
    node = FormattedString(
        start=start_tok.string, parts=tuple(parts), end=end_tok.string
    )
    return WithLeadingWhitespace(node, start_tok.whitespace_before)
@with_production("fstring_content", "FSTRING_STRING | fstring_expr")
def convert_fstring_content(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Turn a raw FSTRING_STRING token into FormattedStringText; pass exprs through."""
    (part,) = children
    if not isinstance(part, Token):
        # Already a converted fstring_expr node; hand it up unchanged.
        return part
    return FormattedStringText(part.string)
@with_production("fstring_conversion", "'!' NAME")
def convert_fstring_conversion(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Capture the '!name' conversion of an f-string expression.

    No whitespace can occur between the '!' and the name, so only the
    leading whitespace of '!' needs to be carried along.
    """
    exclaim_tok, name_tok = children
    return FormattedStringConversionPartial(
        name_tok.string, exclaim_tok.whitespace_before
    )
@with_production(
    "fstring_expr", "'{' testlist [ fstring_conversion ] [ fstring_format_spec ] '}'"
)
def convert_fstring_expr(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert the '{expr!conv:spec}' portion of an f-string.

    The optional conversion and format-spec partials arrive, in that order,
    between the testlist and the closing brace.
    """
    openbrkt, testlist, *conversions, closebrkt = children
    # Extract any optional conversion
    if len(conversions) > 0 and isinstance(
        conversions[0], FormattedStringConversionPartial
    ):
        conversion = conversions[0].value
        conversions = conversions[1:]
    else:
        conversion = None
    # Extract any optional format spec
    if len(conversions) > 0:
        # NOTE(review): `.values` (plural) -- presumably the tuple stored by
        # FormattedStringFormatSpecPartial; confirm against its definition.
        format_spec = conversions[0].values
    else:
        format_spec = None
    return FormattedStringExpression(
        whitespace_before_expression=parse_parenthesizable_whitespace(
            config, testlist.whitespace_before
        ),
        expression=testlist.value,
        # NOTE(review): children[2] is whichever node follows the testlist
        # (conversion partial, format-spec partial, or closing brace); each
        # appears to carry a whitespace_before attribute -- verify.
        whitespace_after_expression=parse_parenthesizable_whitespace(
            config, children[2].whitespace_before
        ),
        conversion=conversion,
        format_spec=format_spec,
    )
@with_production("fstring_format_spec", "':' fstring_content*")
def convert_fstring_format_spec(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Bundle the `:spec...` parts into a partial for the enclosing expr."""
    colon_tok, *spec_parts = children
    return FormattedStringFormatSpecPartial(
        tuple(spec_parts), colon_tok.whitespace_before
    )
@with_production(
    "testlist_comp_tuple",
    "(test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )",
)
def convert_testlist_comp_tuple(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Parenthesized form: yields a Tuple or a GeneratorExp."""
    return _convert_testlist_comp(
        config,
        children,
        sequence_type=Tuple,
        comprehension_type=GeneratorExp,
        single_child_is_sequence=False,
    )
@with_production(
    "testlist_comp_list",
    "(test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )",
)
def convert_testlist_comp_list(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Bracketed form: yields a List (even for one element) or a ListComp."""
    return _convert_testlist_comp(
        config,
        children,
        sequence_type=List,
        comprehension_type=ListComp,
        single_child_is_sequence=True,
    )
def _convert_testlist_comp(
    config: ParserConfig,
    children: typing.Sequence[typing.Any],
    single_child_is_sequence: bool,
    sequence_type: typing.Union[
        typing.Type[Tuple], typing.Type[List], typing.Type[Set]
    ],
    comprehension_type: typing.Union[
        typing.Type[GeneratorExp], typing.Type[ListComp], typing.Type[SetComp]
    ],
) -> typing.Any:
    """Dispatch a testlist_comp body to a sequence or a comprehension node."""
    # This is either a single-element list, or the second token is a comma, so we're not
    # in a generator.
    if len(children) == 1 or isinstance(children[1], Token):
        return _convert_sequencelike(
            config, children, single_child_is_sequence, sequence_type
        )
    else:
        # N.B. The parent node (e.g. atom) is responsible for computing and attaching
        # whitespace information on any parenthesis, square brackets, or curly braces
        elt, for_in = children
        return WithLeadingWhitespace(
            comprehension_type(elt=elt.value, for_in=for_in, lpar=(), rpar=()),
            elt.whitespace_before,
        )
@with_production("testlist_star_expr", "(test|star_expr) (',' (test|star_expr))* [',']")
@with_production("testlist", "test (',' test)* [',']")
@with_production("exprlist", "(expr|star_expr) (',' (expr|star_expr))* [',']")
def convert_test_or_expr_list(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Comma-separated expression lists used by statements and assignments.

    A single child passes through unchanged rather than being wrapped in a
    one-element Tuple.
    """
    return _convert_sequencelike(
        config, children, sequence_type=Tuple, single_child_is_sequence=False
    )
def _convert_sequencelike(
    config: ParserConfig,
    children: typing.Sequence[typing.Any],
    single_child_is_sequence: bool,
    sequence_type: typing.Union[
        typing.Type[Tuple], typing.Type[List], typing.Type[Set]
    ],
) -> typing.Any:
    """Convert alternating (element, comma, element, ...) children into
    `sequence_type`, attaching each separating comma to the element before it.
    """
    if not single_child_is_sequence and len(children) == 1:
        return children[0]
    # N.B. The parent node (e.g. atom) is responsible for computing and attaching
    # whitespace information on any parenthesis, square brackets, or curly braces
    elements = []
    for wrapped_expr_or_starred_element, comma_token in grouper(children, 2):
        expr_or_starred_element = wrapped_expr_or_starred_element.value
        if comma_token is None:
            # Final element with no trailing comma.
            comma = MaybeSentinel.DEFAULT
        else:
            comma = Comma(
                whitespace_before=parse_parenthesizable_whitespace(
                    config, comma_token.whitespace_before
                ),
                # Only compute whitespace_after if we're not a trailing comma.
                # If we're a trailing comma, that whitespace should be consumed by the
                # TrailingWhitespace, parenthesis, etc.
                whitespace_after=(
                    parse_parenthesizable_whitespace(
                        config, comma_token.whitespace_after
                    )
                    if comma_token is not children[-1]
                    else SimpleWhitespace("")
                ),
            )
        if isinstance(expr_or_starred_element, StarredElement):
            starred_element = expr_or_starred_element
            elements.append(starred_element.with_changes(comma=comma))
        else:
            expr = expr_or_starred_element
            elements.append(Element(value=expr, comma=comma))
    # lpar/rpar are the responsibility of our parent
    return WithLeadingWhitespace(
        sequence_type(elements, lpar=(), rpar=()), children[0].whitespace_before
    )
@with_production(
    "dictorsetmaker",
    (
        "( ((test ':' test | '**' expr)"
        + " (comp_for | (',' (test ':' test | '**' expr))* [','])) |"
        + "((test | star_expr) "
        + " (comp_for | (',' (test | star_expr))* [','])) )"
    ),
)
def convert_dictorsetmaker(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Decide whether a brace-literal body is a dict or a set and convert it.

    There is always at least one child here; `atom_curlybraces` handles `{}`.
    """
    looks_like_dict = len(children) > 1 and (
        (isinstance(children[1], Token) and children[1].string == ":")
        or (isinstance(children[0], Token) and children[0].string == "**")
    )
    if looks_like_dict:
        return _convert_dict(config, children)
    return _convert_set(config, children)
def _convert_dict_element(
    config: ParserConfig,
    children_iter: typing.Iterator[typing.Any],
    last_child: typing.Any,
) -> typing.Union[DictElement, StarredDictElement]:
    """Consume one `key: value` or `**expr` entry (plus optional comma) from
    `children_iter`. StopIteration escapes to the caller when the iterator is
    exhausted before an entry starts.
    """
    first = next(children_iter)
    if isinstance(first, Token) and first.string == "**":
        expr = next(children_iter)
        element = StarredDictElement(
            expr.value,
            whitespace_before_value=parse_parenthesizable_whitespace(
                config, expr.whitespace_before
            ),
        )
    else:
        key = first
        colon_tok = next(children_iter)
        value = next(children_iter)
        element = DictElement(
            key.value,
            value.value,
            whitespace_before_colon=parse_parenthesizable_whitespace(
                config, colon_tok.whitespace_before
            ),
            whitespace_after_colon=parse_parenthesizable_whitespace(
                config, colon_tok.whitespace_after
            ),
        )
    # Handle the trailing comma (if there is one)
    try:
        comma_token = next(children_iter)
        element = element.with_changes(
            comma=Comma(
                whitespace_before=parse_parenthesizable_whitespace(
                    config, comma_token.whitespace_before
                ),
                # Only compute whitespace_after if we're not a trailing comma.
                # If we're a trailing comma, that whitespace should be consumed by the
                # RightBracket.
                whitespace_after=(
                    parse_parenthesizable_whitespace(
                        config, comma_token.whitespace_after
                    )
                    if comma_token is not last_child
                    else SimpleWhitespace("")
                ),
            )
        )
    except StopIteration:
        pass
    return element
def _convert_dict(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a dict-literal body, delegating to the dict-comp converter
    when a CompFor follows the first entry.
    """
    is_first_starred = isinstance(children[0], Token) and children[0].string == "**"
    if is_first_starred:
        # `** expr [comp_for]` — a comp_for would be the third child.
        possible_comp_for = None if len(children) < 3 else children[2]
    else:
        # `key : value [comp_for]` — a comp_for would be the fourth child.
        possible_comp_for = None if len(children) < 4 else children[3]
    if isinstance(possible_comp_for, CompFor):
        if is_first_starred:
            # TODO: Make this a ParserSyntaxError
            raise Exception("dict unpacking cannot be used in dict comprehension")
        return _convert_dict_comp(config, children)
    children_iter = iter(children)
    last_child = children[-1]
    elements = []
    # _convert_dict_element raises StopIteration once the children run out.
    while True:
        try:
            elements.append(_convert_dict_element(config, children_iter, last_child))
        except StopIteration:
            break
    # lbrace, rbrace, lpar, and rpar will be attached as-needed by the atom grammar
    return WithLeadingWhitespace(Dict(tuple(elements)), children[0].whitespace_before)
def _convert_dict_comp(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert `key : value comp_for` into a DictComp node."""
    key, colon_token, value, comp_for = children
    return WithLeadingWhitespace(
        DictComp(
            key.value,
            value.value,
            comp_for,
            # lbrace, rbrace, lpar, and rpar will be attached as-needed by the atom grammar
            whitespace_before_colon=parse_parenthesizable_whitespace(
                config, colon_token.whitespace_before
            ),
            whitespace_after_colon=parse_parenthesizable_whitespace(
                config, colon_token.whitespace_after
            ),
        ),
        key.whitespace_before,
    )
def _convert_set(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Build a Set literal or SetComp from a brace-literal body."""
    return _convert_testlist_comp(
        config,
        children,
        sequence_type=Set,
        comprehension_type=SetComp,
        single_child_is_sequence=True,
    )
@with_production("arglist", "argument (',' argument)* [',']")
def convert_arglist(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Attach trailing Comma nodes to each argument and wrap in a partial."""
    converted = []
    for arg, comma_tok in grouper(children, 2):
        if comma_tok is None:
            # Final argument with no trailing comma.
            converted.append(arg)
            continue
        comma = Comma(
            whitespace_before=parse_parenthesizable_whitespace(
                config, comma_tok.whitespace_before
            ),
            whitespace_after=parse_parenthesizable_whitespace(
                config, comma_tok.whitespace_after
            ),
        )
        converted.append(arg.with_changes(comma=comma))
    return ArglistPartial(converted)
@with_production("argument", "arg_assign_comp_for | star_arg")
def convert_argument(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Single-child production: forward the already-converted argument."""
    (arg,) = children
    return arg
@with_production("arg_assign_comp_for", "test [comp_for] | test '=' test")
def convert_arg_assign_comp_for(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert a call argument: plain value, bare generator, or keyword=value."""
    if len(children) == 1:
        # Plain positional argument.
        (value_node,) = children
        return Arg(value=value_node.value)
    if len(children) == 2:
        # Bare generator expression passed as the sole argument.
        elt, for_in = children
        return Arg(value=GeneratorExp(elt.value, for_in, lpar=(), rpar=()))
    # Keyword argument: NAME '=' value.
    keyword_node, equal_tok, value_node = children
    equal = AssignEqual(
        whitespace_before=parse_parenthesizable_whitespace(
            config, equal_tok.whitespace_before
        ),
        whitespace_after=parse_parenthesizable_whitespace(
            config, equal_tok.whitespace_after
        ),
    )
    return Arg(keyword=keyword_node.value, equal=equal, value=value_node.value)
@with_production("star_arg", "'**' test | '*' test")
def convert_star_arg(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert `*value` / `**value` call arguments."""
    star_tok, value_node = children
    after_star = parse_parenthesizable_whitespace(config, star_tok.whitespace_after)
    return Arg(
        star=star_tok.string,
        whitespace_after_star=after_star,
        value=value_node.value,
    )
@with_production("sync_comp_for", "'for' exprlist 'in' or_test comp_if* [comp_for]")
def convert_sync_comp_for(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert one `for target in iterable` comprehension clause.

    Trailing children are zero or more `comp_if` clauses, optionally followed
    by a nested `comp_for` (chained comprehensions such as
    ``for x in xs for y in x``).
    """
    # `iter_node` (was `iter`) avoids shadowing the builtin `iter`.
    for_tok, target, in_tok, iter_node, *trailing = children
    if len(trailing) and isinstance(trailing[-1], CompFor):
        # Last trailing child is a nested `for` clause.
        *ifs, inner_for_in = trailing
    else:
        ifs, inner_for_in = trailing, None
    return CompFor(
        target=target.value,
        iter=iter_node.value,
        ifs=ifs,
        inner_for_in=inner_for_in,
        whitespace_before=parse_parenthesizable_whitespace(
            config, for_tok.whitespace_before
        ),
        whitespace_after_for=parse_parenthesizable_whitespace(
            config, for_tok.whitespace_after
        ),
        whitespace_before_in=parse_parenthesizable_whitespace(
            config, in_tok.whitespace_before
        ),
        whitespace_after_in=parse_parenthesizable_whitespace(
            config, in_tok.whitespace_after
        ),
    )
@with_production("comp_for", "['async'] sync_comp_for")
def convert_comp_for(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Attach an optional `async` prefix to an already-converted CompFor."""
    if len(children) == 1:
        # No `async` keyword; pass the CompFor through unchanged.
        (sync_comp_for,) = children
        return sync_comp_for
    else:
        (async_tok, sync_comp_for) = children
        return sync_comp_for.with_changes(
            # asynchronous steals the `CompFor`'s `whitespace_before`.
            asynchronous=Asynchronous(whitespace_after=sync_comp_for.whitespace_before),
            # But, in exchange, `CompFor` gets to keep `async_tok`'s leading
            # whitespace, because that's now the beginning of the `CompFor`.
            whitespace_before=parse_parenthesizable_whitespace(
                config, async_tok.whitespace_before
            ),
        )
@with_production("comp_if", "'if' test_nocond")
def convert_comp_if(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert an `if cond` comprehension filter clause."""
    if_tok, cond = children
    before_if = parse_parenthesizable_whitespace(config, if_tok.whitespace_before)
    before_test = parse_parenthesizable_whitespace(config, cond.whitespace_before)
    return CompIf(
        cond.value,
        whitespace_before=before_if,
        whitespace_before_test=before_test,
    )
@with_production("yield_expr", "'yield' [yield_arg]")
def convert_yield_expr(
    config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
    """Convert `yield`, `yield value`, or `yield from value`."""
    yield_tok = children[0]
    if len(children) == 1:
        # Bare `yield` — implicitly yields None.
        node = Yield(value=None)
    else:
        arg = children[1]
        node = Yield(
            value=arg.value,
            whitespace_after_yield=parse_parenthesizable_whitespace(
                config, arg.whitespace_before
            ),
        )
    return WithLeadingWhitespace(node, yield_tok.whitespace_before)
@with_production("yield_arg", "'from' test | testlist")
def convert_yield_arg(
config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
if len(children) == 1:
# Just a regular testlist, pass it up
(child,) = children
return child
else:
# Its a yield from
(from_token, test) = children
return WithLeadingWhitespace(
From(
item=test.value,
whitespace_after_from=parse_parenthesizable_whitespace(
config, test.whitespace_before
),
),
from_token.whitespace_before,
)
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Moritz Lode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from sqlalchemy import Column, ForeignKey, Integer, BigInteger, String, Table, UnicodeText, Unicode, Boolean, LargeBinary, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
import json
from os import path #posixpath
# Single declarative base shared by every ORM model in this module.
Base = declarative_base()
'''
''
' Table Definitions
''
'''
class Channel(Base):
    """A crawled YouTube channel and its slowly-changing metadata."""
    __tablename__ = 'channel'
    id = Column(String(24), primary_key=True) # string may not work, use other then
    title = Column(Unicode(100), nullable=False)
    keywords = Column(UnicodeText, nullable=False) #a space-separated list of strings.
    description = Column(UnicodeText) # max 1000chars
    dateAdded = Column(String(24), nullable=False)
    uploadsPlaylistID = Column(String(24), nullable=False)
    latestUploadsIDs = Column(String(750), nullable=False) # json list string
    unsubscribedTrailer = Column(String(11), nullable=False)
    topicIds = Column(String(131))
    network = Column(String(42)) # for later addition
    crawlTimestamp = Column(String(20), nullable=False)
    thumbnailUrl = Column(String(300))
    # Child rows are removed when the channel row is deleted.
    history = relationship('ChannelHistory', cascade='delete')
    videos = relationship('Video', cascade='delete')
    featured = relationship('FeaturedChannel', cascade='delete')

    def __repr__(self):
        return u'<Channel (name=|%s|)>' % self.title
class FeaturedChannel(Base):
    """Link row: a channel featuring another channel on its page."""
    __tablename__ = 'channel_featured'
    id = Column(Integer, primary_key=True)
    channelID = Column(String(24), ForeignKey('channel.id'))
    featuredChannelID = Column(String(24))
class ChannelHistory(Base):
    """Per-crawl snapshot of a channel's time-varying counters."""
    __tablename__ = 'channel_history'
    id = Column(Integer, primary_key=True)
    channelID = Column(String(24), ForeignKey('channel.id'))
    viewCount = Column(BigInteger, nullable=False)
    subscriberCount = Column(Integer, nullable=False)
    commentCount = Column(Integer, nullable=False)
    videoCount = Column(Integer, nullable=False)
    crawlTimestamp = Column(String(20), nullable=False)
class Video(Base):
    """A crawled YouTube video belonging to a Channel."""
    __tablename__ = 'video'
    id = Column(String(11), primary_key=True) # string may not work, use other then
    channelID = Column(String(24), ForeignKey('channel.id'))
    title = Column(Unicode(300), nullable=False)
    description = Column(UnicodeText, nullable=False) # max ~5000 characters actually
    category = Column(Integer, nullable=False)
    dateAdded = Column(String(24), nullable=False)
    tags = Column(Unicode(750), nullable=False) # max 500 characters
    topicIds = Column(String(131))
    attribution = Column(String(42)) # for later network attribution
    duration = Column(String(20), nullable=False)
    crawlTimestamp = Column(String(20), nullable=False) # datetime type
    deleted = Column(Boolean)
    # NOTE(review): unlike the other relationships here, `history` has no
    # cascade='delete' — confirm whether that asymmetry is intentional.
    history = relationship('VideoHistory')
    feature = relationship('VideoFeatures', cascade='delete')
class VideoHistory(Base):
    """Per-crawl snapshot of a video's time-varying counters."""
    __tablename__ = 'video_history'
    id = Column(Integer, primary_key=True)
    videoID = Column(String(11), ForeignKey('video.id'))
    viewCount = Column(Integer, nullable=False)
    commentCount = Column(Integer, nullable=False)
    likeCount = Column(Integer, nullable=False)
    dislikeCount = Column(Integer, nullable=False)
    crawlTimestamp = Column(String(20), nullable=False)
class VideoFeatures(Base):
    """Binary feature blob extracted from a video."""
    __tablename__ = 'video_features'
    id = Column(Integer, primary_key=True)
    videoID = Column(String(11), ForeignKey('video.id'))
    feature = Column(LargeBinary) # correct datatype for numpy/pandas array? test
    duration = Column(Float) # correct datatype for numpy/pandas array? test
    cluster = relationship('VideoFaceCluster', cascade='delete')
class VideoFaceCluster(Base):
    """Face-cluster assignment for one feature row."""
    __tablename__ = 'video_face_cluster'
    id = Column(Integer, primary_key=True)
    featureID = Column(Integer, ForeignKey('video_features.id'))
    cluster = Column(Integer)
class VideoFeatureQueue(Base):
    """Work queue for feature extraction.

    The primary key `id` holds the video id itself (YTDatabase.updateQueue
    filters on it with a video id).
    """
    __tablename__ = 'video_feature_queue'
    id = Column(String(11), primary_key=True)
    state = Column(String(9))
'''
''
' Database API class
''
'''
class YTDatabase(object):
    """MySQL-backed store for crawled YouTube channel/video data.

    On construction the database (and any missing tables) are created and the
    connection is forced to utf8mb4 so 4-byte characters (emoji) round-trip.
    Every mutating helper runs inside its own committed session.
    """
    #DATA_DIR = '/../../../data/'
    #DB_FILE = 'ytDatabase.db'
    DB_NAME = 'X'
    DB_USER = 'X'
    DB_PW = 'X'
    DB_HOST = '127.0.0.1'
    DB_PORT = '3306'

    def __init__(self):
        #DB_PATH = path.join(self.DATA_DIR, self.DB_FILE)
        #self.engine = create_engine('sqlite://'+DB_PATH, encoding='utf-8', convert_unicode=True)
        # This engine just used to query for list of databases
        mysql_engine = create_engine('mysql+mysqldb://{0}:{1}@{2}:{3}'.format(self.DB_USER, self.DB_PW, self.DB_HOST, self.DB_PORT), encoding='utf-8', convert_unicode=True)
        # Query for existing databases
        mysql_engine.execute("CREATE DATABASE IF NOT EXISTS {0} ".format(self.DB_NAME))
        # Go ahead and use this engine
        self.engine = create_engine('mysql+mysqldb://{0}:{1}@{2}:{3}/{4}?charset=utf8mb4'.format(self.DB_USER, self.DB_PW, self.DB_HOST, self.DB_PORT, self.DB_NAME), encoding='utf-8', convert_unicode=True)
        Base.metadata.bind = self.engine
        self.DBSession = sessionmaker(bind = self.engine)
        self.createDatabase()
        # Force utf8mb4 on the connection so emoji-bearing titles survive.
        self.DBSession().execute("SET NAMES utf8mb4 COLLATE 'utf8mb4_unicode_ci'")
        self.DBSession().execute("SET CHARACTER SET utf8mb4")

    def createDatabase(self, drop=False, update=False):
        """Create all tables; optionally drop everything first, or create
        newly-added tables when `update` is set."""
        if drop:
            Base.metadata.drop_all()
        if not self.engine.table_names(): # checks if no tables exists
            Base.metadata.create_all()
        elif update:
            Base.metadata.create_all()

    @contextmanager
    def _session_scope(self, commit=False):
        """Provide a transactional scope around a series of operations."""
        session = self.DBSession()
        try:
            yield session
            if commit:
                session.commit()
        except:
            session.rollback()
            raise
        finally:
            session.close()

    # TODO handle if already entry with id, sqlalchemy raises exception, integrityerror
    def addChannelEntry(self, channelItem):
        """Insert a Channel row plus its FeaturedChannel link rows."""
        with self._session_scope(True) as session:
            channel = Channel(
                id=channelItem['id'],
                title=channelItem['title'],
                keywords=u'{}'.format(json.dumps(channelItem['keywords'])),
                description=channelItem['description'], # use zlib to compress string?
                dateAdded=channelItem['dateAdded'],
                uploadsPlaylistID=channelItem['uploadsPlaylistID'],
                latestUploadsIDs=json.dumps([]), # will be updated later
                topicIds=json.dumps(channelItem['topicIds'][:10], separators=(',',':')),
                unsubscribedTrailer=channelItem['unsubscribedTrailer'],
                crawlTimestamp=channelItem['crawlTimestamp'])
            # `featured_id` (was `id`) avoids shadowing the builtin.
            for featured_id in channelItem['featuredChannelsIDs']:
                fc = FeaturedChannel(channelID=channelItem['id'], featuredChannelID=featured_id)
                session.add(fc)
                channel.featured.append(fc)
            session.add(channel)

    def updateChannelEntry(self, channelItem):
        """Refresh the mutable metadata columns of an existing channel."""
        with self._session_scope(True) as session:
            session.query(Channel).filter(Channel.id == channelItem['id'])\
                .update({
                    "title": channelItem['title'],
                    "keywords": u'{}'.format(json.dumps(channelItem['keywords'])),
                    "description": channelItem['description'],
                    "topicIds": json.dumps(channelItem['topicIds'][:10], separators=(',',':')),
                    "unsubscribedTrailer": channelItem['unsubscribedTrailer']})

    def updateChannelThumbnail(self, id, thumbnailUrl):
        """Set the thumbnail URL of one channel."""
        with self._session_scope(True) as session:
            session.query(Channel).filter(Channel.id == id)\
                .update({
                    "thumbnailUrl": thumbnailUrl
                })

    def addChannelHistoryEntry(self, channelStatisticsItem):
        """Append a per-crawl counter snapshot for a channel."""
        with self._session_scope(True) as session:
            channelStats = ChannelHistory(
                channelID=channelStatisticsItem['id'],
                viewCount=channelStatisticsItem['viewCount'],
                subscriberCount=channelStatisticsItem['subscriberCount'],
                commentCount=channelStatisticsItem['commentCount'],
                videoCount=channelStatisticsItem['videoCount'],
                crawlTimestamp=channelStatisticsItem['crawlTimestamp'])
            session.add(channelStats)

    def updateLatestUploads(self, videoListItem):
        """Store the channel's latest upload ids as a compact JSON list."""
        with self._session_scope(True) as session:
            session.query(Channel).filter(Channel.id == videoListItem['channelID'])\
                .update({'latestUploadsIDs': json.dumps(videoListItem['videoIDs'], separators=(',',':'))})

    def updateChannelNetwork(self, channelID, networkName):
        """Attach a network name to a channel."""
        with self._session_scope(True) as session:
            session.query(Channel).filter(Channel.id == channelID)\
                .update({'network': networkName})

    def addVideoEntry(self, videoItem):
        """Insert a Video row; `attribution` is optional in the input dict."""
        with self._session_scope(True) as session:
            video = Video(
                id=videoItem['id'],
                channelID=videoItem['channelID'],
                title=videoItem['title'],
                description=videoItem['description'], # use zlib to compress string?
                category=videoItem['category'],
                duration=videoItem['duration'],
                dateAdded=videoItem['dateAdded'],
                # {:.750} truncates the JSON to the column's 750-char limit.
                tags=u'{:.750}'.format(json.dumps(videoItem['tags'], separators=(',',':'))),
                topicIds=json.dumps(videoItem['topicIds'][:10], separators=(',',':')),
                crawlTimestamp=videoItem['crawlTimestamp'])
            if 'attribution' in videoItem:
                video.attribution = videoItem['attribution']
            session.add(video)

    def updateVideoEntry(self, videoItem):
        """Refresh the mutable metadata columns of an existing video."""
        with self._session_scope(True) as session:
            session.query(Video).filter(Video.id == videoItem['id'])\
                .update({
                    "title": videoItem['title'],
                    "description": videoItem['description'], # use zlib to compress string?,
                    "tags": u'{:.750}'.format(json.dumps(videoItem['tags'], separators=(',',':'))),
                    "topicIds": json.dumps(videoItem['topicIds'][:10], separators=(',',':'))})

    def setVideoDeleted(self, videoID):
        """Mark a video as deleted without removing its rows."""
        with self._session_scope(True) as session:
            session.query(Video).filter(Video.id == videoID)\
                .update({
                    "deleted": True})

    def addVideoHistoryEntry(self, videoStatisticsItem):
        """Append a per-crawl counter snapshot for a video."""
        with self._session_scope(True) as session:
            videoStats = VideoHistory(
                videoID=videoStatisticsItem['id'],
                viewCount=videoStatisticsItem['viewCount'],
                commentCount=videoStatisticsItem['commentCount'],
                likeCount=videoStatisticsItem['likeCount'],
                dislikeCount=videoStatisticsItem['dislikeCount'],
                crawlTimestamp=videoStatisticsItem['crawlTimestamp'])
            session.add(videoStats)

    def enqueueVideo(self, videoItem):
        """Add a video to the feature-extraction queue in state 'new'.

        BUG FIX: VideoFeatureQueue has no `videoID` column — its primary key
        `id` holds the video id (updateQueue filters on VideoFeatureQueue.id).
        Passing `videoID=` raised TypeError: invalid keyword argument.
        """
        with self._session_scope(True) as session:
            videoQ = VideoFeatureQueue(
                id=videoItem['id'],
                state='new')
            session.add(videoQ)

    def updateQueue(self, videoID, state):
        """Move a queued video to a new state."""
        with self._session_scope(True) as session:
            session.query(VideoFeatureQueue).filter(VideoFeatureQueue.id == videoID)\
                .update({
                    "state": state})
|
from auxiliary_functions import *
def create_degrees_list(degrees, low, high):
    """Return the unique degrees strictly inside (low, high), in input order.

    When high < low the window wraps around (e.g. 340..20 selects values
    above 340 or below 20).
    """
    wraps_around = high < low
    selected = []
    for deg in degrees:
        if wraps_around:
            in_window = deg > low or deg < high
        else:
            in_window = low < deg < high
        if in_window and deg not in selected:
            selected.append(deg)
    return selected
def create_array_of_points_by_degrees(degrees, frames):
    """Collect the distinct points from frames[key] for every degree key,
    preserving first-seen order."""
    collected = []
    for degree in degrees:
        for candidate in list(frames[degree]):
            if candidate not in collected:
                collected.append(candidate)
    return collected
def find_rect_center(rect):
    """Centroid of the four rectangle corners, returned as (x, y)."""
    total_x = sum(corner.x for corner in rect)
    total_y = sum(corner.y for corner in rect)
    return total_x / 4, total_y / 4
def nlogn_median(points_with_distance):  # l already sorted by second index
    """Middle element (upper median for even lengths) of a pre-sorted list."""
    middle = int(0.5 * len(points_with_distance))
    return points_with_distance[middle]
def closest_corner_index(point, corner_points):
    """Index (0..3) of the rectangle corner nearest to `point`."""
    best_index = 0
    best_distance = maxsize
    for idx in range(4):
        dist = calculate_distance(point, corner_points[idx])
        if dist < best_distance:
            best_distance = dist
            best_index = idx
    return best_index
def get_slope(point1, point2):
    """Slope of the line through the two points (ZeroDivisionError if vertical)."""
    rise = point1.y - point2.y
    run = point1.x - point2.x
    return rise / run
def find_intersection(m, b, n, c):  # intersection of mx+b, nx+c
    """Intersection point of the lines y = m*x + b and y = n*x + c."""
    x_cross = (c - b) / (m - n)
    return Point(x_cross, m * x_cross + b, 0)
def find_rect_point(closest_line, rect):
    """Index in `rect` of the corner matching (x, y); len(rect) if absent."""
    for index, corner in enumerate(rect):
        if corner.x == closest_line[0] and corner.y == closest_line[1]:
            return index
    return len(rect)
def find_best_segment_without_fix(point, lines):
    """Return the segment of `lines` nearest to `point`, plus that distance."""
    best_distance = maxsize
    best_index = -1
    for idx, segment in enumerate(lines):
        dist = distance_between_point_and_segment(
            point.x, point.y, segment[0][0], segment[1][0], segment[0][1],
            segment[1][1])
        if dist < best_distance:
            best_distance = dist
            best_index = idx
    return lines[best_index], best_distance
"""def check_if_point_is_between_two_parallel_lines(slope, y_intercept1, y_intercept2, point):
min = y_intercept1 if y_intercept1<y_intercept2 else y_intercept2
max = y_intercept1 + y_intercept2 - min
value = point.y - slope * point.x
if value>=min and value<=max:
return True
return False"""
def check_if_point_in_corner(point, lines):
    """True iff `point` lies outside both pairs of parallel rectangle sides."""
    rect_lines = turn_lines_to_rect_lines(lines)
    pair_a, pair_b = find_parallel_lines(rect_lines)
    inside_a = check_point_between_two_lines(point, pair_a[0], pair_a[1])
    inside_b = check_point_between_two_lines(point, pair_b[0], pair_b[1])
    return not inside_a and not inside_b
# ToDo: fix slope == 0 (in all of the code)
def update_rect_and_lines(rect, point_median, lines):  # update by the point(expand the rect)
    """Expand the rectangle so it reaches `point_median`, mutating `rect` and
    `lines` IN PLACE (they are also returned).

    Corner case: the hit corner is moved to the point and its two neighbours
    are re-projected onto the old sides. Side case: the nearest side is shifted
    through the point and its two endpoints are re-projected perpendicularly.
    """
    # (Previously disabled early-exit, kept for reference:
    #  if check_if_point_in_rectangle(point_median, lines): return rect, lines)
    if check_if_point_in_corner(point_median, lines):
        closest_corner = closest_corner_index(point_median, rect)
        # new sides
        slope1 = get_slope(rect[(closest_corner - 1) % 4], rect[closest_corner])
        slope2 = get_slope(rect[(closest_corner + 1) % 4], rect[closest_corner])
        rect[closest_corner] = point_median
        y_intercept1 = rect[closest_corner].y - slope1 * rect[closest_corner].x
        y_intercept2 = rect[closest_corner].y - slope2 * rect[closest_corner].x
        # old_sides
        slope3 = slope1
        slope4 = slope2
        y_intercept3 = rect[(closest_corner + 1) % 4].y - slope1 * rect[(closest_corner + 1) % 4].x
        y_intercept4 = rect[(closest_corner - 1) % 4].y - slope2 * rect[(closest_corner - 1) % 4].x
        rect[(closest_corner + 1) % 4] = find_intersection(slope2, y_intercept2, slope3, y_intercept3)
        rect[(closest_corner - 1) % 4] = find_intersection(slope1, y_intercept1, slope4, y_intercept4)
    else:
        closest_line = find_best_segment_without_fix(point_median, lines)[0]
        # Slope of the side being moved; see `lines` layout below:
        # each line is ((x1, x2), (y1, y2)).
        slope_new = (closest_line[1][1] - closest_line[1][0]) / (closest_line[0][1] - closest_line[0][0])
        new_y_intercept = point_median.y - slope_new * point_median.x
        index_of_rect1, index_of_rect2 = find_rect_point((closest_line[0][0], closest_line[1][0]),
                                                         rect), find_rect_point(
            (closest_line[0][1], closest_line[1][1]), rect)
        if slope_new != 0:
            # Perpendicular projection of the two endpoints onto the new side.
            slope_other_side = -1 / slope_new
            index_of_rect1_intercept = rect[index_of_rect1].y - slope_other_side * rect[index_of_rect1].x
            index_of_rect2_intercept = rect[index_of_rect2].y - slope_other_side * rect[index_of_rect2].x
            rect[index_of_rect1] = find_intersection(slope_new, new_y_intercept, slope_other_side,
                                                     index_of_rect1_intercept)
            rect[index_of_rect2] = find_intersection(slope_new, new_y_intercept, slope_other_side,
                                                     index_of_rect2_intercept)
        else:
            # Horizontal side: only the y coordinates move.
            rect[index_of_rect1] = Point(rect[index_of_rect1].x, point_median.y, 0)
            rect[index_of_rect2] = Point(rect[index_of_rect2].x, point_median.y, 0)
    # Rebuild all four sides from the updated corners.
    for i in range(4):
        lines[i] = ((rect[i].x, rect[(i + 1) % 4].x), (rect[i].y, rect[(i + 1) % 4].y))
    return rect, lines
def find_frame_id_of_array_points_from_rect_center(points, rect):
    """Point at the median distance from the rect center, with that distance."""
    center_x, center_y = find_rect_center(rect)
    center = Point(center_x, center_y, 0)
    ranked = sorted(
        ([p, calculate_distance(p, center)] for p in points),
        key=lambda pair: pair[1],
    )
    median_pair = nlogn_median(ranked)
    return median_pair[0], median_pair[1]
def rect_area(rect):
    """Rectangle area: |p0p1| * |p1p2|."""
    side_a = calculate_distance(rect[0], rect[1])
    side_b = calculate_distance(rect[1], rect[2])
    return side_a * side_b
def expend_rectangle(points, rect, lines):
    """Iteratively expand `rect`/`lines` toward median points drawn from four
    angular windows (around 0, 90, 180 and 270 degrees), keeping whichever
    rectangle covers more area.

    NOTE(review): `update_rect_and_lines` mutates its arguments in place, so
    `curr_rect` can alias `rect`; and if all four degree lists are empty,
    `curr_rect` is never bound and `rect_area(curr_rect)` raises
    UnboundLocalError. Both look unintended — confirm with the author.
    NOTE(review): the name is presumably a typo for "expand_rectangle";
    renaming would break callers, so it is kept.
    """
    frames = create_frames_by_degree(points)
    # Candidate degree keys in four windows; bounds are exclusive, so e.g.
    # (340, 20) wraps around zero.
    degrees_350_10 = create_degrees_list(frames.keys(), 340, 20)
    degrees_80_100 = create_degrees_list(frames.keys(), 70, 110)
    degrees_170_190 = create_degrees_list(frames.keys(), 160, 200)
    degrees_260_280 = create_degrees_list(frames.keys(), 250, 290)
    points_350_10 = create_array_of_points_by_degrees(degrees_350_10, frames)
    points_80_100 = create_array_of_points_by_degrees(degrees_80_100, frames)
    points_170_190 = create_array_of_points_by_degrees(degrees_170_190, frames)
    points_260_280 = create_array_of_points_by_degrees(degrees_260_280, frames)
    for degree_change in range(0, 90, 1):  # NOTE(review): degree_change is unused
        is_happened = False  # whether curr_rect/curr_lines are bound yet
        if len(degrees_350_10) > 0:
            point_350_10, frame_id_350_10 = find_frame_id_of_array_points_from_rect_center(points_350_10, rect)
            if is_happened:
                curr_rect, curr_lines = update_rect_and_lines(curr_rect, point_350_10, curr_lines)
            else:
                curr_rect, curr_lines = update_rect_and_lines(rect, point_350_10, lines)
                is_happened = True
        if len(degrees_80_100) > 0:
            point_80_100, frame_id_80_100 = find_frame_id_of_array_points_from_rect_center(points_80_100, rect)
            # NOTE(review): unlike the 350_10 branch, rect/lines are also
            # updated directly here — confirm the asymmetry is intentional.
            rect, lines = update_rect_and_lines(rect, point_80_100, lines)
            if is_happened:
                curr_rect, curr_lines = update_rect_and_lines(curr_rect, point_80_100, curr_lines)
            else:
                curr_rect, curr_lines = update_rect_and_lines(rect, point_80_100, lines)
                is_happened = True
        if len(degrees_170_190) > 0:
            point_170_190, frame_id_170_190 = find_frame_id_of_array_points_from_rect_center(points_170_190, rect)
            rect, lines = update_rect_and_lines(rect, point_170_190, lines)
            if is_happened:
                curr_rect, curr_lines = update_rect_and_lines(curr_rect, point_170_190, curr_lines)
            else:
                curr_rect, curr_lines = update_rect_and_lines(rect, point_170_190, lines)
                is_happened = True
        if len(degrees_260_280) > 0:
            point_260_280, frame_id_260_280 = find_frame_id_of_array_points_from_rect_center(points_260_280, rect)
            rect, lines = update_rect_and_lines(rect, point_260_280, lines)
            if is_happened:
                curr_rect, curr_lines = update_rect_and_lines(curr_rect, point_260_280, curr_lines)
            else:
                curr_rect, curr_lines = update_rect_and_lines(rect, point_260_280, lines)
        # Keep whichever rectangle covers more area.
        if rect_area(curr_rect) > rect_area(rect):
            rect = curr_rect
            lines = curr_lines
    return rect, lines
|
import ast
import os
import re
import sys
import json
import math
import collections
import nbconvert
import nbformat
# Sentinel result string for a passing check; compared against in main() when
# summing the score.
PASS = "PASS"
# Answer format tag: the cell's Out[N] plain-text repr is compared literally.
TEXT_FORMAT = "text"
# number: the N in a '#qN' cell comment; weight: contribution to the final
# score; format: how the output is compared (only TEXT_FORMAT is used here).
Question = collections.namedtuple("Question", ["number", "weight", "format"])
# The 20 graded questions, all equally weighted plain-text answers.
questions = [
    Question(number=1, weight=1, format=TEXT_FORMAT),
    Question(number=2, weight=1, format=TEXT_FORMAT),
    Question(number=3, weight=1, format=TEXT_FORMAT),
    Question(number=4, weight=1, format=TEXT_FORMAT),
    Question(number=5, weight=1, format=TEXT_FORMAT),
    Question(number=6, weight=1, format=TEXT_FORMAT),
    Question(number=7, weight=1, format=TEXT_FORMAT),
    Question(number=8, weight=1, format=TEXT_FORMAT),
    Question(number=9, weight=1, format=TEXT_FORMAT),
    Question(number=10, weight=1, format=TEXT_FORMAT),
    Question(number=11, weight=1, format=TEXT_FORMAT),
    Question(number=12, weight=1, format=TEXT_FORMAT),
    Question(number=13, weight=1, format=TEXT_FORMAT),
    Question(number=14, weight=1, format=TEXT_FORMAT),
    Question(number=15, weight=1, format=TEXT_FORMAT),
    Question(number=16, weight=1, format=TEXT_FORMAT),
    Question(number=17, weight=1, format=TEXT_FORMAT),
    Question(number=18, weight=1, format=TEXT_FORMAT),
    Question(number=19, weight=1, format=TEXT_FORMAT),
    Question(number=20, weight=1, format=TEXT_FORMAT),
]
# Valid question numbers, for fast membership checks when scanning cells.
question_nums = set([q.number for q in questions])
# JSON and plaintext values
expected_json = {
"1": 'C. Muñoz',
"2": 'L. Messi',
"3": 'Neymar Jr',
"4": 'Brazil',
"5": ['Argentina', 'Portugal', 'Brazil', 'Slovenia', 'Belgium'],
"6": ['A. Abdallah', 'A. Abdellaoui', 'A. Abdennour', 'A. Abdi', 'A. Abdu'],
"7": 2484037.64,
"8": 181.36,
"9": 1146,
"10": {'Argentina': 886,
'Portugal': 344,
'Brazil': 824,
'Slovenia': 61,
'Belgium': 268,
'Germany': 1216,
'Netherlands': 416,
'Croatia': 126,
'Egypt': 30,
'France': 984,
'Senegal': 127,
'England': 1667,
'Spain': 1035,
'Italy': 732,
'Uruguay': 164,
'Poland': 324,
'Denmark': 345,
'Gabon': 16,
'Korea Republic': 322,
'Costa Rica': 30,
'Slovakia': 54,
'Bosnia Herzegovina': 66,
'Serbia': 139,
'Scotland': 277,
'Hungary': 35,
'Switzerland': 229,
'Greece': 96,
'Austria': 319,
'Morocco': 94,
'Sweden': 358,
'Wales': 117,
'Colombia': 591,
'Czech Republic': 102,
'Chile': 370,
'Algeria': 50,
'Ivory Coast': 105,
'Togo': 13,
'Norway': 350,
'Mexico': 340,
'Iceland': 46,
'Finland': 72,
'Jamaica': 29,
'Albania': 43,
'Guinea': 35,
'Cameroon': 78,
'Ghana': 130,
'Montenegro': 33,
'Ukraine': 69,
'Russia': 81,
'DR Congo': 54,
'Central African Rep.': 4,
'Venezuela': 66,
'Nigeria': 126,
'Armenia': 8,
'Israel': 16,
'Ecuador': 53,
'Paraguay': 80,
'Australia': 196,
'Turkey': 294,
'Romania': 287,
'Japan': 453,
'Mali': 55,
'United States': 347,
'Kosovo': 40,
'Dominican Republic': 4,
'Tanzania': 4,
'China PR': 373,
'Northern Ireland': 81,
'Republic of Ireland': 348,
'Tunisia': 35,
'Cape Verde': 20,
'FYR Macedonia': 20,
'Burkina Faso': 16,
'Kenya': 7,
'Angola': 16,
'South Africa': 72,
'Peru': 35,
'Syria': 4,
'Gambia': 22,
'New Zealand': 35,
'Equatorial Guinea': 6,
'Zimbabwe': 12,
'Georgia': 25,
'Canada': 61,
'Estonia': 6,
'Benin': 15,
'Bulgaria': 41,
'Mozambique': 4,
'Honduras': 13,
'Guinea Bissau': 21,
'Iran': 15,
'Philippines': 2,
'Cyprus': 11,
'Madagascar': 8,
'Uzbekistan': 3,
'Moldova': 12,
'Cuba': 4,
'Sierra Leone': 10,
'Curacao': 16,
'Zambia': 10,
'Congo': 18,
'Bolivia': 23,
'Comoros': 9,
'Iraq': 5,
'Chad': 1,
'Lithuania': 10,
'Saudi Arabia': 310,
'Panama': 12,
'Libya': 4,
'Bahrain': 1,
'St Kitts Nevis': 4,
'New Caledonia': 2,
'Luxembourg': 9,
'Trinidad & Tobago': 6,
'Thailand': 4,
'United Arab Emirates': 22,
'Eritrea': 1,
'Korea DPR': 4,
'El Salvador': 4,
'Azerbaijan': 6,
'Latvia': 6,
'Montserrat': 3,
'Puerto Rico': 1,
'Bermuda': 3,
'São Tomé & Príncipe': 1,
'Antigua & Barbuda': 7,
'Burundi': 4,
'Kazakhstan': 2,
'Liberia': 1,
'Guyana': 4,
'Haiti': 7,
'Jordan': 1,
'Faroe Islands': 5,
'Mauritania': 5,
'Namibia': 2,
'Rwanda': 2,
'Uganda': 3,
'Hong Kong': 1,
'Chinese Taipei': 1,
'Belize': 1,
'Palestine': 4,
'Mauritius': 1,
'Guam': 1,
'Suriname': 2,
'Lebanon': 3,
'Guatemala': 2,
'Sudan': 3,
'Liechtenstein': 2,
'Grenada': 2,
'St Lucia': 1,
'Afghanistan': 2,
'Ethiopia': 1,
'Barbados': 1,
'India': 23,
'Malta': 2,
'Niger': 3,
'Vietnam': 1,
'Malawi': 1,
'Gibraltar': 1,
'Macau': 1,
'South Sudan': 1,
'Indonesia': 1},
"11": 'England',
"12": 886,
"13": {'ID': '242444',
'Name': 'João Félix',
'Age': 19,
'Height(cm)': 181,
'Weight(kg)': 70,
'Nationality': 'Portugal',
'Club': 'Atlético Madrid',
'Value': 28000000,
'Wage': 38000,
'Player_Position': 'CF',
'Preferred_Foot': 'Right',
'Body_Type': 'Lean',
'Stamina': 79},
"14": {'ID': '211300',
'Name': 'A. Martial',
'Age': 23,
'Height(cm)': 184,
'Weight(kg)': 76,
'Nationality': 'France',
'Club': 'Manchester United',
'Value': 34500000,
'Wage': 140000,
'Player_Position': 'LW',
'Preferred_Foot': 'Right',
'Body_Type': 'Normal',
'Stamina': 75},
"15": {'ID': '198717',
'Name': 'W. Zaha',
'Age': 26,
'Height(cm)': 180,
'Weight(kg)': 66,
'Nationality': 'Ivory Coast',
'Club': 'Crystal Palace',
'Value': 32000000,
'Wage': 89000,
'Player_Position': 'CF',
'Preferred_Foot': 'Right',
'Body_Type': 'Lean',
'Stamina': 76},
"16": {'ID': '200536',
'Name': 'N. Schulz',
'Age': 26,
'Height(cm)': 181,
'Weight(kg)': 83,
'Nationality': 'Germany',
'Club': 'Borussia Dortmund',
'Value': 22500000,
'Wage': 82000,
'Player_Position': 'LWB',
'Preferred_Foot': 'Left',
'Body_Type': 'Normal',
'Stamina': 79},
"17": {'Messi': 1,
'C. Ronaldo': 1,
'Neymar': 1,
'Normal': 10750,
'Lean': 6505,
'PLAYER_BODY_TYPE_25': 1,
'Stocky': 1016,
'Courtois': 1,
'Shaqiri': 1,
'Akinfenwa': 1},
"18": 4318,
"19": 70.61,
"20": {'FC Barcelona': 150000.0,
'Juventus': 113636.36,
'Paris Saint-Germain': 72606.06,
'Atlético Madrid': 44848.48,
'Real Madrid': 162242.42,
'Manchester City': 120727.27,
'Liverpool': 80818.18,
'Napoli': 59838.71,
'Tottenham Hotspur': 78878.79,
'Manchester United': 87090.91,
'Chelsea': 85030.3,
'FC Bayern München': 109391.3,
'Inter': 53733.33,
'Borussia Dortmund': 57806.45,
'Arsenal': 61393.94,
'Valencia CF': 29363.64,
'Lazio': 40454.55,
'Milan': 32482.76,
'Sporting CP': 11233.33,
'Olympique Lyonnais': 43655.17,
'RB Leipzig': 33636.36,
'Ajax': 15000.0,
'LA Galaxy': 4185.19,
'Atalanta': 38392.86,
'RC Celta': 14575.76,
'Bayer 04 Leverkusen': 50240.0,
'Real Betis': 18774.19,
'FC Porto': 11633.33,
'SV Werder Bremen': 18312.5,
'West Ham United': 52939.39,
'Wolverhampton Wanderers': 41969.7,
'AS Saint-Étienne': 17875.0,
'Torino': 28793.1,
'Dalian YiFang FC': 4928.57,
'Borussia Mönchengladbach': 20121.21,
'Roma': 31516.13,
'Guangzhou Evergrande Taobao FC': 10000.0,
'SL Benfica': 11833.33,
'Medipol Başakşehir FK': 27066.67,
'Everton': 56606.06,
'VfL Wolfsburg': 31424.24,
'Crystal Palace': 33939.39,
'Getafe CF': 18600.0,
'Shanghai SIPG FC': 6892.86,
'Eintracht Frankfurt': 24363.64,
'Olympique de Marseille': 23592.59,
'Hertha BSC': 17151.52,
'RSC Anderlecht': 12000.0,
'Villarreal CF': 21937.5,
'Sampdoria': 18781.25,
'Leicester City': 53000.0,
'AS Monaco': 24818.18,
'Jiangsu Suning FC': 7520.0,
'Los Angeles FC': 4217.39,
'Cagliari': 20322.58,
'Sevilla FC': 14939.39,
'Fenerbahçe SK': 38862.07,
'Real Sociedad': 18433.33,
'TSG 1899 Hoffenheim': 28266.67,
'Atlético Mineiro': 24750.0,
'Grêmio': 20800.0,
'PSV': 12233.33,
'Athletic Club de Bilbao': 17848.48,
'Deportivo Alavés': 15424.24,
'Boca Juniors': 16285.71,
'Lokomotiv Moscow': 1000.0,
'Al Nassr': 15133.33,
'Brescia': 7464.29,
'Shakhtar Donetsk': 1000.0,
'Shanghai Greenland Shenhua FC': 5571.43,
'PFC CSKA Moscow': 1000.0,
'Beijing Sinobo Guoan FC': 7538.46,
'Levante UD': 17531.25,
'Cruzeiro': 17050.0,
'Uruguay': 0.0,
'Montpellier HSC': 18222.22,
'Atlanta United': 3833.33,
'Watford': 47787.88,
'1. FC Köln': 17387.1,
'Bournemouth': 38060.61,
'Beşiktaş JK': 38433.33,
'Real Valladolid CF': 12151.52,
'Racing Club': 13928.57,
'Al Hilal': 20068.97,
'Guangzhou R&F FC': 5407.41,
'Sassuolo': 27392.86,
'FC Girondins de Bordeaux': 18781.25,
'LOSC Lille': 19483.87,
'Galatasaray SK': 41433.33,
'Chicago Fire': 3740.74,
'Fluminense': 16450.0,
'Ecuador': 0.0,
'RCD Espanyol': 18400.0,
'Dinamo Zagreb': 1000.0,
'FC Nantes': 12909.09,
'River Plate': 15785.71,
'OGC Nice': 16034.48,
'Newcastle United': 29393.94,
'Brighton & Hove Albion': 32060.61,
'Club Brugge KV': 14888.89,
'FC Schalke 04': 19600.0,
'SD Eibar': 18642.86,
'DC United': 3962.96,
'Orlando City SC': 3214.29,
'Hebei China Fortune FC': 4730.77,
'Tigres U.A.N.L.': 39769.23,
'Aston Villa': 28939.39,
'Montreal Impact': 3153.85,
'Olympiacos CFP': 1000.0,
'Norwich City': 23333.33,
'Feyenoord': 8100.0,
'Toronto FC': 3466.67,
'KRC Genk': 9321.43,
'Fiorentina': 26566.67,
'Spartak Moscow': 1000.0,
'Dynamo Kyiv': 1000.0,
'SK Slavia Praha': 1000.0,
'Southampton': 33272.73,
'Burnley': 23939.39,
'SC Braga': 11392.86,
'Russia': 0.0,
'RC Strasbourg Alsace': 15642.86,
'Wuhan Zall': 3250.0,
'Vissel Kobe': 3310.34,
'Portland Timbers': 3777.78,
'Genoa': 10656.25,
'Beijing Renhe FC': 3607.14,
'Toulouse Football Club': 13206.9,
'Girona FC': 6960.0,
'Real Zaragoza': 4241.38,
'CD Leganés': 16151.52,
'Shenzhen FC': 3851.85,
'Internacional': 18150.0,
'CSA - AL': 7900.0,
'Santos': 18550.0,
'1. FSV Mainz 05': 14515.15,
'Stoke City': 20533.33,
'Udinese': 11060.61,
'Colombia': 0.0,
'Angers SCO': 12290.32,
'FC Augsburg': 13515.15,
'Netherlands': 0.0,
'Fulham': 24266.67,
'FC København': 9888.89,
'KAA Gent': 12535.71,
'SC Freiburg': 14161.29,
'Stade Rennais FC': 24869.57,
'Club América': 29681.82,
'Trabzonspor': 12466.67,
'BSC Young Boys': 10037.04,
'Helsingborgs IF': 1259.26,
'Kaizer Chiefs': 1000.0,
'Parma': 22272.73,
'Mexico': 0.0,
'Royal Antwerp FC': 6535.71,
'Tianjin TEDA FC': 4678.57,
'Hannover 96': 15214.29,
'Tianjin Quanjian FC': 5962.96,
'Al Ahli': 14866.67,
'Bologna': 20387.1,
'VfB Stuttgart': 13200.0,
'Seattle Sounders FC': 4074.07,
'Godoy Cruz': 3035.71,
'Sparta Praha': 1000.0,
'Independiente Medellín': 1928.57,
'Sivasspor': 5923.08,
'Independiente': 13214.29,
'Nîmes Olympique': 9782.61,
'Club Tijuana': 9142.86,
'SPAL': 8740.74,
'Monterrey': 23344.83,
'CD Tondela': 2925.93,
'Fortuna Düsseldorf': 15212.12,
'Vitória Guimarães': 6433.33,
'AZ Alkmaar': 5481.48,
'Atlético de San Luis': 5111.11,
'PAOK': 1000.0,
'Nagoya Grampus': 2655.17,
'Club Atlético Banfield': 6678.57,
'Shandong Luneng TaiShan FC': 6000.0,
'New York Red Bulls': 3827.59,
'FC Red Bull Salzburg': 16000.0,
'Sweden': 0.0,
'Amiens SC': 9562.5,
'Hellas Verona': 9484.85,
'Cruz Azul': 13571.43,
'Gençlerbirliği SK': 7541.67,
'1. FC Union Berlin': 17606.06,
'Standard de Liège': 9785.71,
'Chongqing Dangdai Lifan FC SWM Team': 2720.0,
'New England Revolution': 3423.08,
'Club Atlético Colón': 7214.29,
'Celtic': 25678.57,
'Club Atlas': 7892.86,
'Botafogo': 12550.0,
'En Avant de Guingamp': 3966.67,
'West Bromwich Albion': 19366.67,
'Pachuca': 11560.0,
'AEK Athens': 1000.0,
'Portimonense SC': 3966.67,
'Real Salt Lake': 3034.48,
'FC Utrecht': 8703.7,
'Sheffield United': 17909.09,
"Newell's Old Boys": 7928.57,
'Club Atlético Talleres': 5392.86,
'Philadelphia Union': 3740.74,
'Rosenborg BK': 3111.11,
'FC Basel 1893': 11629.63,
'Brentford': 14866.67,
'Club León': 13964.29,
'Unión de Santa Fe': 6120.0,
'Deportivo de La Coruña': 5916.67,
'Rangers FC': 25892.86,
'Turkey': 0.0,
'Atiker Konyaspor': 8481.48,
'Granada CF': 11444.44,
'Perth Glory': 2478.26,
'Club Atlético Lanús': 9214.29,
'Hamburger SV': 7800.0,
'Al Ittihad': 13833.33,
'Santos Laguna': 10320.0,
'Western United FC': 1300.0,
'Columbus Crew SC': 3607.14,
'Deportivo Toluca': 11760.0,
'Cardiff City': 14700.0,
'CA Osasuna': 16166.67,
'Swansea City': 11833.33,
'Melbourne Victory': 3000.0,
'Leeds United': 25166.67,
'Göztepe SK': 10178.57,
'Hungary': 0.0,
'New York City FC': 4000.0,
'Bulgaria': 0.0,
'Kayserispor': 5758.62,
'Minnesota United FC': 3791.67,
'Guadalajara': 16148.15,
'FC Groningen': 3333.33,
'Paraguay': 0.0,
'Junior FC': 2392.86,
'Al Taawoun': 7653.85,
'Huddersfield Town': 14100.0,
'Ettifaq FC': 8037.04,
'Stade de Reims': 13153.85,
'Rayo Vallecano': 4333.33,
'San Lorenzo de Almagro': 11428.57,
'Bahia': 7400.0,
'Atlético Paranaense': 7200.0,
'Goiás': 7400.0,
'Avaí FC': 8050.0,
'Fortaleza': 5700.0,
'Kawasaki Frontale': 4172.41,
'Vélez Sarsfield': 7571.43,
'Hull City': 5300.0,
'Houston Dynamo': 2965.52,
'Birmingham City': 6300.0,
'Al Wehda': 7964.29,
'Nottingham Forest': 23500.0,
'CD Tenerife': 3900.0,
'Aalborg BK': 4041.67,
'Preston North End': 11000.0,
'Bristol City': 15866.67,
'Lecce': 7575.76,
'Gimnasia y Esgrima La Plata': 5392.86,
'CD Aves': 4440.0,
'Viktoria Plzeň': 1000.0,
'1. FC Nürnberg': 7200.0,
'Slovenia': 0.0,
'FC Metz': 8296.3,
'FC Midtjylland': 8296.3,
'Molde FK': 2814.81,
'Colo-Colo': 4160.0,
'RCD Mallorca': 12185.19,
'FC Sion': 9185.19,
'Wisła Kraków': 2000.0,
'Denizlispor': 9913.04,
'Middlesbrough': 11366.67,
'Universidad Católica': 6107.14,
'Alanyaspor': 7862.07,
'Moreirense FC': 6192.31,
"Côte d'Ivoire": 0.0,
'Stade Brestois 29': 8960.0,
'Chievo Verona': 2400.0,
'Boavista FC': 3482.76,
'MKE Ankaragücü': 8952.38,
'Sydney FC': 3666.67,
'Al Ain FC': 1000.0,
'Yeni Malatyaspor': 6321.43,
'Urawa Red Diamonds': 4733.33,
'Querétaro': 4142.86,
'Sporting Kansas City': 3964.29,
'Málaga CF': 5600.0,
'1. FC Heidenheim 1846': 5206.9,
'Atlético Tucumán': 5481.48,
'Gazişehir Gaziantep F.K.': 6533.33,
'Clube Sport Marítimo': 4655.17,
'Chapecoense': 8100.0,
'Atlético Nacional': 2571.43,
'Vitesse': 8153.85,
'Australia': 0.0,
'Henan Jianye FC': 3444.44,
'Panathinaikos FC': 1000.0,
'Blackburn Rovers': 7433.33,
'Santa Clara': 3148.15,
'Cameroon': 0.0,
'Puebla FC': 3851.85,
'U.N.A.M.': 9120.0,
'SD Huesca': 4038.46,
'Estudiantes de La Plata': 6307.69,
'Austria': 0.0,
'KAS Eupen': 4222.22,
'LASK Linz': 4000.0,
'Sporting de Charleroi': 6074.07,
'Daegu FC': 1750.0,
'Real Sporting de Gijón': 4000.0,
'Derby County': 11633.33,
'Rio Ave FC': 4592.59,
'South Africa': 0.0,
'Famalicão': 3916.67,
'Neuchâtel Xamax': 1615.38,
'Benevento': 2913.04,
'Cádiz CF': 5233.33,
'AIK': 4000.0,
'Sheffield Wednesday': 12166.67,
'Empoli': 2466.67,
'Colorado Rapids': 2518.52,
'Os Belenenses': 3233.33,
'Unión Magdalena': 1035.71,
'Real Oviedo': 5200.0,
'Peru': 0.0,
'Antalyaspor': 6785.71,
'Dijon FCO': 6166.67,
'SV Sandhausen': 2833.33,
'Rosario Central': 8923.08,
'Reading': 8666.67,
'DSC Arminia Bielefeld': 6666.67,
'Malmö FF': 4000.0,
'Jeonbuk Hyundai Motors': 4714.29,
'Frosinone': 2480.0,
'FC Tokyo': 3333.33,
'Canada': 0.0,
'Çaykur Rizespor': 6478.26,
'FCSB (Steaua)': 6333.33,
'Defensa y Justicia': 6142.86,
'Monarcas Morelia': 4392.86,
'Club Atlético Huracán': 7200.0,
'Ceará Sporting Club': 5400.0,
'Argentinos Juniors': 4428.57,
'Al Shabab': 10033.33,
'Legia Warszawa': 4555.56,
'Shimizu S-Pulse': 1533.33,
'Millonarios FC': 1714.29,
'Lechia Gdańsk': 3148.15,
'Brøndby IF': 6481.48,
'Albacete BP': 3964.29,
'FC Lorient': 3275.86,
'Universitatea Craiova': 3592.59,
'Deportivo Cali': 1392.86,
'SK Rapid Wien': 6640.0,
'Kashima Antlers': 3233.33,
'Poland': 0.0,
'Elche CF': 3238.1,
'Club Atlético Aldosivi': 4423.08,
'Deportes Tolima': 1821.43,
'Cúcuta Deportivo': 1035.71,
'Club Necaxa': 4357.14,
'Piast Gliwice': 2185.19,
'Pescara': 1833.33,
'Kasimpaşa SK': 6148.15,
'Egypt': 0.0,
'Holstein Kiel': 4714.29,
'Livorno': 1333.33,
'Coquimbo Unido': 1461.54,
'UD Las Palmas': 4433.33,
'Górnik Zabrze': 1444.44,
'FC Twente': 3692.31,
'Paris FC': 2615.38,
'Racing Club de Lens': 3633.33,
'VfL Bochum 1848': 6344.83,
'Sunderland': 4357.14,
'Aberdeen': 2807.69,
'Heart of Midlothian': 3107.14,
'Romania': 0.0,
'Crotone': 1653.85,
'Iceland': 0.0,
'CFR Cluj': 5703.7,
'FK Austria Wien': 5407.41,
'UD Almería': 2785.71,
'SK Brann': 2304.35,
'BK Häcken': 1592.59,
'Al Fateh': 6464.29,
'CD Everton de Viña del Mar': 1538.46,
'FC St. Pauli': 6666.67,
'Gamba Osaka': 4000.0,
'Yokohama F. Marinos': 2222.22,
'Djurgårdens IF': 2115.38,
'FC Dallas': 2785.71,
'Sagan Tosu': 1733.33,
'Al Fayha': 5384.62,
'Chile': 0.0,
'Once Caldas': 1285.71,
'Atlético Bucaramanga': 1071.43,
'FC Cincinnati': 2931.03,
'San Jose Earthquakes': 3103.45,
'América de Cali': 1214.29,
'Perugia': 1923.08,
'La Equidad': 1035.71,
'SV Darmstadt 98': 7366.67,
'FC Zürich': 3884.62,
'SC Paderborn 07': 8272.73,
'SV Zulte-Waregem': 5370.37,
'IFK Norrköping': 2807.69,
'FC Viitorul': 2703.7,
'Pordenone': 1240.0,
'AD Alcorcón': 4440.0,
'Sint-Truidense VV': 3760.0,
'KV Kortrijk': 5384.62,
'Royal Excel Mouscron': 3214.29,
'FC Lugano': 2444.44,
'Hokkaido Consadole Sapporo': 1958.33,
'Spezia': 1533.33,
'FC Paços de Ferreira': 3714.29,
'Servette FC': 3000.0,
'US Salernitana 1919': 1766.67,
'Lech Poznań': 3074.07,
'ESTAC Troyes': 2821.43,
'Fortuna Sittard': 2760.0,
'KV Oostende': 6307.69,
'VVV-Venlo': 2370.37,
'KSV Cercle Brugge': 4111.11,
'FC Seoul': 2821.43,
'Cerezo Osaka': 2866.67,
'Stade Malherbe Caen': 3520.0,
'Universidad de Chile': 5038.46,
'Kalmar FF': 1222.22,
'KV Mechelen': 4464.29,
'Deportes Iquique': 1269.23,
'US Cremonese': 1916.67,
'Bolivia': 0.0,
'Valenciennes FC': 1851.85,
'Ulsan Hyundai FC': 3535.71,
'Vegalta Sendai': 1413.79,
'ADO Den Haag': 2880.0,
'CD Palestino': 1520.0,
'Cittadella': 1346.15,
'Vitória de Setúbal': 2833.33,
'FC Nordsjælland': 2653.85,
'Charlton Athletic': 5034.48,
'Al Raed': 5318.18,
'Jagiellonia Białystok': 3259.26,
'Odense Boldklub': 4181.82,
'FC Thun': 3037.04,
'SG Dynamo Dresden': 6333.33,
'Hammarby IF': 2115.38,
'Central Córdoba': 3142.86,
'Queens Park Rangers': 3500.0,
' SSV Jahn Regensburg': 4120.0,
'Tiburones Rojos de Veracruz': 2888.89,
'Patronato': 4821.43,
'Virtus Entella': 1100.0,
'SK Sturm Graz': 7840.0,
'Kilmarnock': 2363.64,
'Extremadura UD': 2576.92,
'Willem II': 3000.0,
'Gil Vicente FC': 3038.46,
'Randers FC': 2640.0,
'Ascoli': 1620.69,
'Vancouver Whitecaps FC': 3571.43,
'Millwall': 5100.0,
'Pohang Steelers': 2178.57,
'Suwon Samsung Bluewings': 2642.86,
'Heracles Almelo': 2259.26,
'Al Hazem': 3222.22,
'FC Juárez': 5461.54,
'SC Heerenveen': 2521.74,
'Dinamo Bucureşti': 2814.81,
'Gyeongnam FC': 1714.29,
'KFC Uerdingen 05': 2785.71,
'PEC Zwolle': 3370.37,
'Al Faisaly': 7050.0,
'Arsenal de Sarandí': 3750.0,
'FC Ingolstadt 04': 2423.08,
'Wolfsberger AC': 3423.08,
'Wigan Athletic': 6033.33,
'Júbilo Iwata': 2133.33,
'CD Lugo': 2862.07,
'SpVgg Greuther Fürth': 3344.83,
'FC Emmen': 1857.14,
'Cosenza': 1454.55,
'AJ Auxerre': 2466.67,
'Cracovia': 2074.07,
'FC Erzgebirge Aue': 2689.66,
'Grenoble Foot 38': 2000.0,
'Korona Kielce': 1518.52,
'Alianza Petrolera': 1142.86,
'CD Universidad de Concepción': 1461.54,
'Melbourne City FC': 3380.95,
'CD Antofagasta': 1500.0,
'Independiente Santa Fe': 1071.43,
'Sanfrecce Hiroshima': 4041.67,
'Damac FC': 4875.0,
'Chamois Niortais Football Club': 1777.78,
'Atlético Huila': 1000.0,
'CD Numancia': 3518.52,
'Luton Town': 5333.33,
'Unión La Calera': 1375.0,
'Le Havre AC': 2384.62,
'AS Nancy Lorraine': 1966.67,
'Audax Italiano': 1833.33,
'Hibernian': 2875.0,
'Barnsley': 3133.33,
'Pogoń Szczecin': 2000.0,
'Sarpsborg 08 FF': 1592.59,
'Kristiansund BK': 1238.1,
'FC Luzern': 3240.0,
'Unión Española': 1814.81,
'IFK Göteborg': 1608.7,
'Envigado FC': 1000.0,
'Jeju United FC': 1928.57,
'Patriotas Boyacá FC': 1000.0,
'Peterborough United': 3071.43,
'Deportivo Pasto': 1035.71,
'Wales': 0.0,
'Clermont Foot 63': 1851.85,
'Northern Ireland': 0.0,
'Astra Giurgiu': 2703.7,
'Racing Santander': 2692.31,
'Pisa': 1133.33,
'Newcastle Jets': 1181.82,
'Castellammare di Stabia': 1250.0,
'Rionegro Águilas': 1000.0,
'Vålerenga Fotball': 1666.67,
"CD O'Higgins": 1925.93,
'AC Ajaccio': 1583.33,
'CF Fuenlabrada': 1862.07,
'VfL Osnabrück': 4500.0,
'Arka Gdynia': 1555.56,
'FC Sochaux-Montbéliard': 1533.33,
'SpVgg Unterhaching': 1607.14,
'Central Coast Mariners': 1100.0,
'SV Wehen Wiesbaden': 4266.67,
'Fleetwood Town': 2392.86,
'Venezia FC': 1344.83,
'Karlsruher SC': 4321.43,
'Wisła Płock': 1592.59,
'Gangwon FC': 1500.0,
'IF Elfsborg': 1521.74,
'Sangju Sangmu FC': 1285.71,
'Zagłębie Lubin': 1560.0,
'Abha Club': 4285.71,
'Jaguares de Córdoba': 1000.0,
'FC Botoşani': 2444.44,
'Motherwell': 1833.33,
'FC St. Gallen': 3037.04,
'Bayern München II': 1250.0,
'CD Mirandés': 2476.19,
'St. Johnstone FC': 1833.33,
'Lillestrøm SK': 1347.83,
'Odds BK': 1545.45,
'SD Ponferradina': 2615.38,
'Doncaster Rovers': 2250.0,
'Örebro SK': 1407.41,
'Shamrock Rovers': 1045.45,
'FSV Zwickau': 1000.0,
'1. FC Kaiserslautern': 1607.14,
'Portsmouth': 3321.43,
'Waasland-Beveren': 2846.15,
'Ipswich Town': 4000.0,
'Dundalk': 1086.96,
'CD Huachipato': 2071.43,
'Le Mans FC': 1416.67,
'Strømsgodset IF': 1259.26,
'Sepsi OSK': 2888.89,
'Sparta Rotterdam': 2809.52,
'FC Admira Wacker Mödling': 2769.23,
'SønderjyskE': 3333.33,
'Hallescher FC': 1500.0,
'SKN St. Pölten': 3625.0,
'MSV Duisburg': 1125.0,
'La Berrichonne de Châteauroux': 1615.38,
'FC Hansa Rostock': 1400.0,
'Incheon United FC': 1214.29,
'Shonan Bellmare': 2000.0,
'Orlando Pirates': 1000.0,
'FK Bodø/Glimt': 1148.15,
'Lincoln City': 3000.0,
'Brisbane Roar': 1291.67,
'Eintracht Braunschweig': 1615.38,
'Curicó Unido': 1259.26,
'Falkenbergs FF': 1041.67,
'Venezuela': 0.0,
'1. FC Magdeburg': 1916.67,
'CD Cobresal': 1107.14,
'Aarhus GF': 3160.0,
'Salford City': 3285.71,
'HJK Helsinki': 1000.0,
'Oxford United': 2428.57,
'Hobro IK': 2375.0,
'Esbjerg fB': 2791.67,
'Bolton Wanderers': 1217.39,
'SV Mattersburg': 3920.0,
'Rotherham United': 3090.91,
'SCR Altach': 2629.63,
'Gaz Metan Mediaş': 2259.26,
'Tromsø IL': 1074.07,
'WSG Tirol': 2360.0,
'IK Sirius': 1185.19,
'Shrewsbury': 2142.86,
'Oldham Athletic': 2285.71,
'Southend United': 2321.43,
'Blackpool': 2285.71,
'Coventry City': 2214.29,
'Adelaide United': 1190.48,
'Ranheim Fotball': 1043.48,
'SG Sonnenhof Großaspach': 1280.0,
'Matsumoto Yamaga': 1233.33,
'Burton Albion': 2115.38,
'FC Hermannstadt': 2037.04,
'TSV 1860 München': 1000.0,
'LKS Lodz': 1111.11,
'FC Würzburger Kickers': 1571.43,
'TSV Hartberg': 1769.23,
'Politehnica Iaşi': 1740.74,
'Wellington Phoenix': 1000.0,
'Milton Keynes Dons': 2384.62,
'Stabæk Fotball': 1074.07,
'Wycombe Wanderers': 1875.0,
'SV Waldhof Mannheim': 1000.0,
'AC Horsens': 2238.1,
'Scunthorpe United': 3357.14,
'RKC Waalwijk': 1680.0,
'Oita Trinita': 1300.0,
'St. Mirren': 1565.22,
'Bristol Rovers': 2071.43,
'Rodez Aveyron Football': 1269.23,
'SV Meppen': 1000.0,
'Viking FK': 1160.0,
'Östersunds FK': 1259.26,
'FK Haugesund': 1037.04,
'Rochdale': 1240.0,
'Colchester United': 2892.86,
'Trapani': 1000.0,
'Stevenage': 2035.71,
'Bradford City': 3480.0,
'Livingston FC': 1607.14,
'FC Chambly Oise': 1366.67,
'Mansfield Town': 3678.57,
'SC Preußen Münster': 1250.0,
'FC Voluntari': 1666.67,
'Accrington Stanley': 1214.29,
'Al Adalah': 3640.0,
'Lyngby BK': 1958.33,
'FC Carl Zeiss Jena': 1000.0,
'Viktoria Köln': 1107.14,
'Tranmere Rovers': 1714.29,
'Silkeborg IF': 1814.81,
'Gillingham': 2000.0,
'Plymouth Argyle': 3440.0,
'Chemnitzer FC': 1000.0,
'Mjøndalen IF': 1090.91,
'Walsall': 3115.38,
'Northampton Town': 3000.0,
'Hamilton Academical FC': 1285.71,
'Grimsby Town': 1642.86,
'Exeter City': 2464.29,
'Swindon Town': 2535.71,
'Raków Częstochowa': 1041.67,
'Chindia Târgovişte': 1304.35,
'US Orléans Loiret Football': 1000.0,
'Forest Green Rovers': 3375.0,
'AFC Wimbledon': 1464.29,
'Carlisle United': 1920.0,
'Morecambe': 1629.63,
'Port Vale': 2360.0,
'Cheltenham Town': 1958.33,
'Academica Clinceni': 1076.92,
'Crawley Town': 1714.29,
'Ross County FC': 1520.0,
'AFC Eskilstuna': 1000.0,
'Macclesfield Town': 1541.67,
'Cork City': 1000.0,
'Newport County': 2038.46,
'Crewe Alexandra': 1960.0,
'Leyton Orient': 2222.22,
'Cambridge United': 1875.0,
"St. Patrick's Athletic": 1000.0,
'Bohemian FC': 1000.0,
'India': 0.0,
'Finland': 0.0,
'Waterford FC': 1000.0,
'Derry City': 1000.0,
'Bury': 1000.0,
'New Zealand': 0.0,
'GIF Sundsvall': 1000.0,
'Sligo Rovers': 1000.0,
'Finn Harps': 1000.0,
'Seongnam FC': 1000.0,
'UCD AFC': 1000.0,
'Śląsk Wrocław': 1000.0},
}
# find a comment something like this: #q10
def extract_question_num(cell):
    """Return the question number from a '#qN' comment in *cell*, or None.

    Whitespace and case are ignored, so '# Q 10' and '#q10' both match.
    Only comments at the start of a line (after normalization) count.
    """
    marker = re.compile(r'\#q(\d+)')
    for raw_line in cell.get('source', []):
        normalized = raw_line.strip().replace(' ', '').lower()
        match = marker.match(normalized)
        if match is not None:
            return int(match.group(1))
    return None
# rerun notebook and return parsed JSON
def rerun_notebook(orig_notebook):
    """Execute *orig_notebook* from scratch and return it as a parsed JSON dict.

    The executed copy is always written to 'cs-220-test.ipynb' — even when a
    cell raises — so the traceback can be inspected there.  Re-raises
    CellExecutionError on failure, aborting grading.
    """
    new_notebook = 'cs-220-test.ipynb'
    # re-execute it from the beginning
    with open(orig_notebook, encoding='utf-8') as f:
        nb = nbformat.read(f, as_version=nbformat.NO_CONVERT)
    # 120-second budget for the whole notebook, run under the python3 kernel
    ep = nbconvert.preprocessors.ExecutePreprocessor(timeout=120, kernel_name='python3')
    try:
        out = ep.preprocess(nb, {'metadata': {'path': os.getcwd()}})
    except nbconvert.preprocessors.CellExecutionError:
        out = None
        msg = 'Error executing the notebook "%s".\n\n' % orig_notebook
        msg += 'See notebook "%s" for the traceback.' % new_notebook
        print(msg)
        # re-raise so grading stops; the partially-executed notebook is still
        # saved by the finally block below for debugging
        raise
    finally:
        with open(new_notebook, mode='w', encoding='utf-8') as f:
            nbformat.write(nb, f)
    # Note: Here we are saving and reloading, this isn't needed but can help student's debug
    # parse notebook
    with open(new_notebook, encoding='utf-8') as f:
        nb = json.load(f)
    return nb
def normalize_json(orig):
    """Return *orig* re-serialized as canonical JSON (sorted keys, indent=2).

    Surrounding single quotes (as produced by a repr) are stripped first.
    Returns the string 'not JSON' when the input cannot be parsed.
    """
    try:
        return json.dumps(json.loads(orig.strip("'")), indent=2, sort_keys=True)
    # Bug fix: was a bare 'except:', which also swallowed KeyboardInterrupt /
    # SystemExit.  ValueError covers json.JSONDecodeError; AttributeError and
    # TypeError cover non-string input (no .strip / wrong type for loads).
    except (ValueError, TypeError, AttributeError):
        return 'not JSON'
def check_cell_text(qnum, cell):
    """Compare a notebook cell's Out[N] text against the expected answer.

    :param qnum: question number; keys into the module-level expected_json
    :param cell: parsed notebook cell dict (nbformat JSON)
    :return: PASS on success, otherwise a human-readable failure description
    """
    outputs = cell.get('outputs', [])
    if not outputs:
        return 'no outputs in an Out[N] cell'
    # Find the first output carrying a plain-text repr (i.e. an Out[N] value;
    # print() goes to a 'stream' output and has no 'data' payload).
    actual_lines = None
    for out in outputs:
        lines = out.get('data', {}).get('text/plain', [])
        if lines:
            actual_lines = lines
            break
    if actual_lines is None:  # bug fix: was 'actual_lines == None'
        return 'no Out[N] output found for cell (note: printing the output does not work)'
    actual = ''.join(actual_lines)
    # Robustness: the repr may not be a valid Python literal (e.g. a custom
    # object); report a graded failure instead of crashing the whole run.
    try:
        actual = ast.literal_eval(actual)
    except (ValueError, SyntaxError):
        return "output is not a plain Python value: %s" % actual
    expected = expected_json[str(qnum)]
    expected_mismatch = False
    # Deliberately strict type comparison (no subclass tolerance): an int is
    # not accepted where a float is expected, and vice versa.
    if type(expected) != type(actual):
        return "expected an answer of type %s but found one of type %s" % (type(expected), type(actual))
    elif type(expected) == float:
        # Floats are graded with a small tolerance rather than exact equality.
        if not math.isclose(actual, expected, rel_tol=1e-06, abs_tol=1e-06):
            expected_mismatch = True
    elif type(expected) == list:
        # Order-insensitive list comparison with targeted error messages.
        extra = set(actual) - set(expected)
        missing = set(expected) - set(actual)
        if extra:
            return "found unexpected entry in list: %s" % repr(list(extra)[0])
        elif missing:
            return "missing %d entries list, such as: %s" % (len(missing), repr(list(missing)[0]))
        elif len(actual) != len(expected):
            # Same members but different lengths => duplicates differ.
            return "expected %d entries in the list but found %d" % (len(expected), len(actual))
    else:
        if expected != actual:
            expected_mismatch = True
    if expected_mismatch:
        return "found {} in cell {} but expected {}".format(actual, qnum, expected)
    return PASS
def check_cell(question, cell):
    """Dispatch the check for one question's answer cell by its format tag.

    :param question: a Question namedtuple
    :param cell: parsed notebook cell dict
    :return: PASS or a failure-description string
    :raises ValueError: if the question's format is not a known type
    """
    print('Checking question %d' % question.number)
    if question.format == TEXT_FORMAT:
        return check_cell_text(question.number, cell)
    # Bug fix: raised a generic Exception; ValueError is the idiomatic type
    # for an unrecognized value (and is still caught by 'except Exception').
    raise ValueError("invalid question type")
def grade_answers(cells):
    """Grade every known question against the extracted answer cells.

    :param cells: dict mapping question number -> notebook cell dict
    :return: results dict with a 'tests' list of per-question rows; 'score'
             is initialised to 0 and filled in later by main()
    """
    results = {'score': 0, 'tests': []}
    for question in questions:
        # Bug fix: the original fetched the cell with an unused .get() and
        # then did a membership test plus a second lookup; one lookup suffices.
        cell = cells.get(question.number)
        if cell is None:
            status = "not found"
        else:
            status = check_cell(question, cell)
        row = {"test": question.number, "result": status, "weight": question.weight}
        results['tests'].append(row)
    return results
def main():
    """Re-execute the student notebook, grade it, and write result.json.

    Usage: test.py [notebook.ipynb] — defaults to main.ipynb.
    """
    # rerun everything
    orig_notebook = 'main.ipynb'
    if len(sys.argv) > 2:
        print("Usage: test.py main.ipynb")
        return
    elif len(sys.argv) == 2:
        orig_notebook = sys.argv[1]
    nb = rerun_notebook(orig_notebook)
    # extract cells that have answers (marked with a '#qN' comment)
    answer_cells = {}
    for cell in nb['cells']:
        q = extract_question_num(cell)
        if q is None:  # bug fix: was 'q == None'
            continue
        if q not in question_nums:  # bug fix: was 'not q in'
            print('no question %d' % q)
            continue
        answer_cells[q] = cell
    # do grading on extracted answers and produce results.json
    results = grade_answers(answer_cells)
    passing = sum(t['weight'] for t in results['tests'] if t['result'] == PASS)
    total = sum(t['weight'] for t in results['tests'])
    # Robustness: avoid ZeroDivisionError if no graded tests were produced.
    results['score'] = 100.0 * passing / total if total else 0.0
    print("\nSummary:")
    for test in results["tests"]:
        print(" Test %d: %s" % (test["test"], test["result"]))
    print('\nTOTAL SCORE: %.2f%%' % results['score'])
    with open('result.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(results, indent=2))
if __name__ == '__main__':
    main()
|
import argparse
from typing import Union, Optional, Callable, Tuple, Any, Dict
import gpflow
from gpflow.mean_functions import Constant
import numpy as np
from sklearn.model_selection import train_test_split
import logging as log
# from data_utils import transform_data
from data_utils import transform_data, TaskDataLoader, featurise_mols
class ActiveLearner:
    """Pool-based active learning loop for GP regression with gpflow.

    Starting from a small labelled initialisation set, each iteration fits a
    GPR model, scores the held-out pool with an acquisition function, and
    moves the highest-scoring pool points into the training set.
    """

    def __init__(
        self,
        kernel_params: Dict[str, Any],
        model_params: Dict[str, Any],
        x_train: np.ndarray,
        y_train: np.ndarray,
        x_test: np.ndarray,
        y_test: np.ndarray,
        acquisition_function: str,
        scaler: str,
        seed: int,
        init_size: float,
        acquisition_size: float,
    ):
        """
        :param kernel_params: kernel hyperparameters (stored but currently
            unused — presumably for build_model; TODO confirm)
        :param model_params: model hyperparameters (stored but currently unused)
        :param acquisition_function: one of "var", "rand", "expected_improvement"
        :param scaler: scaler name (currently unused; _select_data_scaler is
            not implemented)
        :param seed: random seed for the initial train/holdout split
        :param init_size: fraction of the training pool used as the initial set
        :param acquisition_size: fraction of the remaining pool acquired per
            iteration (see _get_n_samp)
        """
        self.kernel_params = kernel_params
        self.model_params = model_params
        self.seed = seed
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test
        self.init_size = init_size
        self.acquisition_size = acquisition_size
        self.n_feats = self.x_train.shape[1]
        self.n_samp = self._get_n_samp()
        self.acquisition_function = self._select_acquisition_function(
            acquisition_function
        )
        # self.scaler = self._get_data_scaler(scaler)
        self.optimizer = gpflow.optimizers.Scipy()

    def train(self, n_iter: int):
        """Run the active-learning loop for n_iter + 1 iterations."""
        X_holdout, X_init, y_holdout, y_init = self.create_initial_sample(
            self.x_train, self.y_train
        )
        for i in range(n_iter + 1):
            # Bug fix: was a plain string, so the literal '{i}' was logged.
            log.info(f"Performing iteration {i}.")
            # Fit Scaler to data
            (
                X_init_scaled,
                y_init_scaled,
                X_test_scaled,
                y_test_scaled,
                y_scaler,
            ) = transform_data(X_init, y_init, self.x_test, self.y_test)
            # todo BUILD MODEL m = self.assemble_model()
            k = gpflow.kernels.Matern32(lengthscales=np.ones(self.n_feats), variance=1)
            m = gpflow.models.GPR(
                data=(X_init_scaled, y_init_scaled),
                kernel=k,
                noise_variance=0.01,
                mean_function=Constant(np.mean(y_init_scaled)),
            )
            # Fit model
            opt_logs = self.optimizer.minimize(
                m.training_loss, m.trainable_variables, options=dict(maxiter=100)
            )
            # Todo Performance logging
            # Re-fit the scaler on the holdout pool, score it, and move the
            # selected points from the pool into the training set.
            (
                X_init_scaled,
                y_init_scaled,
                X_holdout_scaled,
                y_holdout_scaled,
                y_scaler,
            ) = transform_data(X_init, y_init, X_holdout, y_holdout)
            sample_indices = self.suggest_sample(X_holdout_scaled, m, self.n_samp)
            X_init, X_holdout, y_init, y_holdout = self.update_training_data(
                sample_indices, X_init, X_holdout, y_init, y_holdout
            )

    def create_initial_sample(
        self, x_train: np.ndarray, y_train: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Creates initialisation and hold out sets
        """
        # test_size=init_size: the *smaller* split becomes the initial set.
        X_holdout, X_init, y_holdout, y_init = train_test_split(
            x_train, y_train, test_size=self.init_size, random_state=self.seed
        )
        return X_holdout, X_init, y_holdout, y_init

    def build_model(self):
        # TODO: move the model assembly out of train() into here.
        raise NotImplementedError

    def suggest_sample(self, A: np.ndarray, model, n_samp: int):
        """Return indices of the n_samp highest-scoring rows of A."""
        scores = self.acquisition_function(A, model)
        # argpartition gives the top-n indices in O(n), unordered among
        # themselves (order does not matter for acquisition).
        indices = np.argpartition(scores, -n_samp)[-n_samp:]  # Todo random
        return indices

    def objective_closure(self):
        # NOTE(review): self.model is never assigned anywhere in this class,
        # so calling this raises AttributeError — looks like dead code left
        # over from an earlier optimizer API; confirm before relying on it.
        return -self.model.log_marginal_likelihood()

    @staticmethod
    def update_training_data(
        sample_indices: np.ndarray,
        X_init: np.ndarray,
        X_holdout: np.ndarray,
        y_init: np.ndarray,
        y_holdout: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Move the rows at sample_indices from the holdout pool into the
        training set; returns the four updated arrays."""
        X_init = np.concatenate((X_init, X_holdout[sample_indices]))
        y_init = np.concatenate((y_init, y_holdout[sample_indices]))
        X_holdout = np.delete(X_holdout, sample_indices, axis=0)
        y_holdout = np.delete(y_holdout, sample_indices, axis=0)
        return X_init, X_holdout, y_init, y_holdout

    def _get_n_samp(self) -> int:
        """Points acquired per iteration: acquisition_size fraction of the
        initial holdout pool (which is (1 - init_size) of the full pool)."""
        # Bug fix: attribute was misspelled 'acqusition_size', which raised
        # AttributeError the first time __init__ called this method.
        return round(len(self.x_train) * (1 - self.init_size) * self.acquisition_size)

    @staticmethod
    def _select_acquisition_function(acquisition_function: str = "var") -> Callable:
        """Resolve an acquisition-function name to its callable."""
        log.info(f"Using acquisition function: {acquisition_function}")
        if acquisition_function == "var":
            from acquisition_functions import gp_var
            return gp_var
        elif acquisition_function == "rand":
            from acquisition_functions import gp_rand
            return gp_rand
        elif acquisition_function == "expected_improvement":
            from acquisition_functions import gp_ei
            return gp_ei
        # Bug fix: previously fell through and returned None, which only
        # surfaced later as a confusing 'NoneType is not callable' error.
        raise ValueError(f"Unknown acquisition function: {acquisition_function!r}")

    def _select_data_scaler(self, scaler: str = "standard") -> Callable:
        raise NotImplementedError

    def _fill_logs(self):
        raise NotImplementedError
def main():
    """Build an ActiveLearner from module-level data splits and train it.

    NOTE(review): relies on X_train/X_test/y_train/y_test existing at
    module scope (they are created in the __main__ block, which calls
    ActiveLearner directly and never invokes main()).
    """
    learner = ActiveLearner(
        kernel_params=None,
        model_params=None,
        x_train=X_train,
        y_train=y_train,
        x_test=X_test,
        y_test=y_test,
        acquisition_function="var",
        scaler="standard",
        seed=10,
        init_size=0.1,
    )
    learner.train(n_iter=20)
# Build Metrics Handler
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-p",
        "--path",
        type=str,
        default="../datasets/ESOL.csv",
        help="Path to the csv file for the task.",
    )
    parser.add_argument(
        "-t",
        "--task",
        type=str,
        default="ESOL",
        help="str specifying the task. One of [Photoswitch, ESOL, FreeSolv, Lipophilicity].",
    )
    parser.add_argument(
        "-r",
        "--representation",
        type=str,
        default="fingerprints",
        help="str specifying the molecular representation. "
        "One of [SMILES, fingerprints, fragments, fragprints].",
    )
    # NOTE(review): argparse's type=bool is a known pitfall -- any
    # non-empty string (including "False") parses as True. These three
    # flags are currently unused below; switch to action="store_true"
    # before relying on them.
    parser.add_argument(
        "-pca",
        "--use_pca",
        type=bool,
        default=False,
        help="If True apply PCA to perform Principal Components Regression.",
    )
    parser.add_argument(
        "-n",
        "--n_trials",
        type=int,
        default=1,
        help="int specifying number of random train/test splits to use",
    )
    parser.add_argument(
        "-ts",
        "--test_set_size",
        type=float,
        default=0.2,
        help="float in range [0, 1] specifying fraction of dataset to use as test set",
    )
    parser.add_argument(
        "-rms",
        "--use_rmse_conf",
        type=bool,
        default=True,
        help="bool specifying whether to compute the rmse confidence-error curves or the mae "
        "confidence-error curves. True is the option for rmse.",
    )
    parser.add_argument(
        "-pr",
        "--precompute_repr",
        type=bool,
        default=True,
        help="bool indicating whether to precompute representations",
    )
    args = parser.parse_args()

    # Use the parsed arguments. (Previously --task/--path/--representation
    # were parsed but ignored and the ESOL defaults were hard-coded; the
    # argparse defaults are identical, so default behaviour is unchanged.)
    data_loader = TaskDataLoader(task=args.task, path=args.path)
    smiles_list, y = data_loader.load_property_data()
    X = featurise_mols(smiles_list, representation=args.representation)
    # NOTE(review): test_size stays at the original hard-coded 0.1; the
    # --test_set_size argument defaults to 0.2 and disagreed with it, so
    # wiring it in would change behaviour -- confirm intent first.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.1, random_state=10
    )
    a = ActiveLearner(
        kernel_params=None,
        model_params=None,
        x_train=X_train,
        y_train=y_train,
        x_test=X_test,
        y_test=y_test,
        acquisition_function="var",
        scaler="standard",
        seed=10,
        init_size=0.1,
        acquisition_size=0.025
    )
    a.train(n_iter=20)
    # Build Metrics Handler

"""
kernel_params = {
    "lengthscale": np.ones,
    "noise_variance": None
}
model_params: {
}
"""
|
"""
Friedrich Schotte, 2 Mar 2011 - 6 Oct 2011
"""
__version__ = "1.2.1"
def find(topdir, name=(), exclude=()):
    """Return the files under directory 'topdir' whose path names match any
    of the glob patterns in 'name', excluding those matching any of the
    patterns in 'exclude'.

    'name' and 'exclude' may each be a single pattern string or a sequence
    of pattern strings. (Defaults changed from mutable lists to tuples;
    behaviour is identical since they were never mutated.)
    """
    from os import walk
    import re
    if isinstance(name, str):
        name = [name]
    if isinstance(exclude, str):
        exclude = [exclude]
    name_patterns = [re.compile(glob_to_regex(pattern)) for pattern in name]
    exclude_patterns = [re.compile(glob_to_regex(pattern)) for pattern in exclude]
    file_list = []
    for directory, subdirs, files in walk(topdir):
        for filename in files:
            pathname = directory + "/" + filename
            wanted = any(p.match(pathname) for p in name_patterns)
            excluded = any(p.match(pathname) for p in exclude_patterns)
            if wanted and not excluded:
                file_list.append(pathname)
    return file_list
def glob_to_regex(pattern):
    r"""Convert a 'glob' pattern for file name matching to a regular
    expression. E.g. "foo.?" -> r"^foo\..$", "bar*" -> r"^bar.*$".

    Only '.', '*' and '?' are translated; other regex metacharacters in
    the pattern are passed through unescaped.
    """
    # Raw strings fix the invalid escape sequence "\." that a plain string
    # literal produced (a SyntaxWarning on modern Python).
    return "^" + pattern.replace(".", r"\.").replace("*", ".*").replace("?", ".") + "$"
if __name__ == "__main__": ##for testing
topdir = "//Femto/C/All Projects/APS/Experiments/2011.02/Analysis/WAXS/Friedrich/run1"
files = find(topdir,name="*.log",exclude=["*/laser_beamcheck.log","*/backup/*"])
for file in files: print(file)
|
import numpy as np

np.random.seed(1000)  # fix the RNG seed so the printed values are reproducible

print(np.random.binomial(100, 0.3))  # one binomial draw with n=100, p=0.3
'''
32
'''

print(np.random.binomial(100, 0.3, 10))  # ten binomial draws with n=100, p=0.3
'''
[25 38 30 35 26 22 29 27 35 26]
'''
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow CPU/build warnings (originally added for macOS)
from keras.models import Sequential, load_model
from keras.layers import Dense
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import tensorflow as tf
import numpy

# Fix random seeds for reproducible runs
seed = 0
numpy.random.seed(seed)
tf.set_random_seed(seed)

# Sonar dataset: 60 feature columns, label in column 60
df = pd.read_csv('../dataset/sonar.csv', header=None)
dataset = df.values
X = dataset[:, 0:60]
Y_obj = dataset[:, 60]

# Encode the string class labels to integers
e = LabelEncoder()
e.fit(Y_obj)
Y = e.transform(Y_obj)

from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=seed)

# 60 -> 24 -> 10 -> 1 binary classifier with a sigmoid output
model = Sequential()
model.add(Dense(24, input_dim=60, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# NOTE(review): mean_squared_error on a sigmoid output trains, but
# binary_crossentropy is the conventional loss here -- confirm intent.
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=130, batch_size=5)

# Round-trip the model through disk to demonstrate save/load
model.save('../model/my_model.h5')
del model
model = load_model('../model/my_model.h5')
print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1]))
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as T
import torch.nn.functional as F
import numpy as np
import os
import json
import tqdm
import shutil
from PIL import Image, ImageFont, ImageDraw
from models.build_model import PrepareModel
from config import get_classify_config
#############################################
# Predict pseudo-labels and move samples whose confidence exceeds the
# configured per-class threshold into the target directory.
#############################################
class PredictDownloadImage(object):
    """Predict pseudo-labels for downloaded images and copy samples whose
    prediction confidence exceeds a per-class threshold to a save directory.

    NOTE(review): requires a CUDA device (inputs and model use .cuda()).
    """

    def __init__(self, model_type, classes_num, weight_path, image_size, label_json_path, mean=[], std=[]):
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; harmless here only as long as they are never mutated.
        self.model_type = model_type      # model architecture identifier
        self.classes_num = classes_num    # number of target classes
        self.weight_path = weight_path    # checkpoint file to load
        self.image_size = image_size      # input resize target
        self.mean = mean                  # normalisation mean
        self.std = std                    # normalisation std
        self.model, self.label_dict = self.__prepare__(label_json_path)

    def predict_multi_smaples(self, samples_root, thresh={}, save_path=''):
        # (method name typo 'smaples' kept: external callers use this name)
        """Predict pseudo-labels for all samples under ``samples_root`` and
        copy those that pass their class threshold into ``save_path``.

        Args:
            samples_root: directory holding the raw samples
            thresh: dict mapping class name -> confidence threshold
            save_path: output directory (recreated from scratch each run)
        """
        samples_list = os.listdir(samples_root)
        samples_list = set([sample.split('.')[0] for sample in samples_list])
        images_name = [sample + '.jpg' for sample in samples_list]
        predict_results = []
        tbar = tqdm.tqdm(images_name)
        if not os.path.exists(save_path):
            print('Making %s' % save_path)
            os.mkdir(save_path)
        else:
            # Wipe any previous run's output before regenerating it
            print('Removing %s' % save_path)
            shutil.rmtree(save_path)
            print('Making %s' % save_path)
            os.mkdir(save_path)
        for image_name in tbar:
            label = image_name.split('_')[0]
            # Map alternate dish names onto the canonical class names used
            # by the threshold dict (runtime strings kept verbatim).
            if label == '浆水鱼鱼':
                label = '凉鱼'
            elif label == '酥饺':
                label = '蜜饯张口酥饺'
            current_thresh = thresh[label]
            image_path = os.path.join(samples_root, image_name)
            index, predict_label, remain = self.predict_single_sample(label, image_path, thresh=current_thresh)
            if remain:
                descript = 'Remain: %s' % image_name
                self.save_image_label(save_path, image_path, image_name, predict_label, index)
            else:
                descript = 'Removing: %s' % image_name
            tbar.set_description(desc=descript)
        # NOTE(review): predict_results is never appended to; callers
        # always receive an empty list.
        return predict_results

    def predict_single_sample(self, annotation, sample_path, thresh=0.6):
        """Predict the label of a single sample.

        Args:
            annotation: the annotated (ground-truth) class name
            sample_path: path to the sample image
            thresh: confidence threshold for keeping the sample

        Returns:
            index: predicted class index (-1 on failure)
            predict_label: predicted 'id/name' label (-1 on failure)
            remain: True to keep the sample, False to drop it
        """
        try:
            image = Image.open(sample_path).convert('RGB')
            transforms = T.Compose([
                T.Resize(self.image_size),
                T.ToTensor(),
                T.Normalize(self.mean, self.std)
            ])
            image = transforms(image)
            image = torch.unsqueeze(image, dim=0).cuda()
            output = self.model(image)
            output = torch.squeeze(output)
            predicts = F.softmax(output)
            predicts_numpy = predicts.cpu().detach().numpy()
            indexs = np.argsort(predicts_numpy)
            index = indexs[-1]  # top-1 prediction
            predict_label = self.label_dict[str(index)]
            score = predicts_numpy[index]
            # Keep only if confident AND prediction agrees with annotation
            if score > thresh and predict_label.split('/')[1] == annotation:
                remain = True
            else:
                remain = False
        except:
            # NOTE(review): bare except treats every failure (bad file,
            # CUDA error, KeyError) as "drop the sample" -- consider
            # narrowing to the expected exceptions.
            remain = False
            index = -1
            predict_label = -1
        return index, predict_label, remain

    def save_image_label(self, save_path, image_path, image_name, label, index):
        """Copy the image into ``save_path`` and write a companion txt file
        containing '<image_name>, <class index>'.

        Args:
            save_path: output root directory
            image_path: source image path
            image_name: image file name
            label: class name (unused here; kept for interface stability)
            index: class index written to the label file
        """
        label_file_name = image_name.replace('jpg', 'txt')
        label_file_path = os.path.join(save_path, label_file_name)
        with open(label_file_path, 'w') as f:
            line = image_name + ', ' + str(index)
            f.writelines(line)
        save_image_path = os.path.join(save_path, image_name)
        shutil.copy(image_path, save_image_path)

    def __prepare__(self, label_json_path):
        """Build the model, load checkpoint weights, move it to the GPU in
        eval mode, and load the index -> 'id/name' label mapping."""
        prepare_model = PrepareModel()
        model = prepare_model.create_model(self.model_type, self.classes_num, 0, pretrained=False)
        model.load_state_dict(torch.load(self.weight_path)['state_dict'])
        model = model.cuda()
        model.eval()
        # Mapping from class index (as a string) to the annotated label
        with open(label_json_path, 'r') as f:
            label_dict = json.load(f)
        return model, label_dict
def compute_labels_thresh(labels_scores, thresh_max=0.95, thresh_min=0.85):
    """Derive a per-class pseudo-labelling threshold from per-class scores.

    Lower-scoring classes get higher thresholds: the threshold is a linear
    interpolation between ``thresh_min`` (best-scoring class) and
    ``thresh_max`` (worst-scoring class).

    Args:
        labels_scores: dict mapping 'id/name' labels to scores.
        thresh_max: threshold assigned to the lowest-scoring class.
        thresh_min: threshold assigned to the highest-scoring class.

    Returns:
        dict mapping the 'name' part of each label to its threshold.
    """
    scores = labels_scores.values()
    max_score = max(scores)
    min_score = min(scores)
    score_range = max_score - min_score
    labels_thresh = {}
    for label, score in labels_scores.items():
        if score_range == 0:
            # All classes scored identically; previously this divided by
            # zero. Every class then gets the minimum threshold.
            thresh = thresh_min
        else:
            thresh = (max_score - score) / score_range * (thresh_max - thresh_min) + thresh_min
        labels_thresh[label.split('/')[1]] = thresh
    return labels_thresh
if __name__ == "__main__":
config = get_classify_config()
weight_path = 'model_best.pth'
label_json_path = 'label_id_name.json'
samples_root = ''
save_path = ''
labels_score_file = 'checkpoints/se_resnext101_32x4d/log-2019-12-17T18-24-58/classes_acc.json'
thresh_max = 0.90
thresh_min = 0.90
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
with open(labels_score_file, 'r') as f:
labels_score = json.load(f)
labels_thresh = compute_labels_thresh(labels_score, thresh_max, thresh_min)
print(labels_thresh)
predict_download_images = PredictDownloadImage(config.model_type, config.num_classes, weight_path, config.image_size, label_json_path, mean=mean, std=std)
predict_download_images.predict_multi_smaples(samples_root, thresh=labels_thresh, save_path=save_path) |
import os
import numpy as np
from sklearn.model_selection import train_test_split

# Dataset layout to generate. Can be either `quadrant` or `uniform`.
# NOTE(review): 'type' shadows the builtin; renaming it would also touch
# the __main__ block below, which reads it.
type = 'quadrant'
assert type in ['uniform', 'quadrant']
# Ensure both output directories exist up front
if not os.path.exists('data-uniform/'):
    os.makedirs('data-uniform/')
if not os.path.exists('data-quadrant/'):
    os.makedirs('data-quadrant/')
if __name__ == '__main__':
    import tensorflow as tf

    np.random.seed(0)
    tf.set_random_seed(0)

    # From https://arxiv.org/pdf/1807.03247.pdf
    # 3136 one-hot 56x56 maps, zero-padded by 4 on each side to 64x64
    onehots = np.pad(np.eye(3136, dtype='float32').reshape((3136, 56, 56, 1)),
                     ((0, 0), (4, 4), (4, 4), (0, 0)), mode="constant")
    # Render each one-hot pixel as a 9x9 square via convolution
    images = tf.nn.conv2d(onehots, np.ones((9, 9, 1, 1)), [1] * 4, "SAME")
    # Get the images
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        images = sess.run(images)

    if type == 'uniform':
        # Create the uniform datasets: random 80/20 split over all positions
        indices = np.arange(0, len(onehots), dtype='int32')
        train, test = train_test_split(indices, test_size=0.2, random_state=0)
        train_onehot = onehots[train]
        train_images = images[train]
        test_onehot = onehots[test]
        test_images = images[test]
        np.save('data-uniform/train_onehot.npy', train_onehot)
        np.save('data-uniform/train_images.npy', train_images)
        np.save('data-uniform/test_onehot.npy', test_onehot)
        np.save('data-uniform/test_images.npy', test_images)
    else:
        # Create the quadrant datasets: hold out the 4th quadrant for test
        pos = np.where(onehots == 1.0)
        X = pos[1]
        Y = pos[2]
        train_set = []
        test_set = []
        train_ids = []
        test_ids = []
        for i, (x, y) in enumerate(zip(X, Y)):
            if x > 32 and y > 32:  # 4th quadrant
                test_ids.append(i)
                test_set.append([x, y])
            else:
                train_ids.append(i)
                train_set.append([x, y])
        train_set = np.array(train_set)
        test_set = np.array(test_set)
        # Reshape coordinates to (N, 1, 1, 2)
        train_set = train_set[:, None, None, :]
        test_set = test_set[:, None, None, :]
        print(train_set.shape)
        print(test_set.shape)
        train_onehot = onehots[train_ids]
        test_onehot = onehots[test_ids]
        train_images = images[train_ids]
        test_images = images[test_ids]
        print(train_onehot.shape, test_onehot.shape)
        print(train_images.shape, test_images.shape)
        np.save('data-quadrant/train_set.npy', train_set)
        np.save('data-quadrant/test_set.npy', test_set)
        np.save('data-quadrant/train_onehot.npy', train_onehot)
        np.save('data-quadrant/train_images.npy', train_images)
        np.save('data-quadrant/test_onehot.npy', test_onehot)
        np.save('data-quadrant/test_images.npy', test_images)
|
import lusid.models as models
import lusid
from lusid import ApiException
def setup_portfolio(api_factory, portfolio_scope, portfolio_code, portfolio_created_date, properties: dict):
    """Create a GBP transaction portfolio in LUSID.

    If a portfolio with this code/scope already exists, the ApiException
    raised by LUSID is caught and a message is printed instead.
    """
    request = models.CreateTransactionPortfolioRequest(
        display_name=portfolio_code,
        code=portfolio_code,
        base_currency="GBP",
        created=portfolio_created_date,
        properties=properties,
    )
    try:
        transaction_portfolios_api = api_factory.build(lusid.api.TransactionPortfoliosApi)
        transaction_portfolios_api.create_portfolio(
            scope=portfolio_scope,
            create_transaction_portfolio_request=request,
        )
    except ApiException:
        print(f"Portfolio with code {portfolio_code} and scope {portfolio_scope} already exists")
def update_portfolio_properties():
    """Placeholder; not yet implemented."""
    pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Impulse Noise Restoration via CSC
=================================
This example demonstrates the removal of salt & pepper noise from a colour image using convolutional sparse coding, with a colour dictionary :cite:`wohlberg-2016-convolutional` and with an $\ell_1$ data fidelity term, an $\ell_1$ regularisation term, and an additional gradient regularization term :cite:`wohlberg-2016-convolutional2`
$$\mathrm{argmin}_\mathbf{x} \; \sum_c \left\| \sum_m \mathbf{d}_{c,m} * \mathbf{x}_m -\mathbf{s}_c \right\|_1 + \lambda \sum_m \| \mathbf{x}_m \|_1 + (\mu/2) \sum_i \sum_m \| G_i \mathbf{x}_m \|_2^2$$
where $\mathbf{d}_{c,m}$ is channel $c$ of the $m^{\text{th}}$ dictionary filter, $\mathbf{x}_m$ is the coefficient map corresponding to the $m^{\text{th}}$ dictionary filter, $\mathbf{s}_c$ is channel $c$ of the input image, and $G_i$ is an operator computing the derivative along spatial index $i$.
This example uses the GPU accelerated version of :mod:`.admm.cbpdn` within the :mod:`sporco.cupy` subpackage.
"""
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco import util
from sporco import signal
from sporco import plot
from sporco.metric import psnr
from sporco.cupy import (cupy_enabled, np2cp, cp2np, select_device_by_load,
gpu_info)
from sporco.cupy.admm import cbpdn
"""
Boundary artifacts are handled by performing a symmetric extension on the image to be denoised and then cropping the result to the original image support. This approach is simpler than the boundary handling strategies that involve the insertion of a spatial mask into the data fidelity term, and for many problems gives results of comparable quality. The functions defined here implement symmetric extension and cropping of images.
"""
def pad(x, n=8):
    """Symmetric-extend image `x` by `n` pixels on each spatial edge.

    Greyscale (2-D) arrays are padded on both axes; otherwise the array is
    padded on the first two axes only, leaving the channel axis untouched.
    """
    if x.ndim != 2:
        return np.pad(x, ((n, n), (n, n), (0, 0)), mode='symmetric')
    return np.pad(x, n, mode='symmetric')
def crop(x, n=8):
    """Undo `pad`: drop `n` pixels from each spatial border of `x`."""
    inner = slice(n, -n)
    return x[inner, inner]
"""
Load a reference image and corrupt it with 33% salt and pepper noise. (The call to ``np.random.seed`` ensures that the pseudo-random noise is reproducible.)
"""
# Load the reference image and corrupt it with 33% salt & pepper noise;
# the fixed seed makes the pseudo-random noise reproducible.
img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True,
                                 idxexp=np.s_[:, 160:672])
np.random.seed(12345)
imgn = signal.spnoise(img, 0.33)
"""
We use a colour dictionary. The impulse denoising problem is solved by appending some additional filters to the learned dictionary ``D0``, which is one of those distributed with SPORCO. These additional components consist of a set of three impulse filters, one per colour channel, that will represent the low frequency image components when used together with a gradient penalty on the coefficient maps, as discussed below.
"""
# Learned colour dictionary plus three per-channel impulse filters; the
# impulses (with the gradient penalty below) represent the low-frequency
# image component.
D0 = util.convdicts()['RGB:8x8x3x64']
Di = np.zeros(D0.shape[0:2] + (3, 3), dtype=np.float32)
np.fill_diagonal(Di[0, 0], 1.0)
D = np.concatenate((Di, D0), axis=3)
"""
The problem is solved using class :class:`.admm.cbpdn.ConvL1L1Grd`, which implements a convolutional sparse coding problem with an $\ell_1$ data fidelity term, an $\ell_1$ regularisation term, and an additional gradient regularization term :cite:`wohlberg-2016-convolutional2`, as defined above. The regularization parameters for the $\ell_1$ and gradient terms are ``lmbda`` and ``mu`` respectively. Setting correct weighting arrays for these regularization terms is critical to obtaining good performance. For the $\ell_1$ norm, the weights on the filters that are intended to represent low frequency components are set to zero (we only want them penalised by the gradient term), and the weights of the remaining filters are set to unity. For the gradient penalty, all weights are set to zero except for those corresponding to the filters intended to represent low frequency components, which are set to unity.
"""
lmbda = 3e0  # l1 regularisation parameter
mu = 2e1     # gradient regularisation parameter
# l1 weights: zero for the three impulse (low-frequency) filters, one for
# the rest; gradient weights: the reverse.
w1 = np.ones((1, 1, 1, 1, D.shape[-1]), dtype=np.float32)
w1[..., 0:3] = 0.0
wg = np.zeros((D.shape[-1]), dtype=np.float32)
wg[..., 0:3] = 1.0
opt = cbpdn.ConvL1L1Grd.Options({'Verbose': True, 'MaxMainIter': 200,
                                 'RelStopTol': 5e-3, 'AuxVarObj': False,
                                 'rho': 4e1, 'RelaxParam': 1.8,
                                 'L1Weight': np2cp(w1),
                                 'GradWeight': np2cp(wg)})
"""
Initialise the :class:`.admm.cbpdn.ConvL1L1Grd` object and call the ``solve`` method.
"""
# Report GPU availability, then solve on the padded noisy image
if not cupy_enabled():
    print('CuPy/GPU device not available: running without GPU acceleration\n')
else:
    id = select_device_by_load()
    info = gpu_info()
    if info:
        print('Running on GPU %d (%s)\n' % (id, info[id].name))

b = cbpdn.ConvL1L1Grd(np2cp(D), np2cp(pad(imgn)), lmbda, mu, opt=opt, dimK=0)
X = cp2np(b.solve())
"""
The denoised estimate of the image is just the reconstruction from all coefficient maps.
"""
imgdp = cp2np(b.reconstruct().squeeze())
imgd = crop(imgdp)
"""
Display solve time and denoising performance.
"""
print("ConvL1L1Grd solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % psnr(img, imgd))
"""
Display the reference, noisy, and denoised images.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(21, 7))
fig.suptitle('ConvL1L1Grd Results')
plot.imview(img, ax=ax[0], title='Reference', fig=fig)
plot.imview(imgn, ax=ax[1], title='Noisy', fig=fig)
plot.imview(imgd, ax=ax[2], title='CSC Result', fig=fig)
fig.show()
"""
Display the low frequency image component.
"""
plot.imview(X[..., 0, 0:3].squeeze(), title='Low frequency component')
# Wait for enter on keyboard
input()
|
# -*- encoding: utf-8 -*-
"""
@Author    : zYx.Tom
@Contact   : 526614962@qq.com
@site      : https://zhuyuanxiang.github.io
---------------------------
@Software  : PyCharm
@Project   : tensorflow_cookbook
@File      : C0705_CBOW.py
@Version   : v0.1
@Time      : 2019-12-06 11:29
@License   : (C)Copyright 2018-2019, zYx.Tom
@Reference : "TensorFlow Machine Learning Cookbook", Nick McClure, Sec 07.05, p.162
@Desc      : Natural language processing -- a CBOW word-embedding model in TensorFlow
@Notes     :
"""
# common imports
import os
import pickle
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install "numpy<1.17" to match the version assert below
import sklearn
import tensorflow as tf
import winsound
from nltk.corpus import stopwords
from tensorflow.python.framework import ops
from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers

# Display precision: 8 decimal places, no scientific notation
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix the random seeds so repeated runs produce the same results
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# Silence: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Reset the default computation graph
ops.reset_default_graph()
# Open graph session
sess = tf.Session()
# ----------------------------------------------------------------------
# Declare model parameters
batch_size = 500
embedding_size = 200
vocabulary_size = 7500
generations = 50000
model_learning_rate = 0.25
num_sampled = int(batch_size / 2)  # Number of negative examples to sample.
window_size = 3  # How many words to consider left and right.
# Add checkpoints to training
save_embeddings_every = 5000
print_valid_every = 5000
print_loss_every = 100
# Declare stop words
stops = stopwords.words('english')
# We pick some test words. We are expecting synonyms to appear
valid_words = ['love', 'hate', 'happy', 'sad', 'man', 'woman']
# Later we will have to transform these into indices
# Load the movie review data
print('Loading Data')
texts, target = load_movie_data()
# Normalize text
print('Normalizing Text Data')
texts = normalize_text(texts, stops)
# Texts must contain at least 3 words
target = [target[ix] for ix, x in enumerate(texts) if len(x.split()) > 2]
texts = [x for x in texts if len(x.split()) > 2]
# Build our data set and dictionaries
print('Creating Dictionary')
word_dictionary = build_dictionary(texts, vocabulary_size)
word_dictionary_rev = dict(zip(word_dictionary.values(), word_dictionary.keys()))
text_data = text_to_numbers(texts, word_dictionary)
# Get validation word keys
valid_examples = [word_dictionary[x] for x in valid_words]
print('Creating Model')
# Create data/target placeholders
# TODO(review): why must the input size be fixed? (the NCE loss below
# needs a static batch dimension -- confirm)
x_inputs = tf.placeholder(tf.int32, shape = [batch_size, 2 * window_size], name = 'x_inputs')
y_target = tf.placeholder(tf.int32, shape = [batch_size, 1], name = 'y_target')
valid_dataset = tf.constant(valid_examples, dtype = tf.int32)
# Define Embeddings:
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# CBOW: sum the embeddings of every word in the context window
embed = tf.zeros([batch_size, embedding_size])
for element in range(2 * window_size):
    embed += tf.nn.embedding_lookup(embeddings, x_inputs[:, element])
# NCE loss parameters
nce_weights = tf.Variable(tf.truncated_normal(
        [vocabulary_size, embedding_size], stddev = 1.0 / np.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Get loss from prediction
loss = tf.reduce_mean(tf.nn.nce_loss(weights = nce_weights,
                                     biases = nce_biases,
                                     labels = y_target,
                                     inputs = embed,
                                     num_sampled = num_sampled,
                                     num_classes = vocabulary_size))
# Create optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate = model_learning_rate).minimize(loss)
# Cosine similarity between words
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims = True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b = True)
# Create model saving operation
saver = tf.train.Saver({"embeddings": embeddings})
# Add variable initializer.
init = tf.global_variables_initializer()
sess.run(init)
# Filter out sentences that aren't long enough:
text_data = [x for x in text_data if len(x) >= (2 * window_size + 1)]
# Run the CBOW model.
print('Starting Training')
data_folder_name = 'temp'
loss_vec, loss_x_vec = [], []
for i in range(generations):
    batch_inputs, batch_labels = generate_batch_data(text_data, batch_size, window_size, method = 'cbow')
    feed_dict = {x_inputs: batch_inputs, y_target: batch_labels}
    # Run the train step
    sess.run(optimizer, feed_dict = feed_dict)
    # Return the loss
    if (i + 1) % print_loss_every == 0:
        loss_val = sess.run(loss, feed_dict = feed_dict)
        loss_vec.append(loss_val)
        loss_x_vec.append(i + 1)
        print('Loss at step {} : {}'.format(i + 1, loss_val))
        pass
    # Validation: Print some random words and top 5 related words
    if (i + 1) % print_valid_every == 0:
        sim = sess.run(similarity, feed_dict = feed_dict)
        for j in range(len(valid_words)):
            valid_word = word_dictionary_rev[valid_examples[j]]
            top_k = 5  # number of nearest neighbors
            nearest = (-sim[j, :]).argsort()[1:top_k + 1]
            log_str = "Nearest to {}:".format(valid_word)
            for k in range(top_k):
                close_word = word_dictionary_rev[nearest[k]]
                log_str = '{} {},'.format(log_str, close_word)
                pass
            print(log_str)
    # Save dictionary + embeddings
    if (i + 1) % save_embeddings_every == 0:
        # Save vocabulary dictionary
        with open(os.path.join(data_folder_name, 'movie_vocab.pkl'), 'wb') as f:
            pickle.dump(word_dictionary, f)
            pass
        # Save embeddings
        model_checkpoint_path = os.path.join(os.getcwd(), data_folder_name, 'cbow_movie_embeddings.ckpt')
        save_path = saver.save(sess, model_checkpoint_path)
        print('Model saved in file: {}'.format(save_path))
        pass
    pass
# ----------------------------------------------------------------------
# Beep when the run finishes (Windows-only winsound)
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
    pass
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
reload (sys)
sys.setdefaultencoding('utf8')
import frappe
from frappe import _
from frappe.utils import cint, random_string
from frappe.utils import cstr, flt, getdate, nowdate, formatdate
from StringIO import StringIO
from frappeclient import FrappeClient
import csv
import json
from lxml import html
import requests
#OPENCBS entry
#Read People, Savings, Contracts, SavingEvents ....
#Entries to Account JV
@frappe.whitelist()
def opencbs_get_dados():
#fonte should be the Page
empresa = 'AngolaERP2'
centrocusto = frappe.db.sql("""SELECT name from `tabCost Center` where company = %s and is_group=0 """,(empresa),as_dict=True)
#print "CENTRO "
#print centrocusto[0]['name']
centrocusto = centrocusto[0]['name'] # look for based on the Empresa
#Has to create the Clients or check if exist first due to the loop ...
page0=requests.get('http://192.168.229.138:8080/api/people')
if page0.status_code == 200:
#clie = page2.json()
for clie in page0.json():
print "cliente"
print clie['firstName']
#Found on CBS now look on ours
cliente = frappe.db.sql("""SELECT name from `tabCustomer` where customer_name like %s """,(str(clie['firstName']) + '%'),as_dict=True)
print "LISTA CLIENTES "
print (cliente == [])
if (cliente == []):
#Creates the Cliente
print "CRIAR CLIETETEEEEE"
response = frappe.get_doc({
"doctype":"Customer",
"customer_name": str(clie['firstName']) + ' ' + str(clie['lastName']),
"customer_type": "Company",
"customer_group": "Individual",
"territory": "Angola",
"customer_details": str(clie),
"tax)id":str(clie['identificationData']),
"company": empresa
})
response.insert()
try:
page=requests.get('http://192.168.229.138:8080/api/savingevents')
except Exception, e:
if frappe.message_log: frappe.message_log.pop()
return 0,0
#print page
if page.status_code == 200:
#Json reading
num =0
registo = page.json()
for reg in page.json()['items']:
#for reg in registo.keys():
print reg['id']
#print registo['items'][num]
#print registo['items'][num]['contractid']
print formatdate(reg['creationdate'],"dd-MM-YYYY")
#Deve filtrar somente os dados do DIA CORRENTE
#
# Id int `json:"id"`
# Contractid int `json:"contractid"`
# Code string `json:"code"`
# Amount float64 `json:"amount"`
# Description string `json:"description"`
# Creationdate time.Time `json:"creationdate"`
# Relatedcontractcode string `json:"relatedcontract"`
#id user_id contract_id code amount description deleted creation_date cancelable is_fired related_contract_code fees is_exported savings_method pending pending_event_id teller_id loan_event_id cancel_date doc1 parent_event_id
#1 1 1 SVDE 10000.0000 Initial deposit False 2017-06-22 10:58:21.110 True True NULL NULL False 1 False NULL NULL NULL NULL NULL NULL
#SAVING EVENTS GetbyID
page1=requests.get('http://192.168.229.138:8080/api/savings/' + str(reg['contractid']))
if page1.status_code == 200:
num1 =0
registo1 = page1.json()
print "keys ", registo1.keys()
#for reg1 in registo1.keys():
print "campo "
print registo1['code'] #To be used as REFERENCE on ERPNEXT
#Gets Client info ... should add on local DB?????
page2=requests.get('http://192.168.229.138:8080/api/people/' + str(registo1['tiersid']))
if page2.status_code == 200:
clie = page2.json()
print "cliente"
print clie['firstName']
#Found on CBS now look on ours
cliente = frappe.db.sql("""SELECT name from `tabCustomer` where customer_name like %s """,(str(clie['firstName']) + '%'),as_dict=True)
print "CLIEEEEEEEE "
print (cliente == [])
if (cliente == []):
#Creates the Cliente
print "CRIAR CLIETETEEEEECLIETETEEEEECLIETETEEEEECLIETETEEEEE"
# response = frappe.get_doc({
# "doctype":"Customer",
# "customer_name": str(clie['firstName']) + ' ' + str(clie['lastName']),
# "customer_type": "Company",
# "customer_group": "Individual",
# "territory": "Angola",
# "customer_details": str(clie),
# "company": empresa
# })
# response.insert()
#Lancamento Accounts ERPNEXt
jv_name = ""
journal_entry = frappe.new_doc('Journal Entry')
journal_entry.voucher_type = 'Journal Entry' #To see what type of entry to add
journal_entry.user_remark = str(reg['description']) + ' #' + str(registo1['code']) + '-' + str(registo1['id'])
journal_entry.cheque_no = str(registo1['code'])
journal_entry.cheque_date = formatdate(reg['creationdate'],"dd-MM-YYYY")
journal_entry.company = empresa
journal_entry.posting_date =nowdate()
account_amt_list = []
adjustment_amt = 0
contasal = 0
#DEBIT
#if CODE SVDE (Deposit
#IF CODE SCTE (Transfer
print str(reg['code'])
acc = '1.10.10.10' #1.10.10.10 default
if str(reg['code']) == "SCTE":
acc = '2.10.10' #1.10.10.10 default
#if str(registo['items'][num]['code']) == "SVDE":
# if str(reg['code']) == "SCTE":
# acc = '2.10.10' #2.10.10 default
accs = frappe.db.sql("""SELECT name from tabAccount where account_name like %s and company = %s """,(acc + '%',empresa),as_dict=True)
print "Debito CONTAB"
print type(accs[0]['name'])
print type(str(accs[0]['name']))
acc = accs[0]['name']
amt =float(reg['amount'])
adjustment_amt = adjustment_amt+amt
account_amt_list.append({
"account": acc,
"debit_in_account_currency": amt,
"cost_center": centrocusto
})
#CREDIT
acc = '2.10.10' #2.10.10 default
if str(reg['code']) == "SCTE":
acc = '2.10.80' #2.10.10 default
accs = frappe.db.sql("""SELECT name from tabAccount where account_name like %s and company = %s """,(acc + '%',empresa),as_dict=True)
acc = accs[0]['name']
print "CREDITO CONTAB"
print accs[0]['name']
amt =float(reg['amount'])
adjustment_amt = adjustment_amt-amt
account_amt_list.append({
"account": acc,
"credit_in_account_currency": amt,
"cost_center": centrocusto
})
conta = acc;
journal_entry.set("accounts", account_amt_list)
journal_entry.save()
try:
journal_entry.submit()
jv_name = journal_entry.name
except Exception, e:
frappe.msgprint(e)
num += 1
|
import context
def summe(a, b):
    """Return the sum of `a` and `b`, coerced to float."""
    a = float(a)
    b = float(b)
    # Was `a * b`, which contradicts the function's name ("summe" = sum);
    # also renamed the local so it no longer shadows the builtin `sum`.
    total = a + b
    return total
|
import os
import environ

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR is the absolute path to the directory containing manage.py
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

env = environ.Env(
    # default types and values
    DEBUG=(bool, False),
    ALLOWED_HOSTS=(list, []),
)
# .env is in parent directory to 'settings.py' (absolute path to the file)
env_file= os.path.join(BASE_DIR, '.env')
# read from .env file if it exists (loads the variables into our environment)
environ.Env.read_env(env_file)

DEBUG = env('DEBUG')
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = env('ALLOWED_HOSTS')
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jquery',
'debug_toolbar', # bardzo wazne, zeby to bylo za staticfiles!
'django_extensions',
'sass_processor',
'compressor',
'taggit',
'bootstrap4',
'reservation.apps.ReservationConfig',
'homepage.apps.HomepageConfig',
'menu.apps.MenuConfig',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware', # <---- to tez dopisujemy na samej gorze to doinsatalowaniu paczek
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'restaurant.urls'
INTERNAL_IPS = [
'127.0.0.1',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'restaurant.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# zamieniamy dla heroku tak jak ponizej:
'default': env.db(),
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'sass_processor.finders.CssFinder',
    'compressor.finders.CompressorFinder',
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# BUG FIX: the setting was misspelled SASS_PROCESOR_ROOT, which
# django-sass-processor silently ignores (it reads SASS_PROCESSOR_ROOT).
SASS_PROCESSOR_ROOT = STATIC_ROOT
SASS_PROCESOR_ROOT = STATIC_ROOT  # kept temporarily in case project code reads the old (misspelled) name
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# STATICFILES_DIRS = [
#     os.path.join(BASE_DIR, '/static'),
# ]
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
# Development email backend: print messages to the console instead of sending.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
BOOTSTRAP4 = {
    'include_jquery': True,
}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# ######################################################################
# ######################################################################
# import os
# import environ
# # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# # directory to file manage.py
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #sciezka absolutna do pliku manage.py
# # PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# env = environ.Env(
# #defaul types and values
# DEBUG=(bool, False),
# ALLOWED_HOSTS=(list, []),
# )
# # .env is in parent directory to 'settings.py'
# env_file= os.path.join(BASE_DIR, '.env') #sciezka absolutna do pliku env
# # read from .env file if it exists
# environ.Env.read_env(env_file) #laduje zmienne z pliku env do naszego srodowiska
# DEBUG = env('DEBUG')
# SECRET_KEY = env('SECRET_KEY')
# ALLOWED_HOSTS = env('ALLOWED_HOSTS')
# # Application definition
# INSTALLED_APPS = [
# 'account.apps.AccountConfig',
# 'django.contrib.admin',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
# 'jquery',
# 'debug_toolbar', # bardzo wazne, zeby to bylo za staticfiles!
# 'django_extensions',
# 'sass_processor',
# 'compressor',
# 'taggit',
# 'bootstrap4',
# 'reservation.apps.ReservationConfig',
# 'homepage.apps.HomepageConfig',
# 'menu.apps.MenuConfig',
# 'blog.apps.BlogConfig',
# ]
# MIDDLEWARE = [
# 'debug_toolbar.middleware.DebugToolbarMiddleware', # <---- to tez dopisujemy na samej gorze to doinsatalowaniu paczek
# 'django.middleware.security.SecurityMiddleware',
# 'whitenoise.middleware.WhiteNoiseMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
# ]
# ROOT_URLCONF = 'restaurant.urls'
# INTERNAL_IPS = [
# '127.0.0.1',
# ]
# TEMPLATES = [
# {
# 'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [],
# 'APP_DIRS': True,
# 'OPTIONS': {
# 'context_processors': [
# 'django.template.context_processors.debug',
# 'django.template.context_processors.request',
# 'django.contrib.auth.context_processors.auth',
# 'django.contrib.messages.context_processors.messages',
# ],
# },
# },
# ]
# WSGI_APPLICATION = 'restaurant.wsgi.application'
# # Database
# # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# DATABASES = {
# # 'default': {
# # 'ENGINE': 'django.db.backends.sqlite3',
# # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# # }
# # zamieniamy dla heroku tak jak ponizej:
# 'default': env.db(),
# }
# # Password validation
# # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# # Internationalization
# # https://docs.djangoproject.com/en/3.0/topics/i18n/
# LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
# USE_I18N = True
# USE_L10N = True
# USE_TZ = True
# # Static files (CSS, JavaScript, Images)
# # https://docs.djangoproject.com/en/3.0/howto/static-files/
# STATICFILES_FINDERS = [
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'sass_processor.finders.CssFinder',
# 'compressor.finders.CompressorFinder',
# ]
# STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# # STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# STATIC_URL = '/static/'
# SASS_PROCESOR_ROOT = STATIC_ROOT
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# LOGIN_REDIRECT_URL = 'dashboard'
# LOGIN_URL = 'login'
# LOGOUT_URL = 'logout'
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# #BOOTSTRAP4 = {
# # 'include_jquery': True,
# #}
# CRISPY_TEMPLATE_PACK = 'bootstrap4' |
import os
import json
import torch
import numpy as np
# label_path = "/data/code/deep-metric-learning/datasets/DukeMTMC-reID/labels"
# Directory of labelme-style JSON annotation files — presumably one per
# DukeMTMC-reID image; verify the path on the target machine.
label_path = "/data/code/deep-metric-learning/datasets/DukeMTMC-reID/labels/hzh"
#
# focus=[]
# for fname in sorted(os.listdir(label_path)):
# fpath = os.path.join(label_path, fname)
# print(fname)
# with open(fpath, 'r') as f:
# label = json.load(f)
# focus.append(torch.tensor(label['shapes'][0]['points'])//16) # mean(1)
# focus = torch.stack(focus).int()[:, :, [1,0]]
# focus[:, 1] = (focus[:, 1]-focus[:, 0]).clamp_min(1) # n, left top start(y,x), box_scale(y,x)
# print(focus.tolist())
def get_focus_label(a=None):
    """Load focus-box annotations from the JSON label files in ``label_path``.

    Reads every file in ``label_path`` (sorted by file name), takes the first
    shape's two corner points from each labelme-style JSON, and converts them
    from (x, y) to (y, x) order.

    Parameters
    ----------
    a : unused
        Kept for backward compatibility with existing callers.

    Returns
    -------
    files : list of str
        Image file names (last component of each ``imagePath``, which uses
        Windows-style backslash separators).
    focus : torch.Tensor, shape (n, 2, 2)
        Per file: row 0 is the top-left corner (y, x); row 1 is the box size
        (height, width) after clamping.
    """
    files=[]
    focus=[]
    for fname in sorted(os.listdir(label_path)):
        fpath = os.path.join(label_path, fname)
        # print(fname)
        with open(fpath, 'r') as f:
            label = json.load(f)
        # '/1' keeps the raw coordinates while producing a float tensor.
        focus.append(torch.tensor(label['shapes'][0]['points'])/1) # mean(1)
        files.append(label['imagePath'].split('\\')[-1])
    # Reorder the last axis from (x, y) to (y, x).
    focus = torch.stack(focus)[:, :, [1,0]]
    # Clamp the second corner inside the image. The bounds suggest a 16-px
    # grid over a 256x128 image (16*16 by 16*8) — TODO confirm.
    # NOTE: these clamps are in-place and MUST precede the subtraction below.
    focus[:, 1, 0].clamp_max_(16*(16-1))
    focus[:, 1, 1].clamp_max_(16*(8-1))
    # Convert the clamped second corner into a box size (bottom-right minus
    # top-left), at least 16 px per side.
    focus[:, 1] = (focus[:, 1]-focus[:, 0]).clamp_min(16)
    return files, focus
#
# import time
# lasttime=time.time()
# for i in range(100):
# # torch.randperm(100000)
# np.random.choice(10000, 100000)
# print(time.time() - lasttime) |
from pypureclient.flashblade import NetworkInterface
# Update the existing network interface "myvip"
# Change the address to "1.2.3.201"
# Change the service type to "replication"
# Patch the network interface "myvip": set a new address and service type.
# `client` is assumed to be an authenticated pypureclient FlashBlade client
# created elsewhere in the surrounding script.
res = client.patch_network_interfaces(
    names=['myvip'], network_interface=NetworkInterface(address='1.2.3.201', services=['replication']))
print(res)
# FIX: use isinstance() for type checks rather than `type(x) == T`.
# NOTE(review): only NetworkInterface is imported above; referencing
# `pypureclient.responses.ValidResponse` additionally requires
# `import pypureclient` in the surrounding script — confirm it is present.
if isinstance(res, pypureclient.responses.ValidResponse):
    print(list(res.items))
# Other valid fields: ids
# See section "Common Fields" for examples
|
import torch
# requires more recent version of torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
class RolloutStorage(object):
    """Accumulates one rollout of experience for policy-gradient training.

    Transitions are appended as lists of tensors via insert(), returns are
    computed by compute_returns(), and convert_to_tensor() concatenates the
    lists into flat tensors on `self.device` for minibatch sampling in
    feed_forward_generator().
    """
    def __init__(self, device):
        # `device` is where the concatenated tensors are moved in convert_to_tensor().
        self.clear_history()
        self.device = device
    def insert(self, obs, action, r, value, action_logprob, mask):
        """Append one transition.

        `mask` multiplies the bootstrapped terms in compute_returns(), so it
        is presumably 0 at episode boundaries and 1 otherwise — confirm with
        the caller.
        """
        # Clone obs so later in-place edits by the caller cannot corrupt the history.
        self.observations.append(obs.clone())
        self.actions.append(action)
        self.rewards.append(r)
        self.value_preds.append(value)
        self.action_logprobs.append(action_logprob)
        self.masks.append(mask)
    def clear_history(self):
        """Reset every stored rollout list to empty."""
        self.observations = []
        self.actions = []
        self.action_logprobs = []
        self.value_preds = []
        self.rewards = []
        self.masks = []
        self.returns = []
    def convert_to_tensor(self):
        """Concatenate the per-step lists into flat tensors on `self.device`.

        Call after compute_returns(); rewards and masks stay Python lists.
        """
        self.observations = torch.cat(self.observations).to(self.device)
        self.actions = torch.cat(self.actions).to(self.device)
        self.returns = torch.cat(self.returns).to(self.device)
        self.value_preds = torch.cat(self.value_preds).to(self.device)
        self.action_logprobs = torch.cat(self.action_logprobs).to(self.device)
    def compute_returns(self, next_value, use_gae, gamma, tau):
        """Fill `self.returns` with discounted returns.

        With `use_gae`, uses Generalized Advantage Estimation:
            delta_t  = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
            gae_t    = delta_t + gamma * tau * mask_t * gae_{t+1}
            return_t = gae_t + V(s_t)
        Otherwise computes plain discounted returns bootstrapped from
        `next_value`. Statement order matters: `value_preds` (resp.
        `returns`) is temporarily extended with `next_value` and trimmed
        again after the backward pass.
        """
        self.returns = [0]*len(self.rewards)
        if use_gae:
            self.value_preds.append(next_value)
            gae = 0
            for step in reversed(range(len(self.rewards))):
                delta = self.rewards[step] + gamma * self.value_preds[step + 1] * self.masks[step] - self.value_preds[step]
                gae = delta + gamma * tau * self.masks[step] * gae
                self.returns[step] = gae + self.value_preds[step]
            self.value_preds = self.value_preds[:-1]
        else:
            self.returns.append(next_value)
            for step in reversed(range(len(self.rewards))):
                self.returns[step] = self.returns[step + 1] * gamma * self.masks[step] + self.rewards[step]
            self.returns = self.returns[:-1]
    def feed_forward_generator(self, advantages, num_mini_batch):
        """Yield shuffled minibatches of
        (observations, actions, returns, old log-probs, advantages).

        Requires convert_to_tensor() to have been called first, since the
        stored fields are indexed with a LongTensor of sample indices.
        """
        batch_size = len(self.rewards)
        mini_batch_size = batch_size // num_mini_batch
        sampler = BatchSampler(SubsetRandomSampler(range(batch_size)), mini_batch_size, drop_last=False)
        for indices in sampler:
            indices = torch.LongTensor(indices).to(self.device)
            observations_batch = self.observations[indices]
            actions_batch = self.actions[indices]
            return_batch = self.returns[indices]
            old_action_log_probs_batch = self.action_logprobs[indices]
            adv_targ = advantages[indices]
            yield observations_batch, actions_batch, return_batch, old_action_log_probs_batch, adv_targ
    @property
    def size(self):
        """Number of transitions currently stored."""
        return len(self.rewards)
from insta_checker import InstaChecker, super_print
import asyncio
async def main():
    """Demo: scrape one invalid URL, one profile and one post with InstaChecker."""
    # SECURITY NOTE(review): a live Instagram session cookie (session id,
    # csrf token) is hard-coded below. It should be revoked and supplied via
    # configuration/environment instead of being committed to source control.
    cookie = 'ds_user_id=48743733271;mid=YQgRZwAAAAEPjPkWIONy9q0lPPdu;sessionid=48743733271%3AJRUhxoYZxBh4BX%3A6;ig_nrcb=1;ig_did=A446BA65-9920-462A-BAA8-F85FEAC0AB98;csrftoken=iD5MZwMnXZHgNzm8H5kACrNHKVBIvpDz;rur="CLN\05448743733271\0541659454776:01f7e7cbc38f962c9fbe9fe015e28ac99145941e231df3769a1682f8ce14b2a8ca254ddf";'
    checker = InstaChecker(
        cookie=cookie,
        timeout=1,
    )
    # Three sample targets: a non-existent account, a profile, and a post.
    some_wrong_url = 'https://www.instagram.com/antonnechaev990123123213213132/'
    profile1_url = 'https://www.instagram.com/antonnechaev990/'
    post1_url = 'https://www.instagram.com/p/CQlptixpNQq/'
    urls_to_scrape = [
        some_wrong_url,
        profile1_url,
        post1_url
    ]
    data = await checker.run(urls_to_scrape)
    """
    returns
    {
        'your-url': {
            'success': (bool),
            'data': (dict),
            'errors': (dict of strings)
        }
    }
    """
    # The wrong URL is expected to fail; show its error payload.
    wrong_success = data[some_wrong_url]['success']
    print(f"{wrong_success = }") # f string with = is easy format
    if not wrong_success:
        print(f"{data[some_wrong_url]['errors'] = }")
    # Profile result: dump selected fields, then everything.
    profile1_success = data[profile1_url]['success']
    print(f"{profile1_success = }")
    if profile1_success:
        print(data[profile1_url])
        profile1_data = data[profile1_url]['data']
        print(f"{data[profile1_url]['type'] = }")
        print(f"{profile1_data['id'] = }")
        print(f"{profile1_data['avatar_url'] = }")
        print(f"{profile1_data['followers'] = }")
        print(f"{profile1_data['follows'] = }")
        print(f"{profile1_data['media_count'] = }")
        print(f"{profile1_data['recent_media'][0]['text'] = }")
        print('')
        print('Printing all profile1 data:')
        super_print(profile1_data)
    # Post result: dump selected fields, then everything.
    post1_success = data[post1_url]['success']
    print(f"{post1_success = }")
    if post1_success:
        post1_data = data[post1_url]['data']
        print(f"{data[post1_url]['type'] = }")
        print(f"{post1_data['id'] = }")
        print(f"{post1_data['likes'] = }")
        super_print(post1_data['owner'])
        print('')
        print('Printing all post1 data:')
        super_print(post1_data)
asyncio.run(main())
|
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import serial
from src.templates.workerprocess import WorkerProcess
from src.hardware.serialhandler.filehandler import FileHandler
from src.hardware.serialhandler.readthread import ReadThread
from src.hardware.serialhandler.writethread import WriteThread
class SerialHandlerProcess(WorkerProcess):
    # ===================================== INIT =========================================
    def __init__(self,inPs, outPs):
        """Forward commands from the RemoteControlReceiverProcess (or another
        process) to the micro-controller via the serial port. The default baud
        rate is 256000 and the device file /dev/ttyACM0. Every sent command is
        also saved to a log file named historyFile.txt.

        Parameters
        ----------
        inPs : list(Pipes)
            A list of pipes, where the first element is used for receiving the
            command to control the vehicle from another process.
        outPs : None
            Has no role.
        """
        super(SerialHandlerProcess,self).__init__(inPs, outPs)
        devFile = '/dev/ttyACM0'
        logFile = 'historyFile.txt'
        # comm init: open the serial link at 256000 baud with a 100 ms read timeout
        self.serialCom = serial.Serial(devFile,256000,timeout=0.1)
        # discard any stale bytes left in the OS buffers from a previous session
        self.serialCom.flushInput()
        self.serialCom.flushOutput()
        # log file init: the ReadThread/WriteThread append their traffic here
        self.historyFile = FileHandler(logFile)
    def run(self):
        """Run the worker loop, then close the command-history log on exit."""
        super(SerialHandlerProcess,self).run()
        #Post running process -> close the history file
        self.historyFile.close()
    # ===================================== INIT THREADS =================================
    def _init_threads(self):
        """ Initializes the read and the write thread.
        """
        # read write thread
        readTh = ReadThread(self.serialCom,self.historyFile)
        self.threads.append(readTh)
        # WriteThread forwards commands from the first input pipe to the serial port
        writeTh = WriteThread(self.inPs[0], self.serialCom, self.historyFile)
        self.threads.append(writeTh)
|
class TinyUrl:
    """Constants for the TinyURL shortening provider."""
    NAME = "tinyurl"
    API_URL = "http://tinyurl.com/api-create.php"
    CONTENT_TYPE = "application/json"
class BitLy:
    """Constants for the bit.ly shortening provider."""
    NAME = 'bitly'
    API_URL = "https://api-ssl.bitly.com/v4/shorten"
    # SECURITY NOTE(review): a live API token is hard-coded in source; it
    # should be revoked and loaded from configuration/environment instead.
    TOKEN = "Bearer 8ce4e9f7e73b0da7303a325b64bbb1c540e16a73"
    CONTENT_TYPE = "application/json"
class Messages:
    """User-facing status/error messages for the URL-shortening service."""
    WRONG_PROVIDER_MSG = "Not supported Provider. Choose between bit.ly and tinyurl"
    WRONG_PARAMETER_MSG = "Invalid parameters. Provide a <url> and optionally a <provider> parameter."
    WRONG_URL_MSG = "Invalid Url. Please provide a valid url"
    SUCCESS_MSG = "Successfully Shortened the URL"
class GeneralStrs:
    """Miscellaneous shared string constants (HTTP header names etc.)."""
    UNKNOWN = "unknown"
    AUTHORIZATION = "Authorization"
    CONTENT_TYPE = "Content-Type"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import imutils
import numpy as np
MIN_AREA_FOR_MOVEMENT = 1000 # Min size in pixels of a area where movement has been detected to count
MOVEMENT_THRESH = 20
DEBUG_MODE = True
class MovementDetectionService:
    """Detects areas of movement between consecutive video frames.

    Based on https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
    """

    def __init__(self):
        # Previous frame; None until the first call to detect_movement().
        # FIX: stored per instance instead of as a shared class attribute,
        # so independent service instances cannot interfere with each other.
        self.last_frame = None
        print('[Movement MovementDetectionService]: Ready')

    # Check if movement was detected within the frame
    def detect_movement(self, frame):
        """Return bounding boxes of areas that moved since the previous frame.

        The first call only primes the comparison frame and returns [].

        Parameters
        ----------
        frame : numpy.ndarray
            BGR image as produced by OpenCV capture — presumably; confirm
            with the caller.

        Returns
        -------
        list of dict
            One dict per moving area with keys bounding_rect_x / _y /
            _width / _height (pixel coordinates).
        """
        movements = []
        if self.last_frame is None:
            self.last_frame = frame.copy()
            return movements
        # Convert both frames to grey and blur generously so sensor noise
        # does not cause spurious differences.
        grey_new = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY)
        grey_old = cv2.cvtColor(self.last_frame, cv2.COLOR_BGR2GRAY)
        grey_new = cv2.GaussianBlur(grey_new, (21, 21), 0)
        grey_old = cv2.GaussianBlur(grey_old, (21, 21), 0)
        # Absolute per-pixel difference, thresholded into a binary mask.
        frame_delta = cv2.absdiff(grey_old, grey_new)
        thresh = cv2.threshold(frame_delta, MOVEMENT_THRESH, 255, cv2.THRESH_BINARY)[1]
        # Dilate to fill holes so one moving object yields one contour.
        kernel = np.ones((21, 21), np.uint8)
        dilate = cv2.dilate(thresh, kernel, iterations=5)
        # Contours of the mask are the candidate movement areas.
        contours = cv2.findContours(dilate.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(contours)
        for contour in contours:
            # Ignore contours that are too small to count as movement.
            if cv2.contourArea(contour) >= MIN_AREA_FOR_MOVEMENT:
                (x, y, w, h) = cv2.boundingRect(contour)
                movements.append({'bounding_rect_x': x,
                                  'bounding_rect_y': y,
                                  'bounding_rect_width': w,
                                  'bounding_rect_height': h})
        self.last_frame = frame.copy()
        return movements
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 7 15:07:15 2020
@author: rolly
"""
import filetodb
# Load the spreadsheet of virtual-account records, clean/validate it, and
# push it to the database only if every row passes validation.
fd = filetodb.Filetodb()
df = fd.openFile('VA SPP Genap 2019-2020.xlsx')
df = fd.cleanEmptyCell(df)
df = fd.checkSetHeader(df,'upload_id')
# Merge the separate expiry date and time columns into one datetime value.
df = fd.joinDatetime(df,'expired_date','expired_time')
df = fd.fixEmail(df,'customer_email')
df = fd.cekEmailValid(df, 'customer_email')
df = fd.fixPhoneNumber(df,'customer_phone')
# Rows that still fail validation after the fix-up passes.
invalidemails = fd.getInvalidEmails(df,'customer_email')
unregisteredemail = fd.getUnregEmails()
unregisteredphone = fd.getUnregPhones()
if len(invalidemails) == 0 and len(unregisteredemail) == 0 and len(unregisteredphone) == 0:
    # Everything validated: load the cleaned frame into the database.
    fd.toDB(df)
    print('ok')
else:
    # NOTE(review): this prints a JSON-like report by hand; `invalidemails`
    # is checked above but never included in the output — confirm intended.
    print('{ "invalid_phones" :')
    print(unregisteredphone)
    print(',')
    print(' "invalid_emails" :')
    print(unregisteredemail)
    print('}')
# In[]
|
from flask import render_template, url_for, redirect, flash, request, Blueprint
from flask_login import login_user, current_user, logout_user, login_required
from blogapp import db, bcrypt
from blogapp.models import User, Post
from blogapp.users.forms import RegistrationForm, LoginForm, UpdateAccountForm, RequestResetForm, ResetPasswordForm
from blogapp.users.utils import save_picture, send_reset_email
users = Blueprint ('users', __name__)
@users.route("/register", methods=['GET', 'POST'])
def register():
    """Create a new account, then send the visitor to the login page."""
    # Already signed in? Nothing to register.
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('register.html', title='Register', form=form)
    # Store only the bcrypt hash of the password, never the plain text.
    pw_hash = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
    new_user = User(username=form.username.data, email=form.email.data, password=pw_hash)
    db.session.add(new_user)
    db.session.commit()
    flash('Your Account has been Created! You are Now able to log in', 'success')
    return redirect(url_for('users.login'))
@users.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate a user and honour an optional ?next= redirect."""
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    form = LoginForm()
    if form.validate_on_submit():
        account_row = User.query.filter_by(email=form.email.data).first()
        if account_row and bcrypt.check_password_hash(account_row.password, form.password.data):
            login_user(account_row, remember=form.remember.data)
            # Send the user back to the page they originally asked for, if any.
            next_page = request.args.get('next')
            if next_page:
                return redirect(next_page)
            return redirect(url_for('main.home'))
        flash('Unsuccessful login please check your password or Email and try again', 'danger')
    return render_template('login.html', title='Login', form=form)
@users.route("/logout")
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('main.home'))
@users.route ("/account", methods = ['GET', 'POST'])
@login_required
def account ():
    """Display and update the signed-in user's profile (username, email, avatar)."""
    form = UpdateAccountForm ()
    if form.validate_on_submit ():
        # Only replace the stored avatar if a new picture was uploaded.
        if form.picture.data:
            picture_file = save_picture (form.picture.data)
            current_user.image_file = picture_file
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit ()
        flash ('Your Account has been Updated!', 'success')
        # Redirect (POST/redirect/GET) so a refresh does not resubmit the form.
        return redirect (url_for ('users.account'))
    elif request.method == 'GET':
        # Pre-fill the form with the current profile values.
        form.username.data = current_user.username
        form.email.data = current_user.email
    image_file = url_for ('static', filename='profile_pics/' + current_user.image_file)
    return render_template ('account.html', title = 'Account', image_file=image_file, form=form)
@users.route("/user/<string:username>")
def user_posts(username):
    """Show a paginated list (5 per page) of one user's posts, newest first."""
    page = request.args.get('page', 1, type=int)
    author = User.query.filter_by(username=username).first_or_404()
    posts = (Post.query.filter_by(author=author)
             .order_by(Post.date_posted.desc())
             .paginate(page=page, per_page=5))
    return render_template('user_posts.html', posts=posts, user=author)
@users.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
    """Email a password-reset link to the address given in the form.

    The same flash message is shown whether or not the address matches an
    account, so the endpoint does not leak which emails are registered.
    """
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    form = RequestResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # BUG FIX: guard against an unknown address — the original called
        # send_reset_email(None) when no account matched the submitted email.
        # (If RequestResetForm already validates existence, the guard is a
        # harmless defense-in-depth.)
        if user is not None:
            send_reset_email(user)
        flash('An Email has been sent with Instructions to Reset your Password.', 'info')
        return redirect(url_for('users.login'))
    return render_template('reset_request.html', title='Reset Password', form=form)
@users.route("/reset_password/<token>", methods=['GET', 'POST'])
def reset_token(token):
    """Verify a reset token and let its owner choose a new password."""
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    user = User.verify_reset_token(token)
    if user is None:
        flash('The token is Invalid or Expired', 'warning')
        return redirect(url_for('users.reset_request'))
    form = ResetPasswordForm()
    if not form.validate_on_submit():
        return render_template('reset_token.html', title='Reset Password', form=form)
    # Replace the stored hash with one for the newly chosen password.
    user.password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
    db.session.commit()
    flash('Your Password has been Updated! You are Now able to log in', 'success')
    return redirect(url_for('users.login'))
|
from rfai.dao.rfai_request_repository import generate_sub_query_for_update_parameters
from datetime import datetime as dt
class VoteDAO:
    """Data-access object for the `rfai_vote` table.

    All queries go through the injected repository, which is expected to
    expose execute(query, params) and return a list of rows.
    """
    def __init__(self, repo):
        self.repo = repo

    def get_vote_details_for_given_request_id(self, request_id):
        """Return per-solution vote counts for one request (list of rows)."""
        query_response = self.repo.execute(
            "SELECT rfai_solution_id, COUNT(*) as vote_count FROM rfai_vote WHERE request_id = %s GROUP BY "
            "rfai_solution_id", int(request_id))
        return query_response

    def get_vote_details_for_given_rfai_solution_id(self, rfai_solution_id):
        """Return the vote count row for one solution (first result row)."""
        query_response = self.repo.execute(
            "SELECT COUNT(*) as votes FROM rfai_vote WHERE rfai_solution_id = %s", int(rfai_solution_id))
        return query_response[0]

    def get_votes_count_for_given_request(self, request_id):
        """Return the total vote count row for one request (first result row)."""
        query_response = self.repo.execute(
            "SELECT COUNT(*) as vote_count FROM rfai_vote WHERE request_id = %s", int(request_id))
        return query_response[0]

    def add_vote(self, request_id, voter, rfai_solution_id, created_at):
        """Insert one vote row; returns the first element of the driver response."""
        query_response = self.repo.execute(
            "INSERT INTO rfai_vote (request_id, voter, rfai_solution_id, created_at, "
            "row_created, row_updated) "
            "VALUES (%s, %s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)",
            [request_id, voter, rfai_solution_id, created_at])
        return query_response[0]

    def update_vote_for_given_request_id(self, request_id, update_parameters):
        """Update columns of all votes for one request.

        BUG FIX: the statement now concatenates the generated, parameterised
        `sub_query` SET clause. The original concatenated the raw
        `update_parameters` object, which broke for non-string input and
        bypassed SQL parameterisation entirely.
        """
        sub_query, sub_query_values = generate_sub_query_for_update_parameters(update_parameters=update_parameters)
        query_response = self.repo.execute("UPDATE rfai_vote SET " + sub_query + " WHERE request_id = %s",
                                           sub_query_values + [request_id])
        return query_response[0]

    def delete_vote_for_given_request_id(self, request_id):
        """Delete all votes for one request."""
        query_response = self.repo.execute("DELETE FROM rfai_vote WHERE request_id = %s", request_id)
        return query_response[0]

    def create_or_update_vote(self, request_id, voter, rfai_solution_id, created_at):
        """Upsert one voter's vote for a request (MySQL ON DUPLICATE KEY UPDATE)."""
        query_response = self.repo.execute(
            "INSERT INTO rfai_vote (request_id, voter, rfai_solution_id, created_at, "
            "row_created, row_updated) VALUES (%s, %s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) ON DUPLICATE KEY "
            "UPDATE request_id = %s, voter = %s, rfai_solution_id = %s, row_updated = CURRENT_TIMESTAMP",
            [request_id, voter, rfai_solution_id, created_at, request_id, voter, rfai_solution_id])
        return query_response[0]
|
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views import View
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView
from django.urls import reverse_lazy
from twitter.forms import TwitterAccountForm
from twitter.models import TwitterAccount
class TwitterAccountListView(PermissionRequiredMixin, ListView):
    """List all stored Twitter accounts (requires view permission)."""
    permission_required = 'twitter.view_twitteraccount'
    model = TwitterAccount
    template_name = 'twitter/list.html'
    context_object_name = 'accounts'
class TwitterAccountDetail(PermissionRequiredMixin, DetailView):
    """Show one Twitter account (requires view permission)."""
    permission_required = 'twitter.view_twitteraccount'
    model = TwitterAccount
    template_name = 'twitter/detail.html'
    context_object_name = 'account'
class TwitterAccountCreate(PermissionRequiredMixin, CreateView):
    """Create a Twitter account via TwitterAccountForm (requires add permission).

    NOTE(review): no success_url is set, so Django redirects to the saved
    object's get_absolute_url() — confirm TwitterAccount defines it.
    """
    permission_required = 'twitter.add_twitteraccount'
    model = TwitterAccount
    template_name = 'twitter/form.html'
    form_class = TwitterAccountForm
class TwitterAccountDelete(PermissionRequiredMixin, DeleteView):
    """Confirm-and-delete view for a Twitter account (requires delete permission)."""
    permission_required = 'twitter.delete_twitteraccount'
    model = TwitterAccount
    template_name = 'twitter/delete.html'
    success_url = reverse_lazy('twitter:list')
    context_object_name = 'account'
class FetchTweets(PermissionRequiredMixin, View):
    """POST-only endpoint that triggers a tweet fetch for one account."""
    permission_required = 'twitter.add_twitteraccount'
    def post(self, request, pk):
        # Fetch synchronously in the request cycle; return an empty 200 on success.
        twitter_account = get_object_or_404(TwitterAccount, pk=pk)
        twitter_account.fetch_tweets()
        return HttpResponse()
|
from glob import glob
import mmcv
import time
timer = mmcv.utils.Timer()
# Directory whose file count is being monitored (Waymo->KITTI conversion output).
file_dir = '/Pseudo_Lidar_V2/waymo_kitti_converter/data/waymo/training/calib'
get_num = lambda : len(glob(file_dir+'/*'))
previous = get_num()
# NOTE(review): `start` is never used afterwards — confirm whether Timer()
# already starts on construction and this call is redundant.
start = timer.start()
while True:
    now = get_num()
    # Average rate since the script started (new files / total elapsed time).
    # Re-enabling `previous = now` below would turn this into a per-interval
    # rate, but the denominator would still be total elapsed time — confirm
    # which behaviour is intended.
    speed = (now-previous)/timer.since_start()
    print('Generating speed: {:0.2f} files/s'.format(speed))
    # previous = now
    time.sleep(2)
|
class Solution(object):
def groupThePeople(self, groupSizes):
"""
:type groupSizes: List[int]
:rtype: List[List[int]]
"""
from collections import defaultdict
groups = defaultdict(list)
def add_to_group(group, size, i):
last_group = group[-1]
if len(last_group) < size:
last_group.append(i)
else:
group.append([i])
for idx, g in enumerate(groupSizes):
if not groups[g]:
groups[g] = [[idx]]
else:
add_to_group(groups[g], g, idx)
result = []
for g in groups.values():
result.extend(g)
return result
# Smoke test: print the grouping produced for two sample inputs.
# BUG FIX: `print s.groupThePeople(d)` was Python 2-only statement syntax
# (a SyntaxError under Python 3); print() works on both 2 and 3.
s = Solution()
data = [
    [3,3,3,3,3,1,3],
    [2,1,3,3,3,2],
]
for d in data:
    print(s.groupThePeople(d))
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
from unittest import mock
import fixtures
from reno import config
from reno import defaults
from reno import main
from reno.tests import base
class TestConfig(base.TestCase):
    """Tests for reno's Config: defaults, overrides, and config-file loading."""
    # Minimal YAML used by the file-loading tests; flips
    # collapse_pre_releases away from its default.
    EXAMPLE_CONFIG = """
collapse_pre_releases: false
"""
    def setUp(self):
        super(TestConfig, self).setUp()
        # Temporary directory to store our config
        self.tempdir = self.useFixture(fixtures.TempDir())
    def test_defaults(self):
        # With no config file, options must equal the declared defaults.
        c = config.Config(self.tempdir.path)
        actual = c.options
        expected = {
            o.name: o.default
            for o in config._OPTIONS
        }
        self.assertEqual(expected, actual)
    def test_override(self):
        c = config.Config(self.tempdir.path)
        c.override(
            collapse_pre_releases=False,
        )
        actual = c.options
        expected = {
            o.name: o.default
            for o in config._OPTIONS
        }
        expected['collapse_pre_releases'] = False
        self.assertEqual(expected, actual)
    def test_override_multiple(self):
        # The last override of the same option wins.
        c = config.Config(self.tempdir.path)
        c.override(
            notesdir='value1',
        )
        c.override(
            notesdir='value2',
        )
        actual = c.options
        expected = {
            o.name: o.default
            for o in config._OPTIONS
        }
        expected['notesdir'] = 'value2'
        self.assertEqual(expected, actual)
    def test_load_file_not_present(self):
        # A missing config file triggers the missing-config reporter once.
        missing = 'reno.config.Config._report_missing_config_files'
        with mock.patch(missing) as error_handler:
            config.Config(self.tempdir.path)
        self.assertEqual(1, error_handler.call_count)
    def _test_load_file(self, config_path):
        # Helper: write EXAMPLE_CONFIG at config_path and verify it is picked up.
        with open(config_path, 'w') as fd:
            fd.write(self.EXAMPLE_CONFIG)
        self.addCleanup(os.unlink, config_path)
        c = config.Config(self.tempdir.path)
        self.assertEqual(False, c.collapse_pre_releases)
    def test_load_file_in_releasenotesdir(self):
        rn_path = self.tempdir.join('releasenotes')
        os.mkdir(rn_path)
        config_path = self.tempdir.join('releasenotes/config.yaml')
        self._test_load_file(config_path)
    def test_load_file_in_repodir(self):
        config_path = self.tempdir.join('reno.yaml')
        self._test_load_file(config_path)
    def test_load_file_empty(self):
        # A comment-only config file leaves all defaults untouched.
        config_path = self.tempdir.join('reno.yaml')
        with open(config_path, 'w') as fd:
            fd.write('# Add reno config here')
        self.addCleanup(os.unlink, config_path)
        c = config.Config(self.tempdir.path)
        self.assertEqual(True, c.collapse_pre_releases)
    def test_get_default(self):
        d = config.Config.get_default('notesdir')
        self.assertEqual('notes', d)
    def test_get_default_unknown(self):
        self.assertRaises(
            ValueError,
            config.Config.get_default,
            'unknownopt',
        )
    def _run_override_from_parsed_args(self, argv):
        # Helper: parse argv with the real CLI arg group, then feed the
        # resulting namespace into Config.override_from_parsed_args.
        parser = argparse.ArgumentParser()
        main._build_query_arg_group(parser)
        args = parser.parse_args(argv)
        c = config.Config(self.tempdir.path)
        c.override_from_parsed_args(args)
        return c
    def test_override_from_parsed_args_empty(self):
        c = self._run_override_from_parsed_args([])
        actual = {
            o.name: getattr(c, o.name)
            for o in config._OPTIONS
        }
        expected = {
            o.name: o.default
            for o in config._OPTIONS
        }
        self.assertEqual(expected, actual)
    def test_override_from_parsed_args_boolean_false(self):
        c = self._run_override_from_parsed_args([
            '--no-collapse-pre-releases',
        ])
        actual = c.options
        expected = {
            o.name: o.default
            for o in config._OPTIONS
        }
        expected['collapse_pre_releases'] = False
        self.assertEqual(expected, actual)
    def test_override_from_parsed_args_boolean_true(self):
        c = self._run_override_from_parsed_args([
            '--collapse-pre-releases',
        ])
        actual = c.options
        expected = {
            o.name: o.default
            for o in config._OPTIONS
        }
        expected['collapse_pre_releases'] = True
        self.assertEqual(expected, actual)
    def test_override_from_parsed_args_string(self):
        c = self._run_override_from_parsed_args([
            '--earliest-version', '1.2.3',
        ])
        actual = c.options
        expected = {
            o.name: o.default
            for o in config._OPTIONS
        }
        expected['earliest_version'] = '1.2.3'
        self.assertEqual(expected, actual)
    def test_override_from_parsed_args_ignore_non_options(self):
        # Namespace attributes that are not config options must not leak
        # onto the Config object.
        parser = argparse.ArgumentParser()
        main._build_query_arg_group(parser)
        parser.add_argument('not_a_config_option')
        args = parser.parse_args(['value'])
        c = config.Config(self.tempdir.path)
        c.override_from_parsed_args(args)
        self.assertFalse(hasattr(c, 'not_a_config_option'))
class TestConfigProperties(base.TestCase):
    """Tests for Config's derived properties (reporoot, notespath, template)."""
    def setUp(self):
        super(TestConfigProperties, self).setUp()
        # Temporary directory to store our config
        self.tempdir = self.useFixture(fixtures.TempDir())
        self.c = config.Config('releasenotes')
    def test_reporoot(self):
        # reporoot is normalized to end with exactly one '/'.
        self.c.reporoot = 'blah//'
        self.assertEqual('blah/', self.c.reporoot)
        self.c.reporoot = 'blah'
        self.assertEqual('blah/', self.c.reporoot)
    def test_notespath(self):
        # notespath is relpath + notesdir, and follows notesdir overrides.
        self.assertEqual('releasenotes/notes', self.c.notespath)
        self.c.override(notesdir='thenotes')
        self.assertEqual('releasenotes/thenotes', self.c.notespath)
    def test_template(self):
        template = defaults.TEMPLATE.format(defaults.PRELUDE_SECTION_NAME)
        self.assertEqual(template, self.c.template)
        self.c.override(template='i-am-a-template')
        self.assertEqual('i-am-a-template', self.c.template)
    def test_prelude_override(self):
        # Changing the prelude section name is reflected in the default template.
        template = defaults.TEMPLATE.format(defaults.PRELUDE_SECTION_NAME)
        self.assertEqual(template, self.c.template)
        self.c.override(prelude_section_name='fake_prelude_name')
        expected_template = defaults.TEMPLATE.format('fake_prelude_name')
        self.assertEqual(expected_template, self.c.template)
    def test_prelude_and_template_override(self):
        # An explicit template override wins over the prelude-derived one.
        template = defaults.TEMPLATE.format(defaults.PRELUDE_SECTION_NAME)
        self.assertEqual(template, self.c.template)
        self.c.override(prelude_section_name='fake_prelude_name',
                        template='i-am-a-template')
        self.assertEqual('fake_prelude_name', self.c.prelude_section_name)
        self.assertEqual('i-am-a-template', self.c.template)
|
import os
import streamlit as st
import pandas as pd
import numpy as np
import trends
# Path of the input CSV, supplied via the environment so the app stays
# deployment-agnostic.
INPUT_DATAFRAME = os.environ.get('INPUT_DATAFRAME')
st.title("Database trends for Oracle DBA")
st.markdown(
    """
This is a small demo of a database stats forecasting based on Oracle Cloud Control metrics
""")
@st.cache(persist=True)
def load_data():
    """Read the metrics CSV once and index it by rollup timestamp.

    Cached by Streamlit so script re-runs don't re-read the file.
    """
    data = pd.read_csv(INPUT_DATAFRAME)
    data = data.set_index('rollup_timestamp')
    return data
data = load_data()
# Metric column name -> human-readable label shown in the selectbox.
metrics_dict = {'size_gd': 'Database size in Gb per day',
                'dbtime': 'DB time per day',
                'redosize_mb': 'Redo size in Mb per day'}
input_database = st.sidebar.selectbox(
    'Select database id for analysis',
    data.database.unique())
input_metric = st.sidebar.selectbox('Select metric for analysis', list(metrics_dict.keys()), format_func=lambda x: metrics_dict[x])
# SampleClass (from the project's trends module) holds the selected series.
sample = trends.SampleClass(input_database, input_metric)
sample.preprocess_data()
st.write(f"You selected {input_database} database")
if st.checkbox('Show raw data'):
    st.subheader('Raw data')
    st.write(sample._get_df())
st.write(f"Please select buttons from left sidebar for various tools!")
if st.sidebar.button("Graph for raw data"):
    st.title("Graph for raw data:")
    sample.raw_data_plot()
days = st.sidebar.slider('Rolling window size in days for averaging', 1, 10, 1)
if st.sidebar.button("Graph with moving average"):
    st.title("Graph with moving average:")
    sample.moving_average_plot(days)
if st.sidebar.button("Graph for decomposition"):
    st.title("Graph for decomposition:")
    sample.decomposition_plot()
period = st.sidebar.slider('Prediction period', -30, 90, 30)
if st.sidebar.button("Build model and predict"):
    st.title("Graph for actual data and model prediction:")
    sample.split_data_by_period(period)
    sample.machine_learning()
    sample.make_compasion()
    sample.show_forecast()
    if period < 0:
        # A negative period holds back known data, so forecast errors
        # can be computed against it.
        st.write('Calculated errors for this model:')
        for err_name, err_value in sample.prophet.calculate_forecast_errors(sample.cmp_df, sample.period_past).items():
            st.write(err_name, err_value)
    st.subheader('Raw data')
    st.write(sample.forecast)
|
import sys
import pytest
from clime import clime
from docs_src.integers_floats_and_other_simple_classes.tutorial_001 import main, Dude
def test_help(set_cli_sys_argv, capsys):
    """-h must print argparse help and exit with SystemExit.

    (set_cli_sys_argv presumably resets sys.argv for the test — see conftest.)
    """
    sys.argv.append("-h")
    with pytest.raises(SystemExit):
        main()
    captured = capsys.readouterr()
    assert (
        "positional arguments:\n age_in_years type: <int>" in captured.out
    )  # note that default arguments keep their underscores
    # should look like this:
    """
usage: tutorial_001.py [-h] age_in_years
positional arguments:
age_in_years type: <int>
optional arguments:
-h, --help show this help message and exit
"""
def test_args_inputted_properly(set_cli_sys_argv, capsys):
    """A valid positional int argument is parsed and echoed by main()."""
    sys.argv.append("7")
    main()
    captured = capsys.readouterr()
    assert captured.out == "I am 7 years old.\n"
def test_cli_input_equals_pure_input():
    """Constructing a Dude via the clime CLI wrapper equals direct construction."""
    from_cli = clime(Dude, args=["1"])
    from_python = Dude(1)
    assert from_cli == from_python
|
from fate_manager.utils.base_utils import current_timestamp
from fate_manager.db.db_models import TokenInfo, AccountInfo, FateSiteInfo, FateUserInfo, AccountSiteInfo
from fate_manager.entity import item
from fate_manager.entity.status_code import UserStatusCode, SiteStatusCode
from fate_manager.entity.types import ActivateStatus, UserRole, PermissionType, IsValidType, RoleType, SiteStatusType
from fate_manager.operation.db_operator import SingleOperation
from fate_manager.operation.db_operator import DBOperator
from fate_manager.service.site_service import get_other_site_list
from fate_manager.settings import user_logger as logger
from fate_manager.utils.request_cloud_utils import request_cloud_manager
def get_user_info(token):
    """Resolve a login token to the active account's role and permissions.

    :param token: session token issued at login
    :return: dict with permissionList, role, userName and an empty userId
    :raises Exception: when the token or an active account is not found
    """
    token_infos = DBOperator.query_entity(TokenInfo, token=token)
    if not token_infos:
        raise Exception(UserStatusCode.NoFoundToken, "token does not exist, please login again")
    token_info = token_infos[0]
    accounts = DBOperator.query_entity(AccountInfo, status=ActivateStatus.YES, user_name=token_info.user_name)
    if not accounts:
        raise Exception(UserStatusCode.NoFoundAccount, "no found account")
    account = accounts[0]
    role = {
        "roleId": account.role,
        "roleName": UserRole.to_str(account.role)
    }
    # permission_list holds ids; expand each into id + display name.
    permission_list = []
    for permission_id in account.permission_list:
        permission_list.append({
            "PermissionId": int(permission_id),
            "PermissionName": PermissionType.to_str(int(permission_id))
        })
    return {"permissionList": permission_list, "role": role, "userName": account.user_name, "userId": ""}
def get_user_list(request_data):
    """Look up users matching the request context; return id/name pairs."""
    matched_users = SingleOperation.get_user_list(request_data.get("context"))
    result = []
    for entry in matched_users:
        result.append({"userId": entry.user_id, "userName": entry.user_name})
    return result
def get_user_access_list(request_data):
    """List active accounts, optionally filtered by user name, role and
    party, with their role, site binding and permission names.
    """
    logger.info(f"request data: {request_data}")
    query_info = {"status": IsValidType.YES}
    if request_data.get("userName"):
        query_info["user_name"] = request_data.get("userName")
    if request_data.get("roleId"):
        query_info["role"] = request_data.get("roleId")
    account_list = DBOperator.query_entity(AccountInfo, **query_info)
    logger.info(f"account list: {account_list}")
    if request_data.get("partyId"):
        # Keep only accounts bound to the requested party.
        account_site_list = DBOperator.query_entity(AccountSiteInfo, **{"party_id": request_data.get("partyId")})
        account_list_temp = []
        for account_site in account_site_list:
            for account in account_list:
                if account.user_name == account_site.user_name and account.fate_manager_id == account_site.fate_manager_id:
                    account_list_temp.append(account)
        account_list = account_list_temp
    data = []
    for account in account_list:
        permission_pair_list = []
        for permission_id in account.permission_list:
            permission_pair_list.append({
                "permissionId": int(permission_id),
                "permissionName": PermissionType.to_str(int(permission_id))
            })
        # First site binding (if any) for the account.
        account_site_list = DBOperator.query_entity(AccountSiteInfo, **{"user_name": account.user_name})
        account_site = account_site_list[0] if account_site_list else None
        user_access_list_item = {
            "userId": "",
            "userName": account.user_name,
            "site": item.SitePair(partyId=account_site.party_id, siteName=account_site.site_name).to_dict() if account_site else {},
            "role": item.Role(roleId=account.role, roleName=UserRole.to_str(account.role)).to_dict(),
            "cloudUser": True if account.cloud_user else False,
            "permissionList": permission_pair_list,
            "creator": account.creator,
            "createTime": account.create_time
        }
        data.append(user_access_list_item)
    return data
def add_user(request_data, token):
    """Create a new account (and optional site binding) on behalf of the
    token's user; credentials are inherited from the requesting account.

    :raises Exception: for BUSINESS role without a party id, or when the
        user name already exists and is active
    """
    token_info_list = DBOperator.query_entity(TokenInfo, **{"token": token})
    user_name = token_info_list[0].user_name
    request_account = DBOperator.query_entity(AccountInfo, **{"user_name": user_name, "status": IsValidType.YES})[0]
    logger.info(f"request data: {request_data}")
    # A BUSINESS user must be bound to a party.
    if request_data.get("roleId") == UserRole.BUSINESS and request_data.get("partyId", 0) == 0:
        raise Exception(UserStatusCode.AddUserFailed, f"add user failed:role id {request_data.get('roleId')} party id"
                                                      f"{request_data.get('partyId')}")
    logger.info("start check user")
    account_info_list = SingleOperation.check_user(user_name=request_data.get("userName"))
    if account_info_list and account_info_list[0].status == IsValidType.YES:
        raise Exception(UserStatusCode.AddUserFailed, f'check user failed: user {request_data.get("userName")} '
                                                      f'already exists')
    logger.info("check user success")
    # New account inherits institution and cloud credentials from the creator.
    account_info = {
        "user_id": request_data.get("userId"),
        "user_name": request_data.get("userName", ""),
        "role": request_data.get('roleId'),
        "creator": request_data.get("creator"),
        "status": IsValidType.YES,
        "permission_list": request_data.get("permissionList"),
        "institutions": request_data.get("institution", request_account.institutions),
        "fate_manager_id": request_account.fate_manager_id,
        "active_url": request_account.active_url,
        "app_key": request_account.app_key,
        "app_secret": request_account.app_secret
    }
    DBOperator.safe_save(AccountInfo, account_info)
    if request_data.get("partyId", 0):
        account_site_info = {
            "party_id": request_data.get("partyId", 0),
            "user_name": request_data.get("userName", ""),
            "fate_manager_id": request_account.fate_manager_id,
            "site_name": request_data.get("siteName")
        }
        DBOperator.create_entity(AccountSiteInfo, account_site_info)
def delete_user(token, request_data):
    """Soft-delete a non-cloud user account and expire their login token.

    :param token: session token of the requesting user
    :param request_data: dict with the target ``userName``
    :raises Exception: unknown token/user, cloud users, or self-deletion
    """
    token_info_list = DBOperator.query_entity(TokenInfo, **{"token": token})
    if not token_info_list:
        raise Exception(UserStatusCode.NoFoundToken, f"no found token: {token}")
    token_info = token_info_list[0]
    account_info = {"user_name": request_data.get("userName")}
    account_info_list = DBOperator.query_entity(AccountInfo, **account_info)
    if not account_info_list:
        raise Exception(UserStatusCode.NoFoundUser, "no found user")
    account = account_info_list[0]
    if account.cloud_user:
        raise Exception(UserStatusCode.DeleteUserFailed, f"Cloud User {request_data.get('userName')} Could Not Be Delete")
    if account.user_name == token_info.user_name:
        raise Exception(UserStatusCode.DeleteUserFailed, "Could Not Be Delete Self")
    # Soft delete: flip the status flag instead of removing the row.
    account_info["status"] = IsValidType.NO
    DBOperator.update_entity(AccountInfo, account_info)
    # Expire the user's token immediately.
    token_info = {
        "user_name": request_data.get("userName"),
        "expire_time": current_timestamp()
    }
    DBOperator.update_entity(TokenInfo, token_info)
    try:
        DBOperator.delete_entity(AccountSiteInfo, **{"user_name": request_data.get("userName")})
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; absence of a site binding is a normal case.
        logger.info("account no found site")
def edit_user(request_data):
    """Update a user's role/site/permissions; admin (cloud) users are immutable.

    For BUSINESS users with a party id the site binding is saved; otherwise
    any existing binding is removed (best effort).
    :raises Exception: unknown user, or attempt to edit an admin user
    """
    account_info_list = SingleOperation.check_user(user_name=request_data.get("userName"))
    if not account_info_list:
        # Bug fix: the original f-string lacked braces, so the literal text
        # 'request_data.get("userName")' was emitted instead of the value.
        raise Exception(UserStatusCode.CheckUserFailed,
                        f'check user failed: {request_data.get("userName")}')
    is_admin = False
    for account in account_info_list:
        if account.cloud_user and account.fate_manager_id:
            is_admin = True
            break
    if is_admin:
        # (f-prefix removed: the string has no placeholders, value unchanged)
        raise Exception(UserStatusCode.EditUserFailed, "Admin User Could Not Edite")
    account_info = {
        "user_name": request_data.get("userName"),
        "role": request_data.get("roleId"),
        "site_name": request_data.get("siteName"),
        "permission_list": request_data.get("permissionList"),
        "update_time": current_timestamp()
    }
    logger.info(f"update accont info: {account_info}")
    DBOperator.update_entity(AccountInfo, account_info)
    if request_data.get("roleId") == UserRole.BUSINESS and request_data.get("partyId"):
        # `account` is the last entry bound by the loop above.
        account_site_info = {"user_name": request_data.get("userName"),
                             "fate_manager_id": account.fate_manager_id,
                             "party_id": request_data.get("partyId"),
                             "site_name": request_data.get("siteName")}
        logger.info(f"save account info: {account_site_info}")
        DBOperator.safe_save(AccountSiteInfo, account_site_info)
    else:
        try:
            DBOperator.delete_entity(AccountSiteInfo, **{"user_name": request_data.get("userName")})
        except Exception:
            # Narrowed from a bare `except:`; missing binding is normal.
            logger.info("account no found site")
def get_user_site_list():
    """Return (partyId, siteName) pairs for all JOINED sites of the first federation."""
    federated_info_list = SingleOperation.get_federated_info()
    site_info_list = DBOperator.query_entity(FateSiteInfo, **{"federated_id": federated_info_list[0].federated_id})
    data = []
    for site in site_info_list:
        if site.status != SiteStatusType.JOINED:
            continue
        data.append(item.SitePair(partyId=site.party_id, siteName=site.site_name).to_dict())
    return data
def get_site_info_user_list(request_data):
    """List users bound to a party with role name and ';'-joined permissions."""
    user_info_list = DBOperator.query_entity(AccountSiteInfo, **{"party_id": request_data["partyId"]})
    data = []
    for account in user_info_list:
        account_info = DBOperator.query_entity(AccountInfo, **{"user_name": account.user_name})[0]
        permission_list = account_info.permission_list
        # Build "NameA;NameB;" — every name is followed by a ';'.
        permission_str = ""
        for permission in permission_list:
            permission_name = PermissionType.to_str(int(permission))
            permission_str += permission_name + ";"
        data.append({
            "userName": account_info.user_name,
            "role": UserRole.to_str(account_info.role),
            "permission": permission_str
        })
    return data
def get_login_user_manager_list(request_data):
    """List joined sites (with a fate_flow address) for a valid user.

    :raises Exception: when no active account matches the user name
    """
    account_info_list = DBOperator.query_entity(AccountInfo, **{"user_name": request_data.get("userName"),
                                                                "status": IsValidType.YES})
    if not account_info_list:
        raise Exception(UserStatusCode.NoFoundAccount, f"no found account by user name {request_data.get('userName')}, "
                                                       f"status {IsValidType.YES}")
    site_list = DBOperator.query_entity(FateSiteInfo, **{"status": SiteStatusType.JOINED})
    data = []
    for site in site_list:
        # Only sites that expose a fate_flow endpoint are listed.
        if site.fate_flow_info:
            data.append({
                "partyId": site.party_id,
                "siteName": site.site_name,
                "role": {"code": site.role, "desc": RoleType.to_str(site.role)},
                "address": "http://" + site.fate_flow_info
            })
    return data
def sublogin(request_data):
    """Validate user, active account and joined site for a sub-login request.

    :param request_data: dict with ``userName`` and ``partyId``
    :return: dict with partyId, siteName, roleId, roleName
    :raises Exception: when the user, account, or site cannot be found
    """
    user_info_list = DBOperator.query_entity(FateUserInfo, **{"user_name": request_data.get("userName"),
                                                              "status": IsValidType.YES})
    if not user_info_list:
        raise Exception(UserStatusCode.NoFoundUser, f"no found account by user name {request_data.get('userName')}, "
                                                    f"status {IsValidType.YES}")
    # The account query is only an existence check; removed the unused
    # `account_info = account_info_list[0]` local.
    account_info_list = DBOperator.query_entity(AccountInfo, **{
        "user_name": request_data.get("userName"),
        "status": IsValidType.YES
    })
    if not account_info_list:
        raise Exception(UserStatusCode.NoFoundAccount, f"no found account by user name {request_data.get('userName')}, "
                                                       f"status {IsValidType.YES}")
    site_info_list = DBOperator.query_entity(FateSiteInfo, **{"party_id": request_data.get("partyId"),
                                                              "status": SiteStatusType.JOINED})
    if not site_info_list:
        raise Exception(SiteStatusCode.NoFoundSite, 'no found site')
    resp = {
        "partyId": request_data.get("partyId"),
        "siteName": site_info_list[0].site_name,
        "roleId": site_info_list[0].role,
        "roleName": RoleType.to_str(site_info_list[0].role)
    }
    return resp
def change_login(request_data):
    """Switch the logged-in user to another joined site.

    :param request_data: dict with ``userName`` and ``partyId``
    :return: dict with partyId, siteName, roleId, roleName
    :raises Exception: when the account or site cannot be found
    """
    # Existence check only; removed the unused
    # `account_info = account_info_list[0]` local.
    account_info_list = DBOperator.query_entity(AccountInfo, **{"user_name": request_data.get("userName"),
                                                                "status": IsValidType.YES})
    if not account_info_list:
        raise Exception(UserStatusCode.NoFoundUser, f"no found account by user name {request_data.get('userName')}, "
                                                    f"status {IsValidType.YES}, party id {request_data.get('partyId')}")
    site_info_list = DBOperator.query_entity(FateSiteInfo, **{"party_id": request_data.get('partyId'),
                                                              "status": SiteStatusType.JOINED})
    if not site_info_list:
        raise Exception(SiteStatusCode.NoFoundSite, 'no found site')
    resp = {
        "partyId": request_data.get("partyId"),
        "siteName": site_info_list[0].site_name,
        "roleId": site_info_list[0].role,
        "roleName": RoleType.to_str(site_info_list[0].role)
    }
    return resp
def permission_authority(request_data):
    """Ask cloud-manager (CheckPartyUri) to verify the admin's authority
    over the given party.
    """
    account = SingleOperation.get_admin_info()
    logger.info("start request cloud CheckPartyUri")
    institution_signature_item = item.InstitutionSignatureItem(fateManagerId=account.fate_manager_id,
                                                               appKey=account.app_key,
                                                               appSecret=account.app_secret).to_dict()
    resp = request_cloud_manager(uri_key="CheckPartyUri", data=institution_signature_item,
                                 body={"institutions": account.institutions,
                                       "partyId": request_data.get("partyId")},
                                 url=None)
    logger.info(f"request cloud success:{resp}")
    # NOTE(review): resp is only logged, never returned — presumably callers
    # rely on request_cloud_manager raising on failure; confirm.
def get_allow_party_list(request_data):
    """Filter other-site federation entries by an optional role name.

    Entries where any site already has the requested role are excluded.
    """
    federated_item_list = get_other_site_list(request_data)
    if not request_data.get("roleName"):
        return federated_item_list
    data = []
    for federated_item in federated_item_list:
        # tag stays True only if no site in the entry holds the role.
        tag = True
        for site_item in federated_item.get("siteList"):
            if request_data.get("roleName") == site_item.get("role").get("desc"):
                tag = False
                break
        if tag:
            data.append(federated_item)
    # NOTE(review): `data` is built but no `return data` is visible here —
    # looks like a missing return (or a truncated file); confirm before use.
from enum import IntEnum
# Field indices of a per-node record; IntEnum members compare and index
# like plain integers.
Node = IntEnum('Node', [('POTENTIAL', 0), ('TIME', 1), ('ISVALID', 2)])
|
from flask import Flask, request, make_response, Response
from flask_restful import Api, Resource, reqparse
import time, json, os, yaml, io
import tarfile
from werkzeug.utils import secure_filename
from constants import queue
# Canned XML error payload returned for invalid uploads; mimics the Cisco
# app-manager error format (code 1308: unsupported package format).
file_error_string = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<error>
<code>1308</code>
<description>Given app package file is invalid: Unsupported Format</description>
</error>'''
# Accepted upload extensions; a module-level set literal so it is built
# once instead of on every call (the original rebuilt set([...]) per call).
ALLOWED_EXTENSIONS = {"gz", "tar"}


def allowed_file(filename):
    """Return True if *filename* has an accepted extension (.gz / .tar),
    case-insensitively. Files without any '.' are rejected."""
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
class Applications(Resource):
    """Flask-RESTful resource emulating Cisco's local-apps manager API.

    All operations are serialized through the shared processing `queue`.
    """
    # /api/v1/appmgr/localapps/upload
    def post(self):
        """Accept a .tar/.gz app package upload and queue it for processing."""
        parser = reqparse.RequestParser()
        parser.add_argument('x-token-id', location='headers')
        parser.add_argument("x-publish-on-upload", location="headers")
        args = parser.parse_args()
        # Bug fix: the original indexed request.files['file'] BEFORE checking
        # that 'file' was present, raising a 400/KeyError instead of
        # returning the expected XML error payload.
        file = request.files.get('file')
        if file is None or file.filename == '' or not allowed_file(file.filename):
            # Thank you CISCO for returning an XML here...
            return file_error_string, 400, {"Content-Type": "application/xml"} # TODO: resolve output, not XML but string now
        filename = secure_filename(file.filename)
        uploadDir = "./fileApplication"
        if not os.path.exists(uploadDir):
            os.makedirs(uploadDir)
        path = os.path.join(uploadDir, filename)
        file.save(path)
        identifier = queue.add_for_processing(("Applications", "post"), args, request, uploadDir, filename)
        return queue.get_result(identifier)
    # /api/v1/appmgr/localapps/<appid>:<appversion>
    def put(self, appURL):
        """Update an existing app identified by appURL with a JSON body."""
        parser = reqparse.RequestParser()
        parser.add_argument('x-token-id', location='headers')
        args = parser.parse_args()
        data = request.json
        identifier = queue.add_for_processing(("Applications", "put"), args, data, appURL)
        return queue.get_result(identifier)
    # /api/v1/appmgr/apps/<appid> <-- WTF? Why apps and not localapps?!!! CISCOOOOO!!!!
    def delete(self, appURL):
        """Delete (and optionally unpublish) the app identified by appURL."""
        parser = reqparse.RequestParser()
        parser.add_argument('x-token-id', location='headers')
        parser.add_argument('x-unpublish-on-delete', location='headers')
        args = parser.parse_args()
        identifier = queue.add_for_processing(("Applications", "delete"), args, appURL)
        return queue.get_result(identifier)
    # /api/v1/appmgr/localapps/ Undocumented but works!
    def get(self):
        """List local apps (supports a `limit` query parameter)."""
        parser = reqparse.RequestParser()
        parser.add_argument('x-token-id', location='headers')
        parser.add_argument("limit")
        args = parser.parse_args()
        identifier = queue.add_for_processing(("Applications", "get"), args)
        return queue.get_result(identifier)
|
#! /usr/bin/python
import sys
import socket
import threading
import re
import getopt
# Runtime flags, populated from the command line in main():
REQUEST = False    # -q action for requests ('hex' / 'show'; see usage())
RESPONSE = False   # -s action for responses
OUTFILE = False    # -o file path to append request headers to
INTERCEPT = False  # -i pause on each request/response for user input
def main():
global REQUEST
global RESPONSE
global OUTFILE
global INTERCEPT
local_address = False
if len(sys.argv[1:]) < 1:
usage()
try:
opts, args = getopt.getopt(sys.argv[1:], 'hl:q:s:o:i', ['help', 'listener_address', 'request', 'response', 'output', 'intercept'])
except getopt.GetoptError as err:
print str(err)
usage()
for o, a in opts:
if o in ('-h', '--help'):
usage()
elif o in ('-l', '--listener-address'):
local_address = a.split(':')
local_host = local_address[0]
local_port = int(local_address[1])
elif o in ('-q', '--request'):
REQUEST = a
elif o in ('-s', '--response'):
RESPONSE = a
elif o in ('-o', '--output'):
OUTFILE = a
elif o in ('-i', '--intercept'):
INTERCEPT = True
else:
assert False,'Unhandled Option'
if local_address == False:
print "Please specify the -l option"
usage()
try:
server_loop(local_host, local_port)
except KeyboardInterrupt:
print "[!!] Exiting.."
exit(0)
def usage():
    """Print CLI usage text and examples, then exit the process."""
    print "BHP Proxy Modified"
    print
    print "Usage: ./bproxym.py -l listener_address [options]"
    print "-h --help - displays this usage text"
    print "-l --listener-address - listen on host:port for incoming connections"
    print "-q --request - use this to specify http request options. supported arguments are: hex(displays the http request headers in hexadecimal), show(displays the http request headers), edit(gives the user an opportunity to edit the http request before the request is forwarded)"
    print "-s --response - use this to specify http response options. supported arguments are: hex(displays the http response headers bytes in hexadecimal), show(displays the http response headers), edit(gives the user an opportunity to edit the http response before the response is forwarded)"
    print "-o --output - write the request headers to a file. the argument is the name of the file to save to"
    print "-i --intercept - intercept the http request/response. to specify an output file for a single request/response enter '-o/--output test.txt' into the prompt"
    print
    print
    print "Examples: "
    print "./bproxym -l 127.0.0.1:9000 (listens on local port 9000 and forwards http traffic)"
    print "./bproxym -l 127.0.0.1:9000 -q edit (listens on local port 9000 and gives the user an opportunity to edit the http request before forwarding the traffic)"
    print "./bproxym -l 127.0.0.1:9000 -q edit -s show (listens on local port 9000, gives the user an opportunity to edit the http request before forwarding the traffic and then displays the http response)"
    print "./bproxym -l 127.0.0.1:8000 -i (listens on local port 8000, intercepts all http requests/responses and waits for user input. hitting the return key forwards the http request/response)"
    sys.exit(0)
def server_loop(local_host, local_port):
    """Bind to local_host:local_port and spawn a proxy thread per client."""
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without waiting for TIME_WAIT to clear.
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        server.bind((local_host, local_port))
    except:
        print "[!!] Failed to listen on %s:%d" % (local_host, local_port)
        print "[!!] Check for other listening sockets or correct permissions."
        sys.exit(0)
    print "[*] Listening on %s:%d" % (local_host, local_port)
    server.listen(5)
    while True:
        client_socket, addr = server.accept()
        print "[==>] Received incoming connection from %s:%d" % (addr[0], addr[1])
        # One handler thread per accepted client connection.
        proxy_thread = threading.Thread(target=proxy_handler, args=(client_socket, addr))
        proxy_thread.start()
def proxy_handler(client_socket, addr):
    """Shuttle one client's HTTP traffic to the host named in its headers.

    Loops: read from client, forward to remote, read reply, forward back;
    closes both sockets when either side stops sending.
    """
    global RESPONSE_HEADERS
    global OUTFILE
    global INTERCEPT
    file_name = OUTFILE
    remote_port = 80
    remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    while True:
        data = receive_from(client_socket)
        if data:
            # Destination is taken from the request's Host header.
            remote_host, remote_port = get_remote(data)
            try:
                remote_socket.connect((remote_host, remote_port))
            except:
                # e.g. already connected from a previous loop iteration
                pass
        local_buffer = data
        if len(local_buffer):
            print "[==>] Received %d bytes from %s(localhost).\n" % (len(local_buffer), addr[0])
            local_buffer = request_handler(local_buffer)
            if file_name:
                write_file(file_name, local_buffer)
            try:
                remote_socket.send(local_buffer)
            except Exception as ex: # ignore background traffic
                print ex
            print "[==>] Sent to %s(remote)." % remote_host
        remote_buffer = receive_from(remote_socket)
        if len(remote_buffer):
            print "[<==] Received %d bytes from %s(remote).\n" % (len(remote_buffer), remote_host)
            remote_buffer = response_handler(remote_buffer)
            client_socket.send(remote_buffer)
            print "[<==] Sent to %s(localhost)." % addr[0]
        if not len(local_buffer) or not len(remote_buffer):
            client_socket.close()
            remote_socket.close()
            print "[*] No more data. Closing connections."
            break
def receive_from(connection):
    """Read everything available on *connection* (2s timeout) into a string."""
    buffer = ""
    # Timeout bounds how long we wait for a slow/idle peer.
    connection.settimeout(2)
    try:
        while True:
            data = connection.recv(8192)
            if not data:
                break
            buffer += data
    except:
        # Timeout or socket error ends the read; return what we have.
        pass
    return buffer
def request_handler(buffer):
    """Apply the configured -q action (hex/show) and optional -i intercept.

    Returns the (possibly unchanged) request buffer to forward.
    """
    if REQUEST:
        if REQUEST == 'hex':
            hexdump(buffer)
        elif REQUEST == 'show':
            show_request_headers(buffer)
    if INTERCEPT:
        intercept(buffer)
    return buffer
def response_handler(buffer):
    """Apply the configured -s action (hex/show) and optional -i intercept.

    Returns the (possibly unchanged) response buffer to forward.
    """
    if RESPONSE:
        if RESPONSE == 'hex':
            hexdump(buffer)
        elif RESPONSE == 'show':
            show_response_headers(buffer)
    if INTERCEPT:
        intercept(buffer)
    return buffer
def get_headers(buffer):
    """Return the raw header block (through the first blank line), or None
    when no CRLFCRLF terminator is present."""
    try:
        # +2 keeps the trailing "\r\n" of the last header line.
        headers_raw = buffer[:buffer.index("\r\n\r\n")+2]
        # headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", headers_raw))
    except:
        return None
    return headers_raw
def get_remote(buffer):
    """Extract (host, port) from the request's Host header; port defaults to 80.

    Returns None when the headers cannot be parsed at all.
    """
    remote_port = 80
    try:
        headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", get_headers(buffer)))
    except:
        # get_headers() returned None (no header block found).
        return None
    if headers:
        host_addr = headers['Host'].split(':')
        remote_host = host_addr[0]
        if len(host_addr) > 1:
            remote_port = int(host_addr[1])
    # NOTE(review): if headers parsed but came back empty, remote_host is
    # unbound here -- presumably every request carries a Host header; confirm.
    return remote_host, remote_port
def write_file(outfile, buffer):
    """Append the buffer's header block to *outfile*, then disable -o output.

    Resets the global OUTFILE so only the first captured request is written.
    """
    global OUTFILE
    headers = get_headers(buffer)
    try:
        file_descriptor = open(outfile, 'a')
        file_descriptor.write(headers + '\n')
        file_descriptor.close()
        OUTFILE = False
    except OSError as err:
        print err
def hexdump(src, length=16):
    """Print a classic hex+ASCII dump of *src*, *length* bytes per row.

    Returns src unchanged (Python 2: handles both str and unicode input).
    """
    result = []
    # unicode code points need 4 hex digits, byte strings need 2.
    digits = 4 if isinstance(src, unicode) else 2
    for i in xrange(0, len(src), length):
        s = src[i:i+length]
        hexa = b' '.join(["%0*X" % (digits, ord(x)) for x in s])
        # Non-printable bytes are shown as '.'.
        text = b''.join([x if 0x20 <= ord(x) < 0x7f else b'.' for x in s])
        result.append(b" %04X %-*s %s" % (i, length*(digits + 1), hexa, text))
    print b'\n'.join(result) + '\n'
    return src
def intercept(src):
    """Pause until the user hits return; '-o <file>' writes *src* first.

    Returns src unchanged.
    """
    while True:
        command = raw_input('>>> ').split()
        if command == []:
            # Bare return key: continue forwarding.
            break
        elif command[0] == '-o' or command[0] == '--output':
            try:
                file_descriptor = open(command[1], 'w')
                file_descriptor.write(src)
                file_descriptor.close()
                break
            except Exception as ex:
                print ex
                break
        else:
            print "[!!] Unsupported/unknown command. Commands currently supported are:\n -o/--output <file name>"
    return src
def show_response_headers(src):
    """Print each response header line, indented; returns src unchanged."""
    headers = get_headers(src).split('\r\n')
    for header in headers:
        print " " + header
    return src
def show_request_headers(src):
    """Print each request header line, indented; returns src unchanged."""
    headers = get_headers(src).split('\r\n')
    for header in headers:
        print " " + header
    return src
# Script entry point (note: no __main__ guard, so this also runs on import).
main()
|
# Guarded division of two user-supplied integers.
firstnum = input("Enter first number : ")
secondnum = input("Enter second number : ")
try:
    firstnum = int(firstnum)
    secondnum = int(secondnum)
    if secondnum <= 0:
        print("Second number must be greater than zero")
    elif secondnum > 10:
        # For integers, the original `secondnum < 1 or secondnum > 10` is
        # equivalent here: `< 1` can never be true once `<= 0` was handled.
        print("Second number must be between 0-10")
    else:
        print(firstnum, "divided by", secondnum)
        print(firstnum / secondnum)
except ValueError:
    # int() failed on one of the inputs.
    print("Please Enter number only")
|
from os import environ
import boto3
from botocore.exceptions import ClientError
from .pogfs import Pogfs
# Default bucket name, read from the environment at import time.
BUCKET_NAME = environ.get('S3_BUCKET_NAME')
class s3fs(Pogfs):
    """Pogfs backend that stores files in an S3 bucket via boto3."""
    def __init__(self, bucket_name=None, **kwargs):
        # Fall back to the S3_BUCKET_NAME environment variable.
        self.bucket_name = bucket_name or BUCKET_NAME
    def exists(self, remote_path):
        """Return True if the key exists (HEADs the object to check)."""
        resource_s3 = boto3.resource('s3')
        try:
            resource_s3.Object(self.bucket_name, remote_path).load()
            return True
        except ClientError as e:
            if e.response['Error']['Code'] == "404":
                return False
            else:
                # Permission/network errors etc. propagate to the caller.
                raise
    def upload_file(self, local_path, remote_path):
        """Upload a local file to the bucket at remote_path."""
        s3 = boto3.client('s3')
        s3.upload_file(local_path, self.bucket_name, remote_path)
    def download_file(self, local_path, remote_path):
        """Download remote_path from the bucket into local_path."""
        s3 = boto3.client('s3')
        s3.download_file(self.bucket_name, remote_path, local_path)
    def remove_file(self, remote_path):
        """Delete the object at remote_path from the bucket."""
        s3 = boto3.client('s3')
        s3.delete_object(Bucket=self.bucket_name, Key=remote_path)
    def list_files(self, remote_path='', pattern=None, recursive=False):
        """Yield keys (and, non-recursively, directory-like prefixes) under remote_path.

        :param pattern: optional filename filter applied via self._match
            (presumably provided by the Pogfs base class -- confirm there).
        """
        s3 = boto3.client('s3')
        pager = s3.get_paginator("list_objects_v2")
        kwargs = {
            'Bucket': self.bucket_name,
            'Prefix': remote_path,
        }
        if not recursive:
            # A delimiter makes S3 group keys into CommonPrefixes ("dirs").
            kwargs['Delimiter'] = '/'
        for p in pager.paginate(**kwargs):
            for d in p.get('CommonPrefixes', []):
                yield d['Prefix']
            for f in p.get('Contents', []):
                filename = f['Key']
                if pattern and not self._match(filename, pattern):
                    continue
                yield filename
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Han"
__email__ = "liuhan132@foxmail.com"
import os
import sys
import yaml
import json
import torch.multiprocessing
from torch.utils.tensorboard import SummaryWriter
import logging
import logging.config
from utils.functions import set_seed
logger = logging.getLogger(__name__)
def init_env(config_path, in_infix, out_infix, writer_suffix, gpuid):
    """Load config and set up seed, CUDA device, and a TensorBoard writer.

    :param config_path: path to the game config YAML
    :param in_infix: input run infix forwarded to read_config
    :param out_infix: output run infix forwarded to read_config
    :param writer_suffix: key into config['checkpoint'] naming the log dir
    :param gpuid: CUDA device index, or None to force CPU
    :return: (game_config, enable_cuda, device, writer)
    """
    logger.info('loading config file...')
    game_config = read_config(config_path, in_infix, out_infix)
    # config in logs
    logger.debug(json.dumps(game_config, indent=2))
    # set multi-processing: bugs in `list(dataloader)`
    # see more on `https://github.com/pytorch/pytorch/issues/973`
    torch.multiprocessing.set_sharing_strategy('file_system')
    # set random seed
    set_seed(game_config['global']['random_seed'])
    # gpu
    enable_cuda = torch.cuda.is_available() and gpuid is not None
    device = torch.device("cuda" if enable_cuda else "cpu")
    if enable_cuda:
        torch.cuda.set_device(gpuid)
        # Deterministic cuDNN for reproducibility (may be slower).
        torch.backends.cudnn.deterministic = True
    logger.info("CUDA #{} is avaliable".format(gpuid)
                if enable_cuda else "CUDA isn't avaliable")
    # summary writer
    writer = SummaryWriter(log_dir=game_config['checkpoint'][writer_suffix])
    return game_config, enable_cuda, device, writer
def init_logging(config_path='config/logging_config.yaml', out_infix='default'):
    """Initialize the logging module from a YAML dictConfig file.

    Handler log files are redirected into ``outputs/<out_infix>/``; falls
    back to basicConfig(DEBUG) when the config file is missing.
    :param out_infix: subdirectory name under ``outputs/``
    :param config_path: path to the YAML logging config
    :return: None
    """
    try:
        with open(config_path, 'r') as f:
            config = yaml.load(f.read(), Loader=yaml.Loader)
        out_prefix = 'outputs/' + out_infix + '/'
        if not os.path.exists(out_prefix):
            os.makedirs(out_prefix)
        # Point the file handlers at the per-run output directory.
        config['handlers']['info_file_handler']['filename'] = out_prefix + 'debug.log'
        config['handlers']['time_file_handler']['filename'] = out_prefix + 'debug.log'
        config['handlers']['error_file_handler']['filename'] = out_prefix + 'error.log'
        logging.config.dictConfig(config)
    except IOError:
        sys.stderr.write('logging config file "%s" not found' % config_path)
        logging.basicConfig(level=logging.DEBUG)
def read_config(config_path='config/game_config.yaml', in_infix='default', out_infix='default'):
    """
    Load the global game config and attach derived checkpoint/dataset paths.

    :param in_infix: infix of the input ``outputs/<infix>`` dir (weights read from here)
    :param out_infix: infix of the output ``outputs/<infix>`` dir (results written here)
    :param config_path: path to the YAML game config
    :return: config dict with 'checkpoint' paths and prefixed dataset paths filled in
    """
    try:
        with open(config_path, 'r') as f:
            config = yaml.load(f.read(), Loader=yaml.Loader)
        out_prefix = 'outputs/' + out_infix + '/'
        os.makedirs(out_prefix, exist_ok=True)  # exist_ok avoids check/create race
        in_prefix = 'outputs/' + in_infix + '/'
        # assert os.path.exists(in_prefix)
        checkpoint = {'dialog_data_path': out_prefix + 'dialog_data.json',
                      'false_dialog_data_path': out_prefix + 'dialog_data_false.json',
                      'main_log_path': out_prefix + 'main_logs'}
        # every sub-model gets the same family of log/weight/checkpoint paths
        for mod in ('state', 'policy', 'mrc', 'pt'):
            checkpoint[mod + '_log_path'] = out_prefix + mod + '_logs'
            checkpoint['in_' + mod + '_weight_path'] = in_prefix + mod + '_weight.pt'
            checkpoint['in_' + mod + '_checkpoint_path'] = in_prefix + mod + '_checkpoint'
            checkpoint['out_' + mod + '_weight_path'] = out_prefix + mod + '_weight.pt'
            checkpoint['out_' + mod + '_checkpoint_path'] = out_prefix + mod + '_checkpoint'
        config['checkpoint'] = checkpoint
        # add prefix to dataset path
        data_prefix = config['dataset']['data_prefix']
        for key in ('vocab_path', 'embedding_path', 'doc_id_path', 'cand_doc_path'):
            config['dataset'][key] = data_prefix + config['dataset'][key]
        return config
    except IOError:
        # fix: the message previously said "logging config file" (copy-paste slip)
        sys.stderr.write('game config file "%s" not found\n' % config_path)
        sys.exit(1)
|
import logging
from webapp import create_app, db
def create():
    """Create all database tables for the Flask app, logging the outcome."""
    log = logging.getLogger(__name__)
    try:
        # Build the app and create every table registered on the SQLAlchemy db.
        db.create_all(app=create_app())
        log.info('Database and table were created;')
    except Exception:
        # Keep the traceback in the log instead of crashing the caller.
        log.exception('The database creation failed.')
|
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, IndepVarComp, Group
from openmdao.utils.assert_utils import assert_rel_error, assert_check_partials
from CADRE.temperature_rate_collect_comp import TemperatureRateCollectComp
class TestTemperatureRateCollect(unittest.TestCase):
    """Checks TemperatureRateCollectComp output and partials on random inputs."""

    @classmethod
    def setUpClass(cls):
        num = cls.nn = 6
        cls.p = Problem(model=Group())
        indeps = cls.p.model.add_subsystem('ivc', IndepVarComp(), promotes_outputs=['*'])
        indeps.add_output('dXdt:T_bat', val=np.ones(num,))
        indeps.add_output('dXdt:T_fins', val=np.ones((num, 4)))
        cls.p.model.add_subsystem('temperature_rate_collect',
                                  TemperatureRateCollectComp(num_nodes=num),
                                  promotes_inputs=['*'], promotes_outputs=['*'])
        cls.p.setup(check=True, force_alloc_complex=True)
        # Overwrite the ones with random data so the test is non-trivial.
        cls.p['dXdt:T_bat'] = np.random.rand(num)
        cls.p['dXdt:T_fins'] = np.random.rand(num, 4)
        cls.p.run_model()

    def test_results(self):
        """Fin rates should land in columns 0-3, the battery rate in column 4."""
        expected = np.zeros((self.nn, 5))
        expected[:, :4] = self.p['dXdt:T_fins']
        expected[:, 4] = self.p['dXdt:T_bat']
        assert_rel_error(self, self.p['dXdt:temperature'], expected)

    def test_partials(self):
        """Complex-step partials must match the declared derivatives."""
        np.set_printoptions(linewidth=1024)
        assert_check_partials(self.p.check_partials(method='cs'))
|
from src.tesla.Model3.body import Sedan
from src.baseClasses.virtualvehicle import Vehicle
class Model3Performance(Vehicle):
    """Tesla Model 3 Performance: a Vehicle built on a Sedan body."""

    name = "Tesla Model 3 Performance"

    def __init__(self):
        # The performance trim uses the standard sedan body.
        self.body = Sedan()

    def simulateStep(self, speed_meters_per_second, acceleration_meters_per_square_second=0, timestep_seconds=1):
        """Log the step parameters, then delegate the physics to the base class."""
        message = (
            f'Simulating {self.name}: v={speed_meters_per_second} m/s; '
            f'a={acceleration_meters_per_square_second} m/s^2; '
            f'delta_t={timestep_seconds} s'
        )
        print(message)
        return super().simulateStep(
            speed_meters_per_second,
            acceleration_meters_per_square_second=acceleration_meters_per_square_second,
            timestep_seconds=timestep_seconds,
        )
|
import math
import torch
import torch.nn as nn
import random
import numpy as np
import torch.nn.functional as F
import argparse
import os
import shutil
import time
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
# Run on the first GPU when available, otherwise fall back to the CPU.
# All train/eval helpers below read this module-level `device`.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Optional GPU diagnostics (left disabled):
#if device.type == "cuda":
#    print(torch.cuda.get_device_name(0))
#    print("Memory Usage")
#    print("Allocated: "
class BasicBlock(nn.Module):
    """Pre-activation Wide-ResNet basic block: BN-ReLU-Conv twice plus a shortcut.

    When the channel counts differ, a 1x1 strided convolution projects the
    (already activated) input onto the residual branch.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        # Submodules are created in the same order as sibling blocks so that
        # seeded weight initialization stays reproducible.
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = None
        if not self.equalInOut:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                          padding=0, bias=False)

    def forward(self, x):
        act = self.relu1(self.bn1(x))
        # Identity skip when shapes match; otherwise project the activated
        # input (pre-activation style) through the 1x1 shortcut.
        skip = x if self.equalInOut else self.convShortcut(act)
        out = self.relu2(self.bn2(self.conv1(act)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(skip, out)
class NetworkBlock(nn.Module):
    """A stack of ``nb_layers`` identical blocks.

    Only the first block receives ``in_planes`` and ``stride``; the rest map
    ``out_planes -> out_planes`` at stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        # First block changes width/stride; subsequent blocks keep both.
        blocks = [
            block(in_planes if idx == 0 else out_planes,
                  out_planes,
                  stride if idx == 0 else 1,
                  dropRate)
            for idx in range(int(nb_layers))
        ]
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (Zagoruyko & Komodakis) with a softmax probability head.

    :param depth: total depth; must satisfy (depth - 4) % 6 == 0
    :param num_classes: size of the output layer
    :param widen_factor: channel multiplier for the three block groups
    :param dropRate: dropout probability inside each BasicBlock
    """
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6  # blocks per group
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # fix: the class axis (dim=1) was previously implicit, which is
        # deprecated; output is identical for the 2-D input used here
        self.softmax = nn.Softmax(dim=1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        """Return per-class probabilities; assumes 32x32 inputs (8x8 maps pre-pool)."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # 8x8 average pool collapses the 8x8 maps of a 32x32 input to 1x1
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        out = self.fc(out)
        out = self.softmax(out)
        return out
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    :param output: (batch, classes) score tensor
    :param target: (batch,) ground-truth class indices
    :param topk: tuple of k values to evaluate
    :return: list of precision percentages (0-100), one 0-d tensor per k
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # fix: reshape instead of view -- the slice of the transposed tensor
        # is not guaranteed contiguous and view raises on modern PyTorch
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def metrics_print(net,expert_fn, n_classes, loader):
    '''
    Computes deferral metrics: coverage plus system/expert/classifier accuracy.
    -----
    Arguments:
    net: model whose extra output (index n_classes) means "defer to expert"
    expert_fn: expert model, called as expert_fn(images, labels) -> predictions
    n_classes: number of real classes (the defer output excluded)
    loader: data loader
    '''
    correct = 0        # classifier correct on non-deferred samples
    correct_sys = 0    # classifier-or-expert correct (whole system)
    exp = 0            # expert correct on deferred samples
    exp_total = 0      # number of deferred samples
    total = 0          # number of non-deferred samples
    real_total = 0     # total samples seen
    alone_correct = 0  # classifier correct when forced to always predict
    with torch.no_grad():
        for data in loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            batch_size = outputs.size()[0]  # batch_size
            exp_prediction = expert_fn(images, labels)
            for i in range(0, batch_size):
                # r is True when the model chose the defer action
                r = (predicted[i].item() == n_classes)
                prediction = predicted[i]
                if predicted[i] == n_classes:
                    max_idx = 0
                    # get second max: best *real* class when defer won argmax
                    for j in range(0, n_classes):
                        if outputs.data[i][j] >= outputs.data[i][max_idx]:
                            max_idx = j
                    prediction = max_idx
                else:
                    prediction = predicted[i]
                alone_correct += (prediction == labels[i]).item()
                if r == 0:
                    total += 1
                    correct += (predicted[i] == labels[i]).item()
                    correct_sys += (predicted[i] == labels[i]).item()
                if r == 1:
                    exp += (exp_prediction[i] == labels[i].item())
                    correct_sys += (exp_prediction[i] == labels[i].item())
                    exp_total += 1
                real_total += 1
    cov = str(total) + str(" out of") + str(real_total)
    # tiny epsilons keep the ratios defined when a bucket is empty
    to_print = {"coverage": cov, "system accuracy": 100 * correct_sys / real_total,
                "expert accuracy": 100 * exp / (exp_total + 0.0002),
                "classifier accuracy": 100 * correct / (total + 0.0001),
                "alone classifier": 100 * alone_correct / real_total}
    print(to_print, flush=True)
    return to_print
def metrics_print_baseline(net_class, expert_fn, n_classes, loader):
    '''
    Baseline deferral metrics: defer exactly when the expert would be correct.
    -----
    Arguments:
    net_class: plain classifier model
    expert_fn: expert model, called as expert_fn(images, labels) -> predictions
    n_classes: number of real classes
    loader: data loader
    '''
    correct = 0      # classifier correct on samples kept by the classifier
    correct_sys = 0  # classifier-or-expert correct (whole system)
    exp = 0          # expert correct on deferred samples
    exp_total = 0    # number of deferred samples
    total = 0        # number of samples kept by the classifier
    real_total = 0   # total samples seen
    with torch.no_grad():
        for data in loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs_class = net_class(images)
            _, predicted = torch.max(outputs_class.data, 1)
            batch_size = outputs_class.size()[0]  # batch_size
            exp_prediction = expert_fn(images, labels)
            for i in range(0, batch_size):
                # r == 1: the (oracle) expert is right, so defer this sample
                r = (exp_prediction[i] == labels[i].item())
                if r == 0:
                    total += 1
                    prediction = predicted[i]
                    if predicted[i] == n_classes:
                        max_idx = 0
                        # best real class when the defer output won argmax
                        for j in range(0, n_classes):
                            if outputs_class.data[i][j] >= outputs_class.data[i][max_idx]:
                                max_idx = j
                        prediction = max_idx
                    else:
                        prediction = predicted[i]
                    correct += (prediction == labels[i]).item()
                    correct_sys += (prediction == labels[i]).item()
                if r == 1:
                    exp += (exp_prediction[i] == labels[i].item())
                    correct_sys += (exp_prediction[i] == labels[i].item())
                    exp_total += 1
                real_total += 1
    cov = str(total) + str(" out of") + str(real_total)
    to_print = {"coverage": cov, "system accuracy": 100 * correct_sys / real_total,
                "expert accuracy": 100 * exp / (exp_total + 0.0002),
                "classifier accuracy": 100 * correct / (total + 0.0001)}
    print(to_print)
    # fix: return the metrics like metrics_print does, so callers can log them
    return to_print
def reject_CrossEntropyLoss(outputs, m, labels, m2, n_classes):
    '''
    The L_{CE} loss implementation for CIFAR.
    ----
    outputs: network probabilities; column n_classes is the defer action
    m: cost of deferring to expert cost of classifier predicting (alpha* I_{m\neq y} + I_{m =y})
    labels: target
    m2: cost of classifier predicting (alpha* I_{m\neq y} + I_{m =y})
    n_classes: number of classes
    '''
    batch_size = outputs.size()[0]  # batch_size
    rows = range(batch_size)
    # Probability assigned to the defer action and to the true label.
    defer_prob = outputs[rows, [n_classes] * batch_size]
    label_prob = outputs[rows, labels]
    per_example = -m * torch.log2(defer_prob) - m2 * torch.log2(label_prob)
    return per_example.sum() / batch_size
def my_CrossEntropyLoss(outputs, labels):
    """Mean negative log2-probability of the target class (base-2 cross entropy)."""
    n = outputs.size()[0]  # batch_size
    picked = outputs[range(n), labels]  # probability of the true label per row
    return torch.sum(-torch.log2(picked)) / n
def train_reject(train_loader, model, optimizer, scheduler, epoch, expert_fn, n_classes, alpha):
    """Train for one epoch on the training set with deferral.

    :param expert_fn: expert model, expert_fn(input, target) -> predictions
    :param n_classes: number of real classes (model has n_classes + 1 outputs)
    :param alpha: classifier-loss weight used when the expert is correct
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        target = target.to(device)
        input = input.to(device)
        # compute output
        output = model(input)
        # get expert predictions and costs
        batch_size = output.size()[0]  # batch_size
        m = expert_fn(input, target)
        m2 = [0] * batch_size
        for j in range(0, batch_size):
            if m[j] == target[j].item():
                # expert correct: reward deferring, down-weight classifier term
                m[j] = 1
                m2[j] = alpha
            else:
                m[j] = 0
                m2[j] = 1
        m = torch.tensor(m)
        m2 = torch.tensor(m2)
        m = m.to(device)
        m2 = m2.to(device)
        # done getting expert predictions and costs
        # compute loss (the unused nn.CrossEntropyLoss instance was removed)
        loss = reject_CrossEntropyLoss(output, m, target, m2, n_classes)
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      loss=losses, top1=top1), flush=True)
def train_reject_class(train_loader, model, optimizer, scheduler, epoch, expert_fn, n_classes, alpha):
    """Train for one epoch on the training set without deferral.

    NOTE(review): ``expert_fn``, ``n_classes`` and ``alpha`` are unused here;
    the signature mirrors ``train_reject`` so the two are interchangeable.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        target = target.to(device)
        input = input.to(device)
        # compute output
        output = model(input)
        # compute loss (plain cross entropy on the softmax probabilities)
        loss = my_CrossEntropyLoss(output, target)
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      loss=losses, top1=top1))
def validate_reject(val_loader, model, epoch, expert_fn, n_classes):
    """Perform validation on the validation set with deferral.

    Returns the running top-1 precision average. NOTE(review): alpha is
    hard-coded to 1 here, independent of the training alpha -- confirm.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        target = target.to(device)
        input = input.to(device)
        # compute output
        with torch.no_grad():
            output = model(input)
        # expert prediction
        batch_size = output.size()[0]  # batch_size
        m = expert_fn(input, target)
        alpha = 1
        m2 = [0] * batch_size
        for j in range(0, batch_size):
            if m[j] == target[j].item():
                # expert correct: reward deferring, alpha-weighted classifier term
                m[j] = 1
                m2[j] = alpha
            else:
                m[j] = 0
                m2[j] = 1
        m = torch.tensor(m)
        m2 = torch.tensor(m2)
        m = m.to(device)
        m2 = m2.to(device)
        # compute loss
        loss = reject_CrossEntropyLoss(output, m, target, m2, n_classes)
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % 10 == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses,
                      top1=top1), flush=True)
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1), flush=True)
    return top1.avg
def run_reject(model, data_aug, n_dataset, expert_fn, epochs, alpha, batch_size):
    '''
    Train `model` on CIFAR with the deferral loss and print metrics every 10 epochs.

    model: WideResNet model (n_dataset + 1 outputs, last one = defer)
    data_aug: boolean to use data augmentation in training
    n_dataset: number of classes (10 or 100)
    expert_fn: expert model
    epochs: number of epochs to train
    alpha: alpha parameter in L_{CE}^{\alpha}
    batch_size: minibatch size for both loaders
    '''
    # Data loading code
    normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
    if data_aug:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
                                              (4, 4, 4, 4), mode='reflect').squeeze()),
            transforms.ToPILImage(),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    # NOTE(review): the validation split below reuses transform_train;
    # transform_test is currently unused.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
    if n_dataset == 10:
        dataset = 'cifar10'
    elif n_dataset == 100:
        dataset = 'cifar100'
    else:
        # fix: previously fell through and crashed later with a NameError
        raise ValueError('n_dataset must be 10 or 100, got {}'.format(n_dataset))
    kwargs = {'num_workers': 0, 'pin_memory': True}
    train_dataset_all = datasets.__dict__[dataset.upper()]('../data', train=True, download=True,
                                                           transform=transform_train)
    # 90/10 train/validation split of the CIFAR training set
    train_size = int(0.90 * len(train_dataset_all))
    test_size = len(train_dataset_all) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(train_dataset_all, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size, shuffle=True, **kwargs)
    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])), flush=True)
    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    # model = torch.nn.DataParallel(model).cuda()
    model = model.to(device)
    # optionally resume from a checkpoint
    cudnn.benchmark = True
    # define loss function (criterion) and optimizer
    optimizer = torch.optim.SGD(model.parameters(), 0.1,
                                momentum=0.9, nesterov=True,
                                weight_decay=5e-4)
    # cosine learning rate (stepped once per batch, hence * len(train_loader))
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader) * epochs)
    for epoch in range(0, epochs):
        # train for one epoch
        train_reject(train_loader, model, optimizer, scheduler, epoch, expert_fn, n_dataset, alpha)
        if epoch % 10 == 0:
            _ = metrics_print(model, expert_fn, n_dataset, test_loader)
n_dataset = 10 # cifar-10: number of real classes (the model adds a defer output)
class synth_expert:
    '''
    Simple synthetic expert for CIFAR-10: answers perfectly on low-numbered
    classes and guesses uniformly at random otherwise.
    ----
    k: classes with label <= k are answered perfectly (i.e. k+1 classes --
       NOTE(review): mirrors the original <= comparison)
    n_classes: number of classes (10+1 for CIFAR-10)
    '''

    def __init__(self, k, n_classes):
        self.k = k
        self.n_classes = n_classes

    def predict(self, input, labels):
        """Return a list of per-sample predictions; `input` is unused."""
        predictions = []
        for lab in labels:
            truth = lab.item()
            if truth <= self.k:
                predictions.append(truth)
            else:
                predictions.append(random.randint(0, self.n_classes - 1))
        return predictions
if __name__ == "__main__":
for k in [2,3,4,7]:
print("k: {}".format(k), flush=True)
alpha = 1
bsz=1024
expert = synth_expert(k, n_dataset)
model = WideResNet(28, n_dataset + 1, 4, dropRate=0)
run_reject(model, True, n_dataset, expert.predict, 200, alpha, bsz) # train for 200 epochs
torch.save(model.state_dict(), 'checkpointK_' + str(k) + '.pt')
|
"""提取变量后,用字符串格式话的方式打印信息"""
confirmed=int(input("请输入确证人数:"))
cured=int(input("请输入治愈人数:"))
cured_rate=float(input("请输入治愈率:"))
print("湖北确诊人数%.2d人,治愈%.2d人,治愈率%.2f" %(confirmed, cured, cured_rate)) |
#! /usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from barf.arch import ARCH_ARM_MODE_ARM
from barf.arch import ARCH_ARM_MODE_THUMB
from barf.barf import BARF
if __name__ == "__main__":
# x86
# ======================================================================= #
#
# Open file
#
filename = "./samples/bin/branch4.x86"
barf = BARF(filename)
#
# Recover CFG
#
print("[+] Recovering program CFG...")
cfg = barf.recover_cfg(start=0x40052d, end=0x400560)
cfg.save(filename + "_cfg", print_ir=True)
# ARM
# ======================================================================= #
#
# Open file
#
filename = "./samples/bin/branch4.arm"
barf = BARF(filename)
#
# Recover CFG
#
print("[+] Recovering program CFG...")
cfg = barf.recover_cfg(start=0x000083c8, end=0x00008404 + 0x4, arch_mode=ARCH_ARM_MODE_ARM)
cfg.save(filename + "_cfg", print_ir=True)
# ARM Thumb
# ======================================================================= #
#
# Open file
#
filename = "./samples/bin/branch4.arm_thumb"
barf = BARF(filename)
#
# Recover CFG
#
print("[+] Recovering program CFG...")
cfg = barf.recover_cfg(start=0x00010434, end=0x0001046a + 0x2, arch_mode=ARCH_ARM_MODE_THUMB)
cfg.save(filename + "_cfg", print_ir=True)
|
from functools import wraps
def with_lock(lock):
    """Decorator factory: run the wrapped function while holding ``lock``.

    The lock is acquired before the call and always released afterwards,
    even when the function raises.
    """
    def wrapper(fn):
        @wraps(fn)
        def wrapped_fn(*args, **kwargs):
            # `with` is equivalent to acquire/try/finally-release, with no
            # window in which an interrupt could leak the held lock.
            with lock:
                return fn(*args, **kwargs)
        return wrapped_fn
    return wrapper
|
from music21 import *
### Universal Helper Functions
def addBass(inChord, root):
    """Drop ``root`` below the chord's current bass, then add root/fifth/low-octave
    bass tones to the chord in place.

    Tones whose ``.octave`` is None (no concrete octave) are skipped.
    """
    lowNote = inChord.bass()
    # Shift the root down an octave at a time until it sits below the bass.
    while(lowNote.midi <= root.midi):
        root = pitch.Pitch(root.midi-12)
    octaveHigh = root
    fifth = pitch.Pitch(root.midi - 5)
    octaveLow = pitch.Pitch(root.midi - 12)
    # fix: renamed the loop variable -- `note` shadowed the music21 `note`
    # module pulled in by the star import; also `is not None` over `!= None`.
    for bassTone in (octaveHigh, fifth, octaveLow):
        if bassTone.octave is not None:
            inChord.add(bassTone)
    return inChord
def getOutChord(inDegs, outDegs):
    """Prune chord degrees missing on either side and return the survivors.

    Degrees whose input or output pitch is None are deleted from ``outDegs``
    (never below one entry). If no non-None output pitch survives, fall back
    to the input chord's pitches.
    """
    for degree in list(inDegs):
        missing = inDegs[degree] is None or outDegs[degree] is None
        if missing and len(outDegs) > 1:
            del outDegs[degree]
    print(outDegs)
    kept = [p for p in outDegs.values() if p is not None]
    if not kept:
        return list(inDegs.values())
    return kept
def makeMidiString(outChord):
    """Serialize a chord's pitches as a bracketed MIDI list, e.g. ``[60,64,67]``."""
    print(outChord)
    print(outChord.pitchedCommonName)
    midi_numbers = [str(p.midi) for p in outChord.pitches]
    # fix: join handles the empty-chord case correctly ("[]"); the previous
    # trailing-comma trim returned a bare "]" when the chord had no pitches.
    return "[" + ",".join(midi_numbers) + "]"
### Main Cadential Function
def minorPlagalToMinor(inChord):
    """Resolve a minor plagal (iv) chord to the minor tonic a 4th below.

    Each target degree is derived from one input degree; the bare excepts
    turn a missing input degree (None attribute access) into a skipped
    output degree.
    """
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.root().midi-2)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.third.midi-1)
    except:
        print("no 5th")
    try:
        # NOTE(review): passes a Pitch object (not .midi) to pitch.Pitch --
        # confirm music21 accepts this; sibling functions use .midi math.
        outRoot = pitch.Pitch(inChord.fifth)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.third.midi+2)
    except:
        print("no 7th")
    # input degrees re-labelled relative to the *target* chord
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-5))
    return makeMidiString(outChord)
def minorPlagalToMajor(inChord):
    """Resolve a minor plagal (iv) chord to the *major* tonic a 4th below.

    Bare excepts skip any degree missing from the input chord.
    """
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.root().midi-1)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.third.midi-1)
    except:
        print("no 5th")
    try:
        # NOTE(review): passes a Pitch object (not .midi) to pitch.Pitch --
        # confirm music21 accepts this; sibling functions use .midi math.
        outRoot = pitch.Pitch(inChord.fifth)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.third.midi+1)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-5))
    return makeMidiString(outChord)
def majorPlagalToMinor(inChord):
    """Resolve a major plagal (IV) chord to the *minor* tonic a 4th below.

    Bare excepts skip any degree missing from the input chord.
    """
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.root().midi-2)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.third.midi-2)
    except:
        print("no 5th")
    try:
        # NOTE(review): passes a Pitch object (not .midi) to pitch.Pitch --
        # confirm music21 accepts this; sibling functions use .midi math.
        outRoot = pitch.Pitch(inChord.fifth)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.root().midi-10)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-5))
    return makeMidiString(outChord)
def majorPlagalToMajor(inChord):
    """Resolve a major plagal (IV) chord to the major tonic a 4th below.

    Bare excepts skip any degree missing from the input chord.
    """
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.root().midi-1)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.third.midi-2)
    except:
        print("no 5th")
    try:
        # NOTE(review): passes a Pitch object (not .midi) to pitch.Pitch --
        # confirm music21 accepts this; sibling functions use .midi math.
        outRoot = pitch.Pitch(inChord.fifth)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.root().midi-10)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-5))
    return makeMidiString(outChord)
def randomQuintal(inChord):
    """Build a five-note stack of perfect fifths a whole step below the bass."""
    base = pitch.Pitch(inChord.bass().midi-2)
    # Successive +7 semitones: root, 5th, 9th, 6th, 3rd (compound).
    stack = [base]
    for _ in range(4):
        stack.append(pitch.Pitch(stack[-1].midi+7))
    outChord = chord.Chord(stack)
    return makeMidiString(addBass(outChord, base))
def randomQuartal(inChord):
    """Build a five-note stack of perfect fourths a whole step below the bass."""
    base = pitch.Pitch(inChord.bass().midi-2)
    # Successive +5 semitones: root, 4th, b7th, b3rd, b6th (compound).
    stack = [base]
    for _ in range(4):
        stack.append(pitch.Pitch(stack[-1].midi+5))
    outChord = chord.Chord(stack)
    return makeMidiString(addBass(outChord, base))
def randomHarmonic(inChord):
    """Approximate the harmonic series rooted a 4th below the current bass."""
    base = pitch.Pitch(inChord.bass().midi-5)
    # Intervals (in semitones) between successive overtones: octave, fifth,
    # fourth, major third, tritone-ish seventh.
    tones = [base]
    for step in (12, 7, 5, 4, 6):
        tones.append(pitch.Pitch(tones[-1].midi+step))
    outChord = chord.Chord(tones)
    return makeMidiString(addBass(outChord, base))
def majorPerfectToMajor(inChord):
    """Resolve a dominant (V/V7) chord to the major tonic a 5th below."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        if (inChord.seventh != None):
            outThird = pitch.Pitch(inChord.seventh.midi-1)
        else:
            outThird = pitch.Pitch(inChord.root().midi-3)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.root().midi)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.third+1` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.third.midi+1)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.third.midi-2)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-7))
    return makeMidiString(outChord)
def majorPerfectToMinor(inChord):
    """Resolve a dominant (V/V7) chord to the *minor* tonic a 5th below."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        if (inChord.seventh != None):
            outThird = pitch.Pitch(inChord.seventh.midi-2)
        else:
            outThird = pitch.Pitch(inChord.root().midi-4)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.root().midi)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.third+1` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.third.midi+1)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.third.midi-1)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-7))
    return makeMidiString(outChord)
def minorPerfectToMajor(inChord):
    """Resolve a minor dominant (v/v7) chord to the major tonic a 5th below."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        if (inChord.seventh != None):
            outThird = pitch.Pitch(inChord.seventh.midi-1)
        else:
            outThird = pitch.Pitch(inChord.root().midi-3)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.root().midi)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.third+2` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.third.midi+2)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.third.midi-1)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-7))
    return makeMidiString(outChord)
def minorPerfectToMinor(inChord):
    """Resolve a minor dominant (v/v7) chord to the minor tonic a 5th below."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        if (inChord.seventh != None):
            outThird = pitch.Pitch(inChord.seventh.midi-2)
        else:
            outThird = pitch.Pitch(inChord.root().midi-4)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.root().midi)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.third+2` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.third.midi+2)
    except:
        print("no root")
    try:
        outSeventh = pitch.Pitch(inChord.third.midi)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-7))
    return makeMidiString(outChord)
def majorBackdoorToMajor(inChord):
    """Resolve a backdoor (bVII) chord up a whole step to the major tonic."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.third.midi+2)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.fifth.midi+2)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.root()+2` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.root().midi+2)
    except:
        print("no root")
    try:
        # fix: same missing-.midi bug for the seventh
        outSeventh = pitch.Pitch(inChord.root().midi+13)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-10))
    return makeMidiString(outChord)
def majorBackdoorToMinor(inChord):
    """Resolve a backdoor (bVII) chord up a whole step to the *minor* tonic."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.third.midi+1)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.fifth.midi+2)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.root()+2` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.root().midi+2)
    except:
        print("no root")
    try:
        # fix: same missing-.midi bug for the seventh
        outSeventh = pitch.Pitch(inChord.root().midi+12)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-10))
    return makeMidiString(outChord)
def minorBackdoorToMajor(inChord):
    """Resolve a minor backdoor (bvii) chord up a whole step to the major tonic."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.fifth.midi-1)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.fifth.midi+2)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.root()+2` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.root().midi+2)
    except:
        print("no root")
    try:
        # fix: same missing-.midi bug for the seventh
        outSeventh = pitch.Pitch(inChord.root().midi+13)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-10))
    return makeMidiString(outChord)
def minorBackdoorToMinor(inChord):
    """Resolve a minor backdoor (bvii) chord up a whole step to the minor tonic."""
    outRoot = outThird = outFifth = outSeventh = None
    try:
        outThird = pitch.Pitch(inChord.third.midi+2)
    except:
        print("no 3rd")
    try:
        outFifth = pitch.Pitch(inChord.fifth.midi+2)
    except:
        print("no 5th")
    try:
        # fix: the Pitch constructor needs a number; `inChord.root()+2` raised
        # TypeError and silently disabled the root via the bare except.
        outRoot = pitch.Pitch(inChord.root().midi+2)
    except:
        print("no root")
    try:
        # fix: same missing-.midi bug for the seventh
        outSeventh = pitch.Pitch(inChord.root().midi+12)
    except:
        print("no 7th")
    inDegs = {3:inChord.root(), 5: inChord.third,
              1: inChord.fifth, 7: inChord.seventh}
    outChordDict = {1:outRoot, 3:outThird, 5:outFifth, 7:outSeventh}
    outChordList = getOutChord(inDegs, outChordDict)
    outChord = chord.Chord(outChordList)
    outChord = addBass(outChord, pitch.Pitch(inChord.root().midi-10))
    return makeMidiString(outChord)
from __future__ import print_function
import codecs
import gzip
import os
import os.path
import warnings
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
from torchvision.datasets.utils import download_url, makedir_exist_ok
class MNIST(data.Dataset):
    """`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset, relabeled for anomaly detection.

    Digits listed in ``anomaly_class`` are relabeled ``1 - abnormal``; every
    other digit becomes ``0 - normal``.  The training split contains only
    normal samples; the test split mixes the held-out normal samples with
    all abnormal ones.

    Args:
        root (string): Root directory of dataset where ``processed/training.pt``
            and ``processed/test.pt`` exist.
        train (bool, optional): If True, creates dataset from ``training.pt``,
            otherwise from ``test.pt``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        anomaly_class (int or list of int): digit(s) to treat as anomalous.
    """
    urls = [
        'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
    ]
    training_file = 'training.pt'
    test_file = 'test.pt'
    classes = ['0 - normal', '1 - abnormal']

    @property
    def train_labels(self):
        warnings.warn("train_labels has been renamed targets")
        return self.targets

    @property
    def test_labels(self):
        warnings.warn("test_labels has been renamed targets")
        return self.targets

    @property
    def train_data(self):
        warnings.warn("train_data has been renamed data")
        return self.data

    @property
    def test_data(self):
        warnings.warn("test_data has been renamed data")
        return self.data

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False, anomaly_class=None):
        # fraction of the normal samples held out for the test split
        self.test_percentage = 0.2
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        if anomaly_class is None:
            # BUG FIX: the two concatenated literals had no separating space.
            raise RuntimeError('Please fill the anomaly_class argument ' +
                               'anomaly_class=<listOfClass or integer>')
        if isinstance(anomaly_class, list):
            self.anomaly_class = anomaly_class
        elif isinstance(anomaly_class, int):
            self.anomaly_class = [anomaly_class]
        else:
            # BUG FIX: unsupported types used to be silently ignored, which
            # surfaced later as an AttributeError in `processed_folder`.
            raise RuntimeError('anomaly_class must be an int or a list of ints')
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        if self.train:
            data_file = self.training_file
        else:
            data_file = self.test_file
        self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.targets[index])
        # doing this so that it is consistent with all other dataloader
        # to return a PIL Image
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        return len(self.data)

    @property
    def raw_folder(self):
        return os.path.join(self.root, self.__class__.__name__, 'raw')

    @property
    def processed_folder(self):
        # one processed folder per anomaly-class combination so different
        # anomaly setups can coexist under the same root
        return os.path.join(self.root, self.__class__.__name__,
                            'processed_with_anomaly_' + "_".join([str(i) for i in self.anomaly_class]))

    @property
    def class_to_idx(self):
        return {_class: i for i, _class in enumerate(self.classes)}

    def _check_exists(self):
        return os.path.exists(os.path.join(self.processed_folder, self.training_file)) and \
            os.path.exists(os.path.join(self.processed_folder, self.test_file))

    @staticmethod
    def extract_gzip(gzip_path, remove_finished=False):
        """Decompress *gzip_path* next to itself, dropping the ``.gz`` suffix."""
        print('Extracting {}'.format(gzip_path))
        with open(gzip_path.replace('.gz', ''), 'wb') as out_f, \
                gzip.GzipFile(gzip_path) as zip_f:
            out_f.write(zip_f.read())
        if remove_finished:
            os.unlink(gzip_path)

    def download(self):
        """Download the MNIST data if it doesn't exist in processed_folder already."""
        if self._check_exists():
            return
        makedir_exist_ok(self.raw_folder)
        makedir_exist_ok(self.processed_folder)
        # download files
        for url in self.urls:
            filename = url.rpartition('/')[2]
            file_path = os.path.join(self.raw_folder, filename)
            download_url(url, root=self.raw_folder, filename=filename, md5=None)
            self.extract_gzip(gzip_path=file_path, remove_finished=False)
        # process and save as torch files
        print('Processing...')
        normal_data = []
        normal_targets = []
        abnormal_data = []
        abnormal_targets = []
        # pool the official train and test splits, then re-split below
        tmp_data = np.append(read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
                             read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')), 0)
        tmp_targets = np.append(read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte')),
                                read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte')), 0)
        for d, t in zip(tmp_data, tmp_targets):
            if t in self.anomaly_class:
                abnormal_data.append(d)
                abnormal_targets.append(t)
            else:
                normal_data.append(d)
                normal_targets.append(t)
        # convert all label to 0 - normal and 1 - abnormal
        normal_data = np.asarray(normal_data)
        normal_targets = np.zeros_like(normal_targets)
        abnormal_data = np.asarray(abnormal_data)
        abnormal_targets = np.ones_like(abnormal_targets)
        # Create new anomaly dataset based on the following data structure:
        # - anomaly dataset
        # . -> train
        # .    -> normal
        # . -> test
        # .    -> normal
        # .    -> abnormal
        test_idx = int(normal_targets.shape[0] * self.test_percentage)
        training_data = normal_data[test_idx:, ]
        training_targets = normal_targets[test_idx:, ]
        test_data = np.append(normal_data[:test_idx, ], abnormal_data, 0)
        test_targets = np.append(normal_targets[:test_idx, ], abnormal_targets, 0)
        training_set = (
            torch.from_numpy(training_data).view(*training_data.shape),
            torch.from_numpy(training_targets).view(*training_targets.shape).long()
        )
        test_set = (
            torch.from_numpy(test_data).view(*test_data.shape),
            torch.from_numpy(test_targets).view(*test_targets.shape).long()
        )
        with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
            torch.save(training_set, f)
        with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
            torch.save(test_set, f)
        print('Done!')

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        tmp = 'train' if self.train is True else 'test'
        fmt_str += ' Split: {}\n'.format(tmp)
        fmt_str += ' Root Location: {}\n'.format(self.root)
        tmp = ' Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = ' Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
class FashionMNIST(MNIST):
    """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.

    Reuses the MNIST anomaly-detection pipeline; only the source URLs differ.

    Args:
        root (string): Root directory of dataset where ``processed/training.pt``
            and ``processed/test.pt`` exist.
        train (bool, optional): If True, creates dataset from ``training.pt``,
            otherwise from ``test.pt``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    urls = [
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
    ]
    # BUG FIX: was the duplicated assignment ``classes = classes = [...]``.
    classes = ['0 - normal', '1 - abnormal']
class KMNIST(MNIST):
    """`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset.
    Args:
        root (string): Root directory of dataset where ``processed/training.pt``
            and ``processed/test.pt`` exist.
        train (bool, optional): If True, creates dataset from ``training.pt``,
            otherwise from ``test.pt``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    # Reuses the MNIST download/relabel pipeline; only the source URLs differ.
    urls = [
        'http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-images-idx3-ubyte.gz',
        'http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-labels-idx1-ubyte.gz',
        'http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-images-idx3-ubyte.gz',
        'http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-labels-idx1-ubyte.gz',
    ]
    # binary anomaly labels replace the original 10 character classes
    classes = ['0 - normal', '1 - abnormal']
def get_int(b):
    """Interpret the byte string *b* as a big-endian unsigned integer."""
    return int(codecs.encode(b, 'hex'), 16)
def read_label_file(path):
    """Parse an MNIST idx1-ubyte label file into a 1-D long tensor."""
    with open(path, 'rb') as stream:
        raw = stream.read()
        # 2049 is the idx magic number identifying a label file
        assert get_int(raw[:4]) == 2049
        count = get_int(raw[4:8])
        labels = np.frombuffer(raw, dtype=np.uint8, offset=8)
        return torch.from_numpy(labels).view(count).long()
def read_image_file(path):
    """Parse an MNIST idx3-ubyte image file into an (N, rows, cols) uint8 tensor."""
    with open(path, 'rb') as stream:
        raw = stream.read()
        # 2051 is the idx magic number identifying an image file
        assert get_int(raw[:4]) == 2051
        count = get_int(raw[4:8])
        rows = get_int(raw[8:12])
        cols = get_int(raw[12:16])
        pixels = np.frombuffer(raw, dtype=np.uint8, offset=16)
        return torch.from_numpy(pixels).view(count, rows, cols)
|
from flask_restplus import Namespace, Resource
from polylogyx.wrappers.v1 import parent_wrappers
from polylogyx.blueprints.v1.utils import *
# REST namespace under which all dashboard endpoints below are registered.
ns = Namespace('dashboard', description='dashboard data related operation')
@ns.route('', endpoint='dashboard_data')
class Dashboard(Resource):
    """Serves the aggregate data rendered on the dashboard/index page."""

    @ns.marshal_with(parent_wrappers.common_response_wrapper)
    def get(self):
        """Collect alert/query status, host distribution and the purge setting."""
        alerts = fetch_alert_node_query_status()
        distribution = fetch_dashboard_data()
        retention = db.session.query(Settings).filter(
            Settings.name == 'data_retention_days').first()
        chart_data = {
            'alert_data': alerts,
            "purge_duration": retention.setting if retention else None,
            'distribution_and_status': distribution,
        }
        return marshal(
            prepare_response('Data is fetched successfully', 'success', chart_data),
            parent_wrappers.common_response_wrapper, skip_none=True)
def count(distribution_and_status):
    """Return True if any platform reports at least one host.

    Bug fixed: the previous version returned after inspecting only the
    first entry (its ``else: return`` exited the loop immediately), so a
    leading zero count hid non-empty platforms later in the list.  Returns
    None (falsy) when no platform has hosts, preserving the original
    truthy/falsy contract.
    """
    for entry in distribution_and_status['hosts_platform_count']:
        if entry['count'] > 0:
            return True
    return None
|
import factory
from ..models import *
# Parent-to-child factories
# http://factoryboy.readthedocs.org/en/latest/recipes.html#dependent-objects-foreignkey
class ParentToChild_ChildFactory(factory.django.DjangoModelFactory):
    """Builds a Child row; used as the RelatedFactory target below."""
    class Meta:
        model = Child
    name = 'child1'
class ParentToChild_ParentFactory(factory.django.DjangoModelFactory):
    """Builds a Parent and, via RelatedFactory, an attached Child."""
    class Meta:
        model = Parent
    name = 'parent1'
    # The second argument is the name of the foreign-key field on the child.
    # This attribute name is what callers use to reach the related object
    # (e.g. relatedparent__name).
    # http://stackoverflow.com/questions/21564878/factory-boy-add-several-dependent-objects
    relatedparent = factory.RelatedFactory(ParentToChild_ChildFactory, 'parent')
# Child-to-parent factories
class ChildToParent_ParentFactory(factory.django.DjangoModelFactory):
    """Parent factory used as a SubFactory target by the child factories below."""
    class Meta:
        model = Parent
    name = 'parent2'
class ChildToParent_ChildFactory(factory.django.DjangoModelFactory):
    """Child that creates its own Parent, overriding the parent's name."""
    class Meta:
        model = Child
    name = 'child2'
    parent = factory.SubFactory(ChildToParent_ParentFactory, name='parent_value')
class ChildToParent_ChildWithCopyFactory(factory.django.DjangoModelFactory):
    """Child whose generated Parent copies the child's own name field."""
    class Meta:
        model = Child
    name = 'child2'
    # `SelfAttribute('..<field name on the child>')` copies the child's field
    # value onto the parent being built.
    # http://factoryboy.readthedocs.org/en/latest/recipes.html#copying-fields-to-a-subfactory
    parent = factory.SubFactory(
        ChildToParent_ParentFactory,
        name=factory.SelfAttribute('..name'))
# NOTE(review): "Factroy" is a typo, but the name is part of the public API
# of this module and is kept for compatibility with existing callers.
class SameParent_ParentFactroy(factory.django.DjangoModelFactory):
    """Parent factory for the shared-parent scenario."""
    class Meta:
        model = Parent
    name = 'parent_same'
class SameParent_ChildFactroy(factory.django.DjangoModelFactory):
    """Child factory for the shared-parent scenario (no parent auto-created)."""
    class Meta:
        model = Child
    name = 'child_same'
class M2MSimple_PublicationFactory(factory.django.DjangoModelFactory):
    """Publication with a unique sequential title, for simple M2M tests."""
    title = factory.Sequence(lambda n: "Title #%s" % n)
    class Meta:
        model = Publication
class M2MSimple_AuthorFactory(factory.django.DjangoModelFactory):
    """Author whose ``publications`` M2M can be seeded via the factory call."""
    headline = 'm2m_simple_headline'
    class Meta:
        model = Author
    @factory.post_generation
    def publications(self, create, extracted, **kwargs):
        # Runs after the Author exists; ``extracted`` is whatever the caller
        # passed as ``publications=[...]``.
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            for publication in extracted:
                self.publications.add(publication)
class M2M_Through_PersonFactory(factory.django.DjangoModelFactory):
    """Person side of the through-model (Membership) M2M tests."""
    class Meta:
        model = Person
    name = 'person_name'
class M2M_Through_GroupFactory(factory.django.DjangoModelFactory):
    """Group side of the through-model (Membership) M2M tests."""
    class Meta:
        model = Group
    name = 'group_name'
class M2M_Through_MembershipFactory(factory.django.DjangoModelFactory):
    """Through-model row linking one Person to one Group."""
    class Meta:
        model = Membership
    person = factory.SubFactory(M2M_Through_PersonFactory)
    group = factory.SubFactory(M2M_Through_GroupFactory)
# Inherits the Person factory; builds one Person belonging to one Group
class M2M_Through_PersonWithGroupFactory(M2M_Through_PersonFactory):
    """Person plus one Membership (and hence one Group)."""
    membership = factory.RelatedFactory(
        M2M_Through_MembershipFactory, 'person')
# Inherits the Person factory; builds one Person belonging to two Groups
class M2M_Through_PersonWithTwoGroupFactory(M2M_Through_PersonFactory):
    """Person plus two Memberships, each creating its own Group."""
    membership1 = factory.RelatedFactory(
        M2M_Through_MembershipFactory, 'person')
    membership2 = factory.RelatedFactory(
        M2M_Through_MembershipFactory, 'person')
# Inherits the Person factory; builds one Person belonging to three Groups
# NOTE(review): class name says "TwoGroup" but three memberships are created
# (named Group1..Group3) -- the name is kept for caller compatibility.
class M2M_Through_PersonWithTwoGroup_Update_Factory(M2M_Through_PersonFactory):
    """Person plus three Memberships with explicitly named Groups."""
    membership1 = factory.RelatedFactory(
        M2M_Through_MembershipFactory, 'person', group__name='Group1')
    membership2 = factory.RelatedFactory(
        M2M_Through_MembershipFactory, 'person', group__name='Group2')
    membership3 = factory.RelatedFactory(
        M2M_Through_MembershipFactory, 'person', group__name='Group3')
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.functional import cached_property
from djmoney.models.fields import MoneyField
class Foo(models.Model):
    """Minimal model whose ReportBuilder whitelists a single field."""
    char_field = models.CharField(max_length=50, blank=True)
    char_field2 = models.CharField(max_length=50, blank=True)
    class ReportBuilder:
        # only char_field is exposed to the report builder
        fields = ('char_field',)
class FooExclude(Foo):
    """Foo variant that hides char_field2 from the report builder."""
    class ReportBuilder:
        exclude = ('char_field2',)
class Bar(models.Model):
    """Model exercising M2M fields, choices, properties and report extras."""
    char_field = models.CharField(max_length=50, blank=True)
    foos = models.ManyToManyField(Foo, blank=True)
    CHECK = 'CH'
    MATE = 'MA'
    CHESS_CHOICES = (
        (CHECK, 'CHECK'),
        (MATE, 'CHECKMATE'),
    )
    check_mate_status = models.CharField(
        max_length=2,
        choices=CHESS_CHOICES,
        default=CHECK
    )
    # plain property vs cached_property: both are exposed via ReportBuilder
    # 'extra' below to verify the report builder handles either kind.
    @property
    def i_want_char_field(self):
        return 'lol no'
    @cached_property
    def i_need_char_field(self):
        return 'lol yes'
    class ReportBuilder:
        extra = ('i_want_char_field', 'i_need_char_field',)
        filters = ('char_field',)
class Account(models.Model):
    """Model with djmoney MoneyFields to test money handling in reports."""
    name = models.CharField(max_length=255, blank=True, null=True, db_index=True)
    balance = MoneyField(max_digits=10, decimal_places=2, default_currency='USD', blank=True, null=True)
    budget = MoneyField(max_digits=20, decimal_places=4, default_currency='USD', blank=True, null=True)
    class ReportBuilder:
        fields = ('budget', 'id', 'name', 'balance',)
        filters = ('budget',)
        defaults = ('budget',)
        # MoneyField creates an implicit <name>_currency column; hide it
        exclude = ('balance_currency',)
class Place(models.Model):
    """Target of Restaurant's one-to-one relation."""
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)
    def __str__(self):
        return "%s the place" % self.name
class Restaurant(models.Model):
    """One-to-one extension of Place (shares its primary key)."""
    # NOTE(review): no on_delete argument -- this only works on Django < 2.0,
    # which matches the style of the other relations in this file.
    place = models.OneToOneField(Place, primary_key=True)
    serves_hot_dogs = models.BooleanField(default=False)
    serves_pizza = models.BooleanField(default=False)
    def __str__(self):
        return "%s the restaurant" % self.place.name
class Waiter(models.Model):
    """Employee of a Restaurant; exercises FK traversal in reports."""
    restaurant = models.ForeignKey(Restaurant)
    name = models.CharField(max_length=50)
    days_worked = models.IntegerField(blank=True, null=True, default=None)
    def __str__(self):
        return "%s the waiter at %s" % (self.name, self.restaurant)
class Person(models.Model):
    """Parent side of the Person/Child relation."""
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
class Child(models.Model):
    """Child of a Person; exercises choices and nullable ints in reports."""
    parent = models.ForeignKey(Person, related_name='children')
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    age = models.IntegerField(null=True, blank=True, default=None)
    color = models.CharField(max_length=1, blank=True, default='', choices=(
        ('R', 'Red'),
        ('G', 'Green'),
        ('B', 'Blue'),
    ))
class Comment(models.Model):
    """ django-contrib-comments like model """
    # generic relation: (content_type, object_pk) points at any model instance
    content_type = models.ForeignKey('contenttypes.ContentType')
    object_pk = models.TextField()
    content_object = GenericForeignKey(
        ct_field="content_type", fk_field="object_pk")
|
import flask
from fence.blueprints.login.base import DefaultOAuth2Login, DefaultOAuth2Callback
from fence.models import IdentityProvider
class CognitoLogin(DefaultOAuth2Login):
    """Login endpoint wired to the AWS Cognito OAuth2 client."""
    def __init__(self):
        # the cognito client is configured on the Flask app at startup
        super(CognitoLogin, self).__init__(
            idp_name=IdentityProvider.cognito, client=flask.current_app.cognito_client
        )
class CognitoCallback(DefaultOAuth2Callback):
    """OAuth2 redirect/callback endpoint for the AWS Cognito provider."""
    def __init__(self):
        super(CognitoCallback, self).__init__(
            idp_name=IdentityProvider.cognito, client=flask.current_app.cognito_client
        )
|
# -*- coding: utf-8 -*-
# Author: Aris Tritas
# License: BSD 3-clause
import numpy as np
from .base import Policy
class Oracle(Policy):
    """Omniscient agent that plays arms in decreasing order of known mean."""

    def __init__(self, means):
        # arm indices sorted from best to worst expected reward
        self.opt_arms = means.argsort()[::-1]
        self.t = 0

    def initialize(self):
        """Reset the round counter."""
        self.t = 0

    def choose(self):
        """Play the t-th best arm."""
        return self.opt_arms[self.t]

    def update(self, it, rt):
        """Advance one round; rewards are ignored since the means are known."""
        self.t = self.t + 1

    def __str__(self):
        return "Oracle"
class SpectralOracle(Policy):
    """Oracle that ranks arms by their spectrally projected means."""

    def __init__(self, *args, **kwargs):
        self.name = kwargs["alg"]
        # only the eigenvector basis is needed; eigenvalues are discarded
        _eigvals, basis = kwargs["eig"]
        self.optimal_arms = np.dot(basis, kwargs["means"]).argsort()[::-1]
        self.t = 0

    def initialize(self):
        """Reset the round counter."""
        self.t = 0

    def choose(self, *args, **kwargs):
        """Return the t-th best arm under the projected means."""
        return self.optimal_arms[self.t]

    def update(self, it, rt):
        """Advance one round; rewards are ignored since the means are known."""
        self.t = self.t + 1

    def __str__(self):
        return "{}SpectralOracle".format(self.name)
|
import boto3
from moto import mock_s3
from src.exemplo3 import BucketS3
import pytest
class TestS3:
    """Tests for the BucketS3 wrapper against a moto-mocked S3 backend."""
    @pytest.fixture
    @mock_s3
    def resources_aws(self):
        # NOTE(review): stacking @mock_s3 under @pytest.fixture creates the
        # connection inside the fixture's own mock context, while each test
        # re-enters @mock_s3 itself -- confirm the two contexts share state.
        conn = boto3.resource('s3', region_name='us-east-1')
        return conn
    @mock_s3
    def test_retornar_um_bucket_criado(self, resources_aws):
        # creating the bucket via the wrapper should make it listable
        BucketS3('Dados')
        assert resources_aws.Bucket('Dados') in resources_aws.buckets.all()
    @mock_s3
    def test_my_model_save(self, resources_aws):
        # round-trip: save a key through the wrapper, read it back via boto3
        bucket = BucketS3('Dados')
        bucket.save('clodonil', 'eh show')
        body = resources_aws.Object('Dados', 'clodonil')
        msg = body.get()['Body'].read().decode("utf-8")
        assert msg == 'eh show'
|
import pytest
from flex.constants import (
ARRAY,
)
from flex.exceptions import (
ValidationError,
)
from flex.loading.definitions.responses.single.headers.single import (
single_header_validator,
)
def test_items_is_not_required(msg_assertions):
    """An empty header object must validate: `items` is optional by default."""
    try:
        single_header_validator({})
    except ValidationError as err:
        errors = err.detail
    else:
        # no exception means no errors to inspect
        errors = {}
    # NOTE(review): the asserted path is 'headers' although this module
    # exercises the `items` keyword -- confirm the intended path.
    msg_assertions.assert_path_not_in_errors('headers', errors)
@pytest.mark.parametrize(
    'value',
    (None, True, 1, 1.1),
)
def test_items_type_validation(value, MESSAGES, msg_assertions):
    """Non-object values for `items` must be rejected with a type error."""
    with pytest.raises(ValidationError) as err:
        single_header_validator({'items': value})
    msg_assertions.assert_message_in_errors(
        MESSAGES['type']['invalid'],
        err.value.detail,
        'items.type',
    )
def test_items_is_required_if_type_array(msg_assertions, MESSAGES):
    """When a header declares type=array, `items` becomes mandatory."""
    with pytest.raises(ValidationError) as err:
        single_header_validator({'type': ARRAY})
    msg_assertions.assert_message_in_errors(
        MESSAGES['required']['required'],
        err.value.detail,
        'items',
    )
|
# -*- coding: utf-8 -*-
import os
import json
from simmate.workflow_engine.error_handler import ErrorHandler
from simmate.calculators.vasp.inputs.incar import Incar
class Posmap(ErrorHandler):
    """Handles VASP's POSMAP symmetry error by nudging SYMPREC.

    VASP advises changing SYMPREC by an order of magnitude (default 1e-5).
    The first occurrence divides SYMPREC by 10; the second multiplies it by
    100 (i.e. 10x the original value). Any further occurrence keeps the
    current setting and reports that no new fix was applied.
    """

    # run this while the VASP calculation is still going
    is_monitor = True

    # we assume that we are checking the vasp.out file
    filename_to_check = "vasp.out"

    # These are the error messages that we are looking for in the file
    possible_error_messages = ["POSMAP"]

    def correct(self, directory):
        """Apply the next POSMAP fix inside *directory* and describe it."""
        # load the INCAR file to view the current settings
        incar_filename = os.path.join(directory, "INCAR")
        incar = Incar.from_file(incar_filename)

        # load the error-count file if it exists, otherwise start fresh
        error_count_filename = os.path.join(directory, "simmate_error_counts.json")
        if os.path.exists(error_count_filename):
            with open(error_count_filename) as error_count_file:
                error_counts = json.load(error_count_file)
        else:
            error_counts = {}

        # Number of times this error was corrected *before* this call.
        # BUG FIX: the previous code incremented the count before testing
        # `== 0`, which made the first branch unreachable, double-counted
        # every call, and raised UnboundLocalError on a third occurrence
        # because `correction` was never assigned.
        attempts = error_counts.get("posmap", 0)
        current_symprec = incar.get("SYMPREC", 1e-5)
        if attempts == 0:
            # first, reduce by 10x
            new_symprec = current_symprec / 10
            incar["SYMPREC"] = new_symprec
            correction = f"switched SYMPREC from {current_symprec} to {new_symprec}"
        elif attempts == 1:
            # next, increase by 100x (10x the original because we decreased
            # by 10x in the first try.)
            new_symprec = current_symprec * 100
            incar["SYMPREC"] = new_symprec
            correction = f"switched SYMPREC from {current_symprec} to {new_symprec}"
        else:
            # no further known fixes; report instead of crashing
            correction = "no further SYMPREC corrections available"

        # record this attempt and rewrite the error-count file
        error_counts["posmap"] = attempts + 1
        with open(error_count_filename, "w") as file:
            json.dump(error_counts, file)

        # rewrite the INCAR with new settings
        incar.to_file(incar_filename)
        return correction
|
from mock import Mock
from nose.tools import raises
from testkit.exceptionutils import *
class CustomException(Exception):
    """Marker exception used to verify capture/reraise behaviour."""
    pass
@raises(CustomException)
def test_exception_raised_correctly():
    """store_any_exception must capture the exception and reraise() it intact."""
    exception_info = store_any_exception(a_function)
    exception_info.reraise()
def test_exception_raised_correctly_with_args():
    """Positional and keyword args must be forwarded to the wrapped callable."""
    mock_function = Mock()
    mock_function.side_effect = CustomException
    args = ['a', 'b', 'c']
    kwargs = dict(alpha='alpha')
    # NOTE(review): assumes store_any_exception(func, args, kwargs) unpacks
    # these as func(*args, **kwargs) -- the assertion below checks exactly that.
    store_any_exception(mock_function, args, kwargs)
    mock_function.assert_called_with('a', 'b', 'c', alpha='alpha')
def a_function():
    """Helper that always raises CustomException."""
    raise CustomException('someexception')
|
import os
import sqlite3
# Ad-hoc exploration of the Northwind sample database (read-only queries).
CONN = sqlite3.connect('northwind_small.sqlite3')
cursor = CONN.cursor()

# Generating list of available tables
tables = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;").fetchall()
print("The available tables are: ", tables)

# What are the 10 most expensive items?
query1 = '''SELECT ProductName, UnitPrice FROM Product
ORDER BY UnitPrice DESC
LIMIT 10;'''
expensive = cursor.execute(query1).fetchall()
print(expensive)

# What is the average age of an employee at the time of their hiring?
# NOTE(review): sqlite subtracts the leading year components of the ISO date
# strings, yielding whole years only -- confirm that precision is acceptable.
query2 = '''SELECT AVG (HireDate - BirthDate) FROM Employee'''
avg_age_hire = cursor.execute(query2).fetchall()
avg_age_hire = str(avg_age_hire).strip('[](),')
print("The average age of an employee at the time of hiring is", avg_age_hire, ".")
# fix decimals as time permits.
#query3 = return for stretch as time permits

# Questions from next section
# What are the ten most expensive items (per unit price) and their suppliers?
query2_1 = '''SELECT ProductName, SupplierID,
UnitPrice FROM Product
ORDER BY UnitPrice DESC
LIMIT 10'''
supply = cursor.execute(query2_1).fetchall()
print("The ten most expensive items their supplier numbers, and prices are as follows:", supply)

# Which category holds the largest number of distinct products?
# BUG FIX: the join previously matched Product.Id to Category.Id, pairing
# every product with an unrelated category; the join key is CategoryId.
query2_2 = '''SELECT CategoryID, COUNT (DISTINCT ProductName)
FROM Product
INNER JOIN Category
on Product.CategoryId=Category.Id
GROUP BY (CategoryId)
ORDER BY COUNT(DISTINCT ProductName) DESC
Limit 1;'''
cat_with_prod = cursor.execute(query2_2).fetchall()
# (a duplicate unlabeled debug print of cat_with_prod was removed)
print ("The category with the largest number and its number of products are: ", cat_with_prod)

cursor.close()
CONN.commit()
|
import pytest
import sys
sys.path.insert(0,'..')
from Scanner import Lexer as Lex
class LexerTests(object):
    """Hand-rolled checks for the Scanner lexer.

    Each method feeds a source snippet to the lexer and returns True/False
    depending on whether the expected token reprs were produced.

    Bugs fixed versus the previous revision:
      * ``Scanner.Lex()`` raised NameError -- the import binds ``Lex`` directly.
      * several methods referenced misspelled local names (NameError).
      * ``floatassignmenttest`` fell through without returning False.
      * the lexer result was wrapped in a one-element list, so every
        membership check compared against the wrapper, not the tokens.
      * ``integerassignmenttest`` used ``integer and variable in ...``,
        which due to precedence never actually checked ``integer``.
      * two float tests passed the expected token repr to ``test_function``
        instead of the source snippet.
      * several expected reprs used ``1.14`` where ``1,14`` (line,pos)
        was intended.
    """

    def __str__(self):
        return "test class for lexer"

    def __init__(self):
        # BUG FIX: the module imports `from Scanner import Lexer as Lex`,
        # so `Scanner` itself is not a bound name here.
        self.lexer = Lex()
        self.lexer.build()
        self.variable = '''LexToken(VAR,'var',1,0)'''

    def integerassignmenttest(self):
        # integer assignment test
        assignmentinteger = '''var x = 10'''
        variable = '''LexToken(VAR,'var',1,0)'''
        integer = '''LexToken(integer,'10',1,9)'''
        # NOTE(review): test_function is assumed to return the token list;
        # it was previously wrapped in another list, defeating the checks.
        assignmentarray = self.lexer.test_function(assignmentinteger)
        # BUG FIX: `integer and variable in arr` only ever checked `variable`.
        if integer in assignmentarray and variable in assignmentarray:
            return True
        else:
            return False

    def floatassignmenttest(self):
        # float assignment test
        assignmentfloat = "var y = 100.0;"
        Float = '''LexToken(float,'100.0',1,9)'''
        assignmentarray = self.lexer.test_function(assignmentfloat)
        if self.variable in assignmentarray and Float in assignmentarray:
            return True
        else:
            # BUG FIX: was a bare `False` expression (implicitly returned None)
            return False

    def stringassignmenttest(self):
        # string assignment test
        assignmentstring = '''var x = "hello";'''
        string = '''LexToken(string,'hello;',1,9)'''
        assignmentarray = self.lexer.test_function(assignmentstring)
        if string in assignmentarray and self.variable in assignmentarray:
            return True
        else:
            return False

    def nilassignmenttest(self):
        # assignment of nil test
        assignmentnil = '''var x = nil;'''
        nil = '''LexToken(nil,'NIL',1,9)'''
        assignmentarray = self.lexer.test_function(assignmentnil)
        if nil in assignmentarray and self.variable in assignmentarray:
            return True
        else:
            return False

    def addingtwovariablestest(self):
        # adding two variables together test
        addingtwovariables = ''' var x = 10;
        var y = 200;
        x + y;'''
        plus = '''LexToken(plus,'+',1,27)'''
        assignmentarray = self.lexer.test_function(addingtwovariables)
        if plus in assignmentarray and self.variable in assignmentarray:
            return True
        else:
            return False

    def substractingtwovariablestest(self):
        # subtracting two variables
        substractingtwovariables = ''' var x = 10; var y = 10; x-y;'''
        subtraction = '''LexToken(minus,'-',1,26)'''
        # BUG FIX: was the misspelled name `substractiontwovariables`
        assignmentarray = self.lexer.test_function(substractingtwovariables)
        if subtraction in assignmentarray and self.variable in assignmentarray:
            return True
        else:
            return False

    def mutiplyingtwovariablestest(self):
        # multiply two variables together
        mutiplytwovariables = '''var x = 20; var y = 20; x * y;'''
        times = '''LexToken(times,'*',1,26)'''
        assignmentarray = self.lexer.test_function(mutiplytwovariables)
        if times in assignmentarray and self.variable in assignmentarray:
            return True
        else:
            return False

    def lessthanintegertest(self):
        # less than integer value
        lessthaninteger = ''' var x = 100; x<200;'''
        lessthan = '''LexToken(lessthan,'<',1,14)'''
        assignmentarray = self.lexer.test_function(lessthaninteger)
        if lessthan in assignmentarray and self.variable in assignmentarray:
            return True
        else:
            return False

    def lessthanequalintegertest(self):
        # less than equal integer
        lessthanequalinteger = ''' var x = 200; x<=200;'''
        lessthanequal = '''LexToken(lessthanequal,'<=',1,14)'''
        assignmentarray = self.lexer.test_function(lessthanequalinteger)
        if self.variable in assignmentarray and lessthanequal in assignmentarray:
            return True
        else:
            return False

    def lessthanfloattest(self):
        # less than float test
        lessthanfloat = '''var x = 100.0; x<200.0;'''
        # TODO(review): the source uses `<` yet the expectation is the
        # `lessthanequal` token (and the position looks stale) -- verify.
        lessthanequal = '''LexToken(lessthanequal,'<=',1,14)'''
        # BUG FIX: the expected-token string was passed to the lexer
        # instead of the source snippet.
        assignmentarray = self.lexer.test_function(lessthanfloat)
        if self.variable in assignmentarray and lessthanequal in assignmentarray:
            return True
        else:
            return False

    def lessthanequalfloattest(self):
        # less-than-or-equal floating point lexer rules
        lessthanequalfloat = '''var x = 100.0 x<=200.0'''
        # TODO(review): position 14 looks stale for this snippet -- verify.
        lessthanequal = '''LexToken(lessthanequal,'<=',1,14)'''
        # BUG FIX: the expected-token string was passed to the lexer
        # instead of the source snippet.
        assignmentarray = self.lexer.test_function(lessthanequalfloat)
        if self.variable in assignmentarray and lessthanequal in assignmentarray:
            return True
        else:
            return False

    def greaterthanintegertest(self):
        # greater-than integer test
        greaterthaninteger = '''var x = 200; x>100;'''
        # BUG FIX: was '1.14'; the repr format is line,position
        greaterthan = '''LexToken(greaterthan,'>',1,14)'''
        assignmentarray = self.lexer.test_function(greaterthaninteger)
        if self.variable in assignmentarray and greaterthan in assignmentarray:
            return True
        else:
            return False

    def greaterthanfloattest(self):
        # greater-than float test
        greaterthanfloat = '''var x = 100.0; x>200.0;'''
        # BUG FIX: was '1.14'. TODO(review): position may be stale -- verify.
        greaterthan = '''LexToken(greaterthan,'>',1,14)'''
        assignmentarray = self.lexer.test_function(greaterthanfloat)
        if self.variable in assignmentarray and greaterthan in assignmentarray:
            return True
        else:
            return False

    def greaterthanequalintegertest(self):
        # greater than equal integer
        greatherthanequalinteger = '''var x = 200; x>=200;'''
        # TODO(review): the source uses `>=` yet the expectation is the
        # plain `greaterthan` token -- verify. BUG FIX: was '1.14'.
        greaterthan = '''LexToken(greaterthan,'>',1,14)'''
        # BUG FIX: referenced the undefined name `greaterthaninteger`
        assignmentarray = self.lexer.test_function(greatherthanequalinteger)
        if self.variable in assignmentarray and greaterthan in assignmentarray:
            return True
        else:
            return False

    def greaterthanequalfloattest(self):
        # greater than equal float test
        greatherthaneqaulfloat = '''var x = 200.0; x>=200.0;'''
        # BUG FIX: was '1.14'
        greaterthan = '''LexToken(greaterthanequal,'>=',1,14)'''
        # BUG FIX: referenced the undefined name `greaterthanequalfloat`
        assignmentarray = self.lexer.test_function(greatherthaneqaulfloat)
        if self.variable in assignmentarray and greaterthan in assignmentarray:
            return True
        else:
            return False

    def booltestoftrue(self):
        # true keyword test
        trueassignment = '''var x = true;'''
        truestring = '''LexToken(TRUE,'true',1,9)'''
        assignmentarray = self.lexer.test_function(trueassignment)
        if self.variable in assignmentarray and truestring in assignmentarray:
            return True
        else:
            return False

    def booltestfalse(self):
        # false keyword test
        falseassignment = '''var x = false;'''
        falsestring = '''LexToken(FALSE,'false',1,9)'''
        assignmentarray = self.lexer.test_function(falseassignment)
        if self.variable in assignmentarray and falsestring in assignmentarray:
            return True
        else:
            return False
# Ad-hoc smoke run: executed on import rather than via a test runner.
tests = LexerTests()
tests.integerassignmenttest()
|
#!/usr/bin/env python
from gherkin.token_scanner import TokenScanner
from gherkin.parser import Parser
def validate_feature_file(feature_file, unallowed_tags):
    """Validates a feature file.

    Args:
        feature_file: a ``(file_status, feature_file_path)`` pair; only the
            path is used here.
        unallowed_tags: tag names that must not appear on the feature or on
            any scenario before committing.

    Returns:
        a list of errors (empty when the file is valid), headed by a
        summary line naming the file when any error was found.
    """
    # file_status is unpacked but unused; kept for the caller's tuple shape.
    file_status, feature_file_path = feature_file
    with open(feature_file_path, "r") as fp:
        contents = fp.read()
    parser = Parser()
    try:
        feature_file = parser.parse(TokenScanner(contents))
    except Exception as e:
        # unparsable file: report immediately, nothing else can be checked
        return ["[ERROR] Errors exist in " + feature_file_path, "\t- Could not parse the file! " + str(e)]
    errors = []
    feature_tag_names = [tag["name"] for tag in feature_file["feature"]["tags"]]
    scenarios = [feature_child for feature_child in feature_file["feature"]["children"] if feature_child['type'] == 'Scenario' or feature_child['type'] == 'ScenarioOutline']
    # validate tags in the feature
    for unallowed_tag in set(unallowed_tags).intersection(feature_tag_names):
        errors.append("\t- Remove the %s tag from the feature before you commit" % unallowed_tag)
    # validate tags in all the scenarios
    for scenario in scenarios:
        for tag in scenario["tags"]:
            if tag["name"] in unallowed_tags:
                errors.append("\t- Before you commit, remove the %s tag from the following scenario:\n\t\t'%s'" % (tag["name"], scenario["name"]))
    # validate scenario numbers
    # Scenario names must start with "N." or "Na." (a number with an optional
    # letter suffix) and run strictly in sequence, e.g. 1, 2, 2a, 2b, 3.
    prev_scenario_num = "0"
    for curr_scenario in scenarios:
        # validate prescence
        if "." not in curr_scenario["name"]:
            errors.append("\t- The following scenario needs to start with a number followed by a period: '%s'" % curr_scenario["name"])
            break
        curr_scenario_num = curr_scenario["name"].split(".")[0].strip()
        if not curr_scenario_num or curr_scenario_num.isalpha():
            errors.append("\t- The following scenario needs to start with a number: '%s'" % curr_scenario["name"])
            break
        # validate ordering
        if prev_scenario_num.isdigit():
            # previous scenario didn't have a letter
            if curr_scenario_num.isdigit():
                # current scenario doesn't have a letter
                if int(curr_scenario_num) != int(prev_scenario_num) + 1:
                    errors.append("\t- The ordering of the scenarios breaks down on Scenario '%s'" % curr_scenario_num)
                    break
            else:
                # current scenario has a letter: a new letter run must start at 'a'
                if curr_scenario_num[-1] != "a":
                    errors.append("\t- The ordering of the scenarios breaks down on Scenario '%s'" % curr_scenario_num)
                    break
        else:
            # previous scenario had a letter
            prev_scenario_letter = prev_scenario_num[-1]
            if curr_scenario_num.isdigit():
                # current scenario doesn't have a letter
                # NOTE(review): when the number is not prev+1, this compares
                # ord() of a digit with the successor of a letter, which can
                # never match -- confirm the intended rule for "2b -> 3".
                if int(curr_scenario_num) != int(prev_scenario_num[:-1]) + 1:
                    if ord(curr_scenario_num[-1]) != ord(prev_scenario_letter) + 1:
                        errors.append("\t- The ordering of the scenarios breaks down on Scenario '%s'" % curr_scenario_num)
                        break
            else:
                # current scenario has a letter
                if int(curr_scenario_num[:-1]) != int(prev_scenario_num[:-1]) + 1:
                    # number has not been incremented
                    if ord(curr_scenario_num[-1]) != ord(prev_scenario_letter) + 1:
                        errors.append("\t- The ordering of the scenarios breaks down on Scenario '%s'" % curr_scenario_num)
                        break
                else:
                    # number has been incremented
                    if curr_scenario_num[-1] != "a":
                        errors.append("\t- The ordering of the scenarios breaks down on Scenario '%s'" % curr_scenario_num)
                        break
        prev_scenario_num = curr_scenario_num
    if errors:
        errors.insert(0, "[ERROR] Errors exist in " + feature_file_path)
    return errors
|
from EntityEmbedding import EntityEmbedding |
#Screen State class
#This class is basically designed to handle the creation of all the objects, and a few other variables, more effectively:
class ScreenState:
    """Bundles every object and setting that makes up one game screen.

    Holds the drawing surface, the title/end-screen text objects, both
    paddles, the ball, the two score objects, the clock and target frame
    rate, plus the winner label (blank until a game ends).
    """

    def __init__(self, screen, title_text, endScreen_text, paddle1, paddle2, ball, paddle1Score, paddle2Score, clock, FPS):
        self.screen = screen
        self.title_text = title_text
        self.endScreen_text = endScreen_text
        self.paddle1 = paddle1
        self.paddle2 = paddle2
        self.ball = ball
        self.paddle1Score = paddle1Score
        self.paddle2Score = paddle2Score
        self.clock = clock
        self.FPS = FPS
        self.winner = " "

    def detectPaddle1Collision(self):
        """Bounce the ball off paddle 1 (left side) and recolour it to match."""
        b, pad = self.ball, self.paddle1
        reached_paddle = b.x - b.radius < pad.x + pad.width
        within_paddle_span = pad.y < b.y < pad.y + pad.height
        if reached_paddle and within_paddle_span:
            b.dx = -b.dx
            b.change_color(pad.color)

    def detectPaddle2Collision(self):
        """Bounce the ball off paddle 2 (right side) and recolour it to match."""
        b, pad = self.ball, self.paddle2
        reached_paddle = b.x + b.radius >= pad.x
        within_paddle_span = pad.y <= b.y < pad.y + pad.height
        if reached_paddle and within_paddle_span:
            b.dx = -b.dx
            b.change_color(pad.color)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'HaiFeng'
__mtime__ = '2016/9/13'
"""
import sys
import os
sys.path.append(os.path.join(sys.path[0], '..')) #调用父目录下的模块
from py_ctp.ctp_struct import *
from py_ctp.trade import Trade
from py_ctp.quote import Quote
import _thread
from time import sleep
class Test:
    """Manual integration test for the py_ctp Trade and Quote API wrappers.

    Flow: Run() connects the trade front -> (optional authenticate) -> login ->
    settlement confirm -> starts the quote front -> subscribes one instrument ->
    on the first tick places a limit buy 50 below last price -> cancels the
    order once it is seen queueing with no trade.
    """
    def __init__(self):
        self.Session = ''  # session id from trade login; used to recognise our own orders in OnRtnOrder
        self.q = Quote()  # market-data (quote) API wrapper
        self.t = Trade()  # trading API wrapper
        self.req = 0  # incrementing order-reference counter
        self.ordered = False  # guard: place only one order, on the first tick
        self.needAuth = False  # when True, ReqAuthenticate before login
        self.RelogEnable = True  # set False after a failed login to stop re-login on reconnect
    def q_OnFrontConnected(self):
        """Quote front connected: log in to the market-data front."""
        print('connected')
        self.q.ReqUserLogin(BrokerID=self.broker, UserID=self.investor, Password=self.pwd)
    def q_OnRspUserLogin(self, rsp, info, req, last):
        """Quote login response: subscribe the test instrument."""
        print(info)
        #insts = create_string_buffer(b'cu', 5)
        self.q.SubscribeMarketData('rb1810')
    def q_OnTick(self, tick):
        """Market-data callback: fire a single test order on the first tick."""
        f = CThostFtdcMarketDataField()  # NOTE(review): immediately rebound to `tick` below; the ctor call has no effect
        f = tick
        #print(tick)
        if not self.ordered:
            # Run the order insert off the quote callback thread,
            # presumably so the API callback is not blocked — confirm.
            _thread.start_new_thread(self.Order, (f,))
            self.ordered = True
    def Order(self, f):
        """Insert a limit buy order 50 below last price (expected to rest unfilled)."""
        print("报单")
        self.req += 1
        self.t.ReqOrderInsert(
            BrokerID= self.broker,
            InvestorID=self.investor,
            InstrumentID=f.getInstrumentID(),
            OrderRef= '{0:>12}'.format(self.req),
            UserID= self.investor,
            OrderPriceType=OrderPriceTypeType.LimitPrice,
            Direction=DirectionType.Buy,
            CombOffsetFlag= OffsetFlagType.Open.__char__(),
            CombHedgeFlag=HedgeFlagType.Speculation.__char__(),
            LimitPrice=f.getLastPrice() - 50,
            VolumeTotalOriginal=1,
            TimeCondition=TimeConditionType.GFD,
            #GTDDate=''
            VolumeCondition=VolumeConditionType.AV,
            MinVolume=1,
            ContingentCondition=ContingentConditionType.Immediately,
            StopPrice= 0,
            ForceCloseReason=ForceCloseReasonType.NotForceClose,
            IsAutoSuspend=0,
            IsSwapOrder=0,
            UserForceClose=0)
    def OnFrontConnected(self):
        """Trade front connected: authenticate or log in directly."""
        if not self.RelogEnable:
            return
        print('connected')
        if self.needAuth:
            # NOTE(review): hard-coded product info and auth code; move to config
            # before any non-demo use.
            self.t.ReqAuthenticate(self.broker, self.investor, '@haifeng', '8MTL59FK1QGLKQW2')
        else:
            self.t.ReqUserLogin(BrokerID=self.broker, UserID=self.investor, Password=self.pwd, UserProductInfo='@haifeng')
    def OnRspAuthenticate(self, pRspAuthenticateField=CThostFtdcRspAuthenticateField, pRspInfo=CThostFtdcRspInfoField, nRequestID=int, bIsLast=bool):
        """Authenticate response: proceed to login regardless of content (error is only printed)."""
        print('auth:{0}:{1}'.format(pRspInfo.getErrorID(), pRspInfo.getErrorMsg()))
        self.t.ReqUserLogin(BrokerID=self.broker, UserID=self.investor, Password=self.pwd, UserProductInfo='@haifeng')
    def OnRspUserLogin(self, rsp, info, req, last):
        """Trade login response: on success confirm settlement, else disable re-login."""
        i = CThostFtdcRspInfoField()  # NOTE(review): immediately rebound to `info`; ctor call has no effect
        i = info
        print(i.getErrorMsg())
        if i.getErrorID() == 0:
            self.Session = rsp.getSessionID()
            self.t.ReqSettlementInfoConfirm(BrokerID = self.broker, InvestorID = self.investor)
        else:
            self.RelogEnable = False
    def OnRspSettlementInfoConfirm(self, pSettlementInfoConfirm = CThostFtdcSettlementInfoConfirmField, pRspInfo = CThostFtdcRspInfoField, nRequestID = int, bIsLast = bool):
        """Settlement confirmed: start the quote side on its own thread."""
        #print(pSettlementInfoConfirm)
        _thread.start_new_thread(self.StartQuote, ())
    def StartQuote(self):
        """Create/register the quote API, wire callbacks and connect the second front address."""
        api = self.q.CreateApi()
        spi = self.q.CreateSpi()
        self.q.RegisterSpi(spi)
        self.q.OnFrontConnected = self.q_OnFrontConnected
        self.q.OnRspUserLogin = self.q_OnRspUserLogin
        self.q.OnRtnDepthMarketData = self.q_OnTick
        self.q.RegCB()
        self.q.RegisterFront(self.frontAddr.split(',')[1])
        self.q.Init()
        #self.q.Join()
    def Qry(self):
        """Poll account and position queries forever (flow-control: ~1 query/second)."""
        sleep(1.1)
        self.t.ReqQryInstrument()
        while True:
            sleep(1.1)
            self.t.ReqQryTradingAccount(self.broker, self.investor)
            sleep(1.1)
            self.t.ReqQryInvestorPosition(self.broker, self.investor)
        return  # unreachable: the loop above never exits
    def OnRtnInstrumentStatus(self, pInstrumentStatus = CThostFtdcInstrumentStatusField):
        """Instrument status push: intentionally ignored."""
        pass
    def OnRspOrderInsert(self, pInputOrder = CThostFtdcInputOrderField, pRspInfo = CThostFtdcRspInfoField, nRequestID = int, bIsLast = bool):
        """Order-insert rejection response: dump fields for debugging."""
        print(pRspInfo)
        print(pInputOrder)
        print(pRspInfo.getErrorMsg())
    def OnRtnOrder(self, pOrder = CThostFtdcOrderField):
        """Order push: cancel our own order once it rests in the queue untraded."""
        #print(pOrder)
        if pOrder.getSessionID() == self.Session and pOrder.getOrderStatus() == OrderStatusType.NoTradeQueueing:
            print("撤单")
            self.t.ReqOrderAction(
                self.broker, self.investor,
                InstrumentID=pOrder.getInstrumentID(),
                OrderRef=pOrder.getOrderRef(),
                FrontID=pOrder.getFrontID(),
                SessionID=pOrder.getSessionID(),
                ActionFlag=ActionFlagType.Delete)
    def Run(self):
        """Create/register the trade API, wire callbacks and connect (blocks in Join)."""
        # CreateApi uses a "log" directory; it must exist under the program
        # directory (**not** under the dll directory).
        api = self.t.CreateApi()
        spi = self.t.CreateSpi()
        self.t.RegisterSpi(spi)
        self.t.OnFrontConnected = self.OnFrontConnected
        self.t.OnRspUserLogin = self.OnRspUserLogin
        self.t.OnRspSettlementInfoConfirm = self.OnRspSettlementInfoConfirm
        self.t.OnRspAuthenticate = self.OnRspAuthenticate
        self.t.OnRtnInstrumentStatus = self.OnRtnInstrumentStatus
        self.t.OnRspOrderInsert = self.OnRspOrderInsert
        self.t.OnRtnOrder = self.OnRtnOrder
        #_thread.start_new_thread(self.Qry, ())
        self.t.RegCB()
        # NOTE(review): hard-coded SimNow-style demo front addresses and
        # credentials — looks like test-environment data; confirm before reuse.
        self.frontAddr = 'tcp://180.168.146.187:10000,tcp://180.168.146.187:10010'
        self.broker = '9999'
        self.investor = '008105'
        self.pwd = '1'
        self.t.RegisterFront(self.frontAddr.split(',')[0])
        self.t.SubscribePrivateTopic(nResumeType=2)#quick
        # NOTE(review): SubscribePrivateTopic is called twice with the same
        # argument — the second call looks redundant; confirm intent.
        self.t.SubscribePrivateTopic(nResumeType=2)
        self.t.Init()
        self.t.Join()
if __name__ == '__main__':
    t = Test()
    # Run() blocks inside t.Join() until the trade API terminates.
    t.Run()
    # Keep the console open after Join returns — presumably so callback
    # output stays visible; confirm.
    input()
|
# Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from lcm.pub.database.models import FPInstModel
from lcm.pub.msapi import extsys
from lcm.pub.msapi import sdncdriver
from lcm.ns_sfcs.biz.utils import get_fp_model_by_fp_inst_id
logger = logging.getLogger(__name__)
class CreateFlowClassifier(object):
    """Create a flow classifier on the SDN controller for an SFC forwarding-path instance.

    Workflow (see do_biz): load the classifier match criteria from the
    forwarding-path model, ask the SDN-controller driver to create the
    classifier, then persist the returned classifier id on the FPInstModel row.
    """

    def __init__(self, data):
        """:param data: dict with keys "ns_model_data" (NS model) and "fpinstid"."""
        self.ns_model_data = data["ns_model_data"]
        self.fp_inst_id = data["fpinstid"]
        # Classifier match criteria live under the FP model's "policy" property.
        self.flow_classifiers_model = get_fp_model_by_fp_inst_id(data["ns_model_data"], self.fp_inst_id)["properties"][
            "policy"]
        self.sdnControllerId = ""
        self.url = ""
        self.dscp = ""
        self.ip_proto = ""
        self.source_port_range = ""
        self.dest_port_range = ""
        self.source_ip_range = ""
        self.dest_ip_range = ""
        self.flow_classfier_id = ""

    def do_biz(self):
        """Entry point: create the flow classifier and record its id."""
        logger.info("CreateFlowClassifier start:")
        self.init_data(self.flow_classifiers_model)
        self.create_flow_classfier()
        self.update_fp_inst()
        logger.info("CreateFlowClassifier end:")

    def init_data(self, flow_classifiers_model):
        """Resolve the owning SDN controller and unpack the match criteria."""
        fp_database_info = FPInstModel.objects.filter(fpinstid=self.fp_inst_id).get()
        self.sdnControllerId = fp_database_info.sdncontrollerid
        self.url = extsys.get_sdn_controller_by_id(self.sdnControllerId)["url"]
        criteria = flow_classifiers_model["criteria"]
        self.dscp = criteria["dscp"]
        self.ip_proto = criteria["ip_protocol"]
        self.source_port_range = criteria["source_port_range"]
        self.dest_port_range = criteria["dest_port_range"]
        self.dest_ip_range = criteria["dest_ip_range"]
        self.source_ip_range = criteria["source_ip_range"]

    def update_fp_inst(self):
        """Persist the created classifier id on the forwarding-path instance row."""
        fp_inst_info = FPInstModel.objects.filter(fpinstid=self.fp_inst_id).get()
        fp_inst_info.flowclassifiers = self.flow_classfier_id
        FPInstModel.objects.filter(fpinstid=self.fp_inst_id).update(flowclassifiers=fp_inst_info.flowclassifiers)

    def create_flow_classfier(self):
        """Call the SDN-controller driver to create the classifier; store its id."""
        data = {
            "sdnControllerId": self.sdnControllerId,
            "url": self.url,
            "name": "",
            "description": "",
            "dscp": self.dscp,
            "ip_proto": self.ip_proto,
            "source_port_range": self.source_port_range,
            "dest_port_range": self.dest_port_range,
            "source_ip_range": self.concat_str(self.source_ip_range),
            "dest_ip_range": self.concat_str(self.dest_ip_range)
        }
        self.flow_classfier_id = sdncdriver.create_flow_classfier(data)

    def concat_str(self, str_list):
        """Join a sequence of strings with commas; returns "" for an empty sequence."""
        # str.join replaces the original manual loop, which was quadratic and
        # shadowed the builtin `str` with its loop variable.
        return ",".join(str_list)
|
#!/usr/bin/env python3
from python_custom_message.msg import CustomMessage
# Smoke test: instantiate a default CustomMessage and print it, verifying the
# generated message package is importable and its fields render.
cm = CustomMessage()
print("hello world")
print(cm)
|
from JumpScale import j
ActionsBase = j.atyourservice.getActionsBaseClass()
class Actions(ActionsBase):
    """AtYourService actions for this service template.

    No overrides: every lifecycle action falls through to the
    ActionsBase defaults.
    """
    pass
|
from ...models.nodes import DockerImage
from ...serialisers.nodes import DockerImageSerialiser
from ...permissions import IsAuthenticated, AllowNone
from .._UFDLBaseViewSet import UFDLBaseViewSet
class DockerImageViewSet(UFDLBaseViewSet):
    """Read-only API view-set over DockerImage records.

    Authenticated users may list and retrieve; every mutating action
    (create/update/partial_update/destroy) is denied via AllowNone.
    """
    queryset = DockerImage.objects.all()
    serializer_class = DockerImageSerialiser
    # Per-action permission map — presumably interpreted action-by-action by
    # UFDLBaseViewSet (not standard DRF, which takes a list); confirm there.
    permission_classes = {
        "list": IsAuthenticated,
        "create": AllowNone,
        "retrieve": IsAuthenticated,
        "update": AllowNone,
        "partial_update": AllowNone,
        "destroy": AllowNone
    }
|
import ecs
import eks
import iam
import mwaa
import s3
import vpc
|
desc = """# JustWatchAPI
JustWatch.com Python 3 API
How To
----------------------------------------------------------
search for an item
----------------------------------------------------------
from justwatchapi import JustWatch
just_watch = JustWatch()
results = just_watch.search_for_item(query='the matrix')
----------------------------------------------------------
or search for combination of genres
----------------------------------------------------------
just_watch = JustWatch(genres=['act', 'scf', 'hrr'])
results_by_genres = just_watch.search_for_item()
----------------------------------------------------------
or maybe search by provider
----------------------------------------------------------
just_watch = JustWatch()
results_by_providers = just_watch.search_for_item(providers=['nfx', 'stn'])
----------------------------------------------------------
or possibly a combination of the above
----------------------------------------------------------
just_watch = JustWatch()
results_by_multiple = just_watch.search_for_item(
providers=['nfx', 'stn'],
content_types=['movie'],
monetization_types=['free'])
----------------------------------------------------------
Read api_payload.txt for more information"""
import os
from setuptools import setup
setup(
name = "JustWatch",
version = "0.5.1",
author = "Dawoud Tabboush",
author_email = "dtabboush@gmail.com",
description = ("A simple api for justwatch.com"),
license = "MIT",
keywords = "movies tv api",
url = "https://github.com/dawoudt/JustWatchAPI",
packages=['justwatch'],
long_description=desc,
platforms='any',
install_requires=[
'requests>=2.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from toscaparser import tosca_template
import unittest
import yaml
from apmec.common import utils
from apmec.plugins.common import constants as evt_constants
from apmec.tests import constants
from apmec.tests.functional import base
from apmec.tests.utils import read_file
from apmec.catalogs.tosca import utils as toscautils
CONF = cfg.CONF
SOFTWARE_DEPLOYMENT = 'OS::Heat::SoftwareDeployment'
class MeaTestToscaMEAC(base.BaseApmecTest):
    """Functional test: full create/validate/delete cycle of a TOSCA MEAC."""

    @unittest.skip("Until BUG 1673012")
    def test_create_delete_tosca_meac(self):
        """Create a MEAD from a TOSCA template, instantiate a MEA from it,
        validate its mgmt URLs and Heat SoftwareDeployment status, then
        delete the MEA and (via cleanup) the MEAD."""
        input_yaml = read_file('sample_tosca_meac.yaml')
        tosca_dict = yaml.safe_load(input_yaml)
        path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "../../etc/samples"))
        mead_name = 'sample-tosca-meac'
        # Point the template's create-lifecycle script at the local sample file.
        tosca_dict['topology_template']['node_templates'
                                        ]['firewall_meac'
                                          ]['interfaces'
                                            ]['Standard']['create'] = path \
            + '/install_meac.sh'
        tosca_arg = {'mead': {'name': mead_name,
                              'attributes': {'mead': tosca_dict}}}
        # Create mead with tosca template
        mead_instance = self.client.create_mead(body=tosca_arg)
        self.assertIsNotNone(mead_instance)
        # Create mea with mead_id
        mead_id = mead_instance['mead']['id']
        mea_arg = {'mea': {'mead_id': mead_id, 'name':
                           "test_tosca_meac"}}
        mea_instance = self.client.create_mea(body=mea_arg)
        mea_id = mea_instance['mea']['id']
        self.wait_until_mea_active(mea_id,
                                   constants.MEAC_CREATE_TIMEOUT,
                                   constants.ACTIVE_SLEEP_TIME)
        self.assertEqual('ACTIVE',
                         self.client.show_mea(mea_id)['mea']['status'])
        self.validate_mea_instance(mead_instance, mea_instance)
        # Two PENDING_CREATE events are expected, then one ACTIVE.
        self.verify_mea_crud_events(
            mea_id, evt_constants.RES_EVT_CREATE, evt_constants.PENDING_CREATE,
            cnt=2)
        self.verify_mea_crud_events(
            mea_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
        # Validate mgmt_url with input yaml file
        mgmt_url = self.client.show_mea(mea_id)['mea']['mgmt_url']
        self.assertIsNotNone(mgmt_url)
        mgmt_dict = yaml.safe_load(str(mgmt_url))
        input_dict = yaml.safe_load(input_yaml)
        toscautils.updateimports(input_dict)
        tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                             yaml_dict_tpl=input_dict)
        vdus = toscautils.findvdus(tosca)
        # Every VDU from the template must expose a valid IPv4 mgmt address.
        self.assertEqual(len(vdus), len(mgmt_dict.keys()))
        for vdu in vdus:
            self.assertIsNotNone(mgmt_dict[vdu.name])
            self.assertEqual(True, utils.is_valid_ipv4(mgmt_dict[vdu.name]))
        # Check the status of SoftwareDeployment
        heat_stack_id = self.client.show_mea(mea_id)['mea']['instance_id']
        resource_types = self.h_client.resources
        resources = resource_types.list(stack_id=heat_stack_id)
        for resource in resources:
            resource = resource.to_dict()
            if resource['resource_type'] == \
                    SOFTWARE_DEPLOYMENT:
                self.assertEqual('CREATE_COMPLETE',
                                 resource['resource_status'])
                break
        # Delete mea_instance with mea_id
        try:
            self.client.delete_mea(mea_id)
        except Exception:
            assert False, "mea Delete of test_mea_with_multiple_vdus failed"
        self.wait_until_mea_delete(mea_id,
                                   constants.MEA_CIRROS_DELETE_TIMEOUT)
        self.verify_mea_crud_events(mea_id, evt_constants.RES_EVT_DELETE,
                                    evt_constants.PENDING_DELETE, cnt=2)
        # Delete mead_instance
        self.addCleanup(self.client.delete_mead, mead_id)
|
# -*- coding: utf-8 -*-
"""
id command module.
"""
__author__ = 'Michal Ernst, Marcin Usielski'
__copyright__ = 'Copyright (C) 2018-2020, Nokia'
__email__ = 'michal.ernst@nokia.com, marcin.usielski@nokia.com'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
from moler.util.converterhelper import ConverterHelper
class Id(GenericUnixCommand):
    """Unix ``id`` command: parses uid/gid/groups output into structured lists.

    Result shape: {'UID': [...], 'GID': [...], 'GROUPS': [...]} where each
    entry is {'ID': <int>, 'NAME': <str>}.
    """

    def __init__(self, connection, user=None, prompt=None, newline_chars=None, runner=None):
        """
        :param connection: Moler connection to device, terminal when command is executed.
        :param user: user name in system.
        :param prompt: prompt (on system where command runs).
        :param newline_chars: Characters to split lines - list.
        :param runner: Runner to run command.
        """
        super(Id, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
        # Parameters defined by calling the command
        self.user = user
        self._converter_helper = ConverterHelper.get_converter_helper()

    def build_command_string(self):
        """
        Builds command string from parameters passed to object.
        :return: String representation of command to send over connection to device.
        """
        cmd = "id"
        if self.user:
            cmd = "{} {}".format(cmd, self.user)
        return cmd

    def on_new_line(self, line, is_full_line):
        """
        Put your parsing code here.
        :param line: Line to process, can be only part of line. New line chars are removed from line.
        :param is_full_line: True if line had new line chars, False otherwise
        :return: None
        """
        if is_full_line:
            try:
                self._parse_uid_gid_groups(line)
            except ParsingDone:
                pass  # line has been fully parsed by one of above parse-methods
        return super(Id, self).on_new_line(line, is_full_line)

    # uid=1000(user) gid=1000(user) groups=1000(user),24(cdrom),25(floppy),29(audio),30(dip),44(video),46(plugdev),
    # 108(netdev),110(lpadmin),113(scanner),118(bluetooth)
    _re_uid_gid_groups = re.compile(r"uid=(?P<UID>\S+)\s+gid=(?P<GID>\S+)\s+groups=(?P<GROUPS>\S+)")
    # Keys in the result dict; must match the named groups of _re_uid_gid_groups.
    _ret_dict_key = ['UID', 'GID', 'GROUPS']

    def _parse_uid_gid_groups(self, line):
        # Delegates to the shared processor; raises ParsingDone on a match.
        return self._process_line_uid_gid_groups(line, Id._re_uid_gid_groups)

    def _process_line_uid_gid_groups(self, line, regexp):
        # search_compiled stores the match on the shared _regex_helper, which
        # _parse_single_group then reads back via .group(...).
        if self._regex_helper.search_compiled(regexp, line):
            self._parse_single_group()
            raise ParsingDone

    def _parse_single_group(self, ):
        # Each section (UID/GID/GROUPS) is a comma-separated run of
        # "<number>(<name>)" tokens; extract every pair into the result dict.
        _re_id_name = re.compile(r"((\d+?)\((\S+?)\)\,?)")
        for key in Id._ret_dict_key:
            self.current_ret[key] = []
            _id_name_values = self._regex_helper.group(key)
            _id_name_list = re.findall(_re_id_name, _id_name_values)
            self._add_single_entry_to_ret_dict(_id_name_list, key)

    def _add_single_entry_to_ret_dict(self, _id_name_list, key):
        # Each findall tuple is (full_token, id_digits, name).
        for _id_name_entry in _id_name_list:
            self.current_ret[key].append(
                {
                    "ID": self._converter_helper.to_number(_id_name_entry[1]),
                    "NAME": _id_name_entry[2]
                }
            )
# -----------------------------------------------------------------------------
# Following documentation is required for library CI.
# It is used to perform command self-test.
# Parameters:
# user is Optional.User for Unix id command
# -----------------------------------------------------------------------------
# The COMMAND_OUTPUT_* / COMMAND_KWARGS_* / COMMAND_RESULT_* triplets below are
# matched by suffix: the self-test builds Id(**KWARGS), feeds it OUTPUT and
# compares the parsed result against RESULT. Keep them byte-consistent.
COMMAND_OUTPUT_ver_execute = """
host:~ # id user
uid=1000(user) gid=1000(user) groups=1000(user),24(cdrom),25(floppy),29(audio),30(dip),44(video),46(plugdev),108(netdev),110(lpadmin),113(scanner),118(bluetooth)
host:~ #
"""
COMMAND_KWARGS_ver_execute = {'user': 'user'}
COMMAND_RESULT_ver_execute = {
    'UID': [
        {
            'ID': 1000,
            'NAME': 'user'
        },
    ],
    'GID': [
        {
            'ID': 1000,
            'NAME': 'user'
        }
    ],
    'GROUPS': [
        {
            'ID': 1000,
            'NAME': 'user'
        },
        {
            'ID': 24,
            'NAME': 'cdrom'
        },
        {
            'ID': 25,
            'NAME': 'floppy'
        },
        {
            'ID': 29,
            'NAME': 'audio'
        },
        {
            'ID': 30,
            'NAME': 'dip'
        },
        {
            'ID': 44,
            'NAME': 'video'
        },
        {
            'ID': 46,
            'NAME': 'plugdev'
        },
        {
            'ID': 108,
            'NAME': 'netdev'
        },
        {
            'ID': 110,
            'NAME': 'lpadmin'
        },
        {
            'ID': 113,
            'NAME': 'scanner'
        },
        {
            'ID': 118,
            'NAME': 'bluetooth'
        }
    ]
}
|
from socket import inet_ntoa
from struct import pack
def calcDottedNetmask(mask):
    """Convert a CIDR prefix length (0-32) to a dotted-quad netmask string."""
    host_bits = 32 - mask
    # Keep the top `mask` bits set, clear the low `host_bits` bits.
    if host_bits:
        netmask = (0xffffffff >> host_bits) << host_bits
    else:
        netmask = 0xffffffff
    return inet_ntoa(pack('>I', netmask))
def _convertCiscoCrypt(crypt):
return str(crypt).replace(' ', '-',1)
def _convertCiscoHash(hash):
return str(hash).replace('1','',1)
def _convertCiscoP2hash(hash):
p2hash = {
'hmac_sha1': 'esp-sha-hmac',
'hmac_md5' : 'esp-md5-hmac',
}
return p2hash.get(hash, hash)
def _convertCiscoP2Crypt(crypt):
p2Crypt = {
'aes 128': 'esp-aes-128',
'aes 192': 'esp-aes-192',
'aes 256': 'esp-aes-256',
'3des': 'esp-3des-hmac',
'des': 'esp-des-hmac',
}
return p2Crypt.get(crypt, crypt)
def exportAsaProfile(exportedvpn, subnets):
    """Print a Cisco ASA site-to-site IPsec config for each exported Meraki VPN.

    :param exportedvpn: iterable of VPN dicts (ipsec_policies, publicIp,
                        privateSubnets, secret).
    :param subnets: list of local 'a.b.c.d/len' CIDR strings to pair with
                    each VPN's first private subnet.
    Output is print()ed, not returned.
    """
    num = 0
    allAcls = []
    for v in exportedvpn:
        # Translate Meraki algorithm names into Cisco keywords.
        ciscoP1Crypt = _convertCiscoCrypt(v['ipsec_policies']['config']['phase1_crypt_algo'])
        ciscoP1Hash = _convertCiscoHash(v['ipsec_policies']['config']['phase1_hash_algo'])
        ciscoP2Crypt = _convertCiscoP2Crypt(v['ipsec_policies']['config']['phase2_crypt_algo'][0])
        ciscoP2Hash = _convertCiscoP2hash(v['ipsec_policies']['config']['phase2_auth_algo'][0])
        vpnProfile = """isakmp policy 1
 authentication pre-share
 encryption {p1enc}
 hash {p1hash}
 group {p1group}
 lifetime {p1lifetime}
!""".format(p1enc=ciscoP1Crypt,p1hash=ciscoP1Hash,p1group=v['ipsec_policies']['config']['dh_group'],p1lifetime=v['ipsec_policies']['config']['phase1_lifetime'])
        vpnP2Profile = """crypto ipsec transform-set meraki {p2crypt} {p2hash}""".format(p2crypt=ciscoP2Crypt, p2hash=ciscoP2Hash )
        print(vpnProfile)
        print(vpnP2Profile)
        print('!')
        # One ACL entry per local subnet, matched against the VPN's first
        # private subnet only.
        for sn in subnets:
            num+= 1
            srcNet = str(sn).split('/')[0]
            srcMask = calcDottedNetmask(int((sn).split('/')[1]))
            dstMask = calcDottedNetmask(int((v['privateSubnets'][0]).split('/')[1]))
            dstNet = str(v['privateSubnets'][0]).split('/')[0]
            # NOTE(review): the ACL places {destIp} before {sourceIp}; looks
            # inverted relative to the variable names — confirm intended
            # direction of the access-list.
            acl = """access-list meraki{num} extended permit ip {destIp} {destMask} {sourceIp} {sourceMask}""".format(num=num, sourceIp=srcNet, sourceMask=srcMask, destIp=dstNet, destMask=dstMask)
            allAcls.append('meraki' + str(num),)
            print(acl)
        tunnelGroup = """tunnel-group {remoteIP} type ipsec-l2l
tunnel-group {remoteIP} ipsec-attributes
 pre-shared-key {secret}""".format(remoteIP=v['publicIp'], secret=v['secret'])
        print('!')
        print(tunnelGroup)
        print('!')
        # NOTE(review): `num` is reset per VPN but `allAcls` keeps growing
        # across VPNs, so crypto-map sequence numbers repeat and every VPN
        # re-prints all earlier VPNs' ACL bindings — confirm this is intended.
        num = 0
        for acl in allAcls:
            num += 1
            cryptoMap = """crypto map L2L {num} match address {acl}
crypto map L2L {num} set peer {remoteIP}
crypto map L2L {num} set transform-set meraki
crypto map L2L interface outside""". format(acl=acl, num=num, remoteIP=v['publicIp'])
            print(cryptoMap)
            print('!')
class Post:
    """Product Hunt post record built from an API payload.

    Scalar fields are stored as-is; nested payload sections (user, comments,
    votes, links, makers, ...) are parsed into model objects via the
    package-level `helpers` module.
    """
    def __init__(self, post_id, name, tagline, created_at, day, comments_count, votes_count, discussion_url,
                 redirect_url, screenshot_url, maker_inside, user, current_user, comments=None, votes=None,
                 related_links=None, install_links=None, related_posts=None, media=None, description=None, topics=None,
                 external_links=None, featured=None, exclusive=None, product_state=None,
                 category_id=None, badges=None, reviews_count=None, positive_reviews_count=None,
                 negative_reviews_count=None, neutral_reviews_count=None, makers=None, platforms=None):
        # Imported inside the method, presumably to avoid a circular import
        # between the models package and helpers — confirm.
        from .. import helpers
        self.id = post_id
        self.name = name
        self.tagline = tagline
        self.created_at = created_at
        self.day = day
        self.comments_count = comments_count
        self.votes_count = votes_count
        self.discussion_url = discussion_url
        self.redirect_url = redirect_url
        self.screenshot_url = screenshot_url
        self.maker_inside = maker_inside
        self.current_user = current_user
        # Nested structures parsed into model objects.
        self.user = helpers.parse_users(user)
        self.comments = helpers.parse_comments(comments)
        self.votes = helpers.parse_votes(votes)
        self.related_links = helpers.parse_related_links(related_links)
        self.install_links = helpers.parse_install_links(install_links)
        # Optional / detail fields (None when not present in the payload).
        self.description = description
        self.featured = featured
        self.exclusive = exclusive
        self.product_state = product_state
        self.category_id = category_id
        self.reviews_count = reviews_count
        self.positive_reviews_count = positive_reviews_count
        self.negative_reviews_count = negative_reviews_count
        self.neutral_reviews_count = neutral_reviews_count
        self.makers = helpers.parse_users(makers)
        self.platforms = helpers.parse_platforms(platforms)
        self.topics = helpers.parse_topics(topics)
        self.external_links = helpers.parse_external_links(external_links)  # around the web
        self.badges = helpers.parse_badges(badges)
        self.related_posts = helpers.parse_related_posts(related_posts)
        self.media = helpers.parse_media(media)
|
from flask import Flask
import json
import jwt
from pytest import fixture, mark, raises as py_raises
from pytest_bdd import scenarios, given, when, then, parsers
import werkzeug
from api import app, identities
import bearers
from users import Users
# Bind every scenario from the personas feature file to this module.
scenarios('../features/personas.feature')
# Tag all tests in this module as API tests.
pytestmark = mark.api
@fixture
def _context():
    """Fresh per-test context: resets the identity store and returns an
    empty attribute bag for steps to share state through."""
    print("getting a new context")
    identities.store = Users()
    class Context:
        pass
    return Context()
@fixture
def _api():
    """Flask test client for the API under test."""
    with app.test_client() as client:
        yield client
@given("a surfer navigating the site")
def no_user_record(_context, _api):
    """Anonymous visitor: no bearer token."""
    _context.proxy_bearer = ''
@given(parsers.parse("a surfer navigating the site protected by secret '{secret}'"))
def set_secret(_context, _api, secret):
    """Configure the registration secret on the identity store."""
    identities.store.set_bearer_secret(secret)
    _context.bearer_secret = secret
@given(parsers.parse("a user authenticated as '{name}' and as persona '{persona}'"))
def add_proxy(_context, _api, name, persona, password='P455w@rd'):
    """Create a user with the given persona, log them in and keep their
    bearer token for later steps."""
    identities.store.write(id=name, password=password, persona=persona, e_mail='a@b.c')
    response = _api.post('/login',
                         json=dict(id=name, password=password),
                         content_type='application/json')
    assert response.status_code == 200
    payload = json.loads(response.get_data().decode())
    _context.proxy_name = name
    _context.proxy_bearer = payload.get('bearer', None)
@when(parsers.parse("surfer '{name}' registers with password '{password}'"))
def self_registrant(_context, _api, name, password):
    """Anonymous self-registration; '-' as name means 'no e-mail provided'."""
    e_mail = None if name == '-' else 'a@b.c'
    response = _api.post('/register',
                         json=dict(id=name, e_mail=e_mail, password=password),
                         content_type='application/json')
    # Either created or rejected; the `then` step decides which was expected.
    assert response.status_code in [201, 400]
    _context.status_code = response.status_code
@when(parsers.parse("surfer '{name}' registers with secret '{secret}' and password '{password}'"))
def self_support(_context, _api, name, secret, password):
    """Self-registration with the site secret: must always succeed."""
    assert _context.bearer_secret == secret
    identities.store.set_bearer_secret(_context.bearer_secret)
    response = _api.post('/register',
                         json=dict(id=name, e_mail='a@b.c', secret=secret, password=password),
                         content_type='application/json')
    assert response.status_code == 201
@when(parsers.parse("user '{support}' registers identity '{name}' with password '{password}' and persona '{persona}'"))
def add_registrant(_context, _api, support, name, password, persona):
    """Registration of a new identity on behalf of an authenticated user,
    passing their bearer token in the X-Bearer header."""
    assert _context.proxy_name == support
    _api.environ_base['HTTP_X_BEARER'] = _context.proxy_bearer
    response = _api.post('/register',
                         json=dict(id=name, e_mail='a@b.c', persona=persona, password=password),
                         content_type='application/json')
    _context.status_code = response.status_code
@when(parsers.parse("user '{promoter}' promotes '{name}' to '{persona}'"))
def promote_registrant(_context, _api, promoter, name, persona):
    """Authenticated persona change via PUT /users/<name>."""
    assert _context.proxy_name == promoter
    _api.environ_base['HTTP_X_BEARER'] = _context.proxy_bearer
    response = _api.put(f"/users/{name}",
                        json=dict(id=name, persona=persona),
                        content_type='application/json')
    _context.status_code = response.status_code
@when(parsers.parse("user '{support}' modifies profile of '{name}' with password '{password}'"))
def modify_profile(_context, _api, support, name, password):
    """Authenticated password change via PUT /users/<name>."""
    assert _context.proxy_name == support
    _api.environ_base['HTTP_X_BEARER'] = _context.proxy_bearer
    response = _api.put(f"/users/{name}",
                        json=dict(id=name, password=password),
                        content_type='application/json')
    _context.status_code = response.status_code
@when(parsers.parse("user '{support}' deletes profile of '{name}'"))
def delete_profile(_context, _api, support, name):
    """Authenticated profile deletion via DELETE /users/<name>.

    Fix: removed the unused `password` parameter. It does not occur in the
    step pattern, so pytest-bdd would try to resolve it as a fixture and
    fail with "fixture 'password' not found" when the step runs.
    """
    assert _context.proxy_name == support
    _api.environ_base['HTTP_X_BEARER'] = _context.proxy_bearer
    response = _api.delete(f"/users/{name}")
    _context.status_code = response.status_code
@then("the operation is denied")
def deny_operation(_context):
    """The previous `when` step must have been rejected (400 or 403)."""
    assert _context.status_code in [400, 403]
@then(parsers.parse("surfer '{name}' has persona '{persona}'"))
def check_persona(_context, _api, name, persona):
    """The stored record carries the expected persona."""
    record = identities.store.read(id=name)
    assert record['id'] == name
    assert record['persona'] == persona
@then(parsers.parse("surfer '{name}' does NOT have persona '{persona}'"))
def persona_has_not_been_modified(_context, _api, name, persona):
    """The stored record's persona was left unchanged."""
    record = identities.store.read(id=name)
    assert record['id'] == name
    assert record['persona'] != persona
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.