hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
407d1646bf146f4c4bf795716d395c1a55d649fd | 1,913 | py | Python | breveIDE_windows_2.7.2_2/breveIDE_2.7.2/demos/Test/test2.py | Lamouse/Evolutionary-Creativity | 9e9a4094285241d0541e0b87a3bd2c5e4ba804d3 | [
"MIT"
] | null | null | null | breveIDE_windows_2.7.2_2/breveIDE_2.7.2/demos/Test/test2.py | Lamouse/Evolutionary-Creativity | 9e9a4094285241d0541e0b87a3bd2c5e4ba804d3 | [
"MIT"
] | null | null | null | breveIDE_windows_2.7.2_2/breveIDE_2.7.2/demos/Test/test2.py | Lamouse/Evolutionary-Creativity | 9e9a4094285241d0541e0b87a3bd2c5e4ba804d3 | [
"MIT"
] | null | null | null | import breve
class Test( breve.Control ):
'''def iterate( self ):
self.object.energy -= self.increm
if 0 >= self.object.energy or self.object.energy >= 1:
self.increm = 0
self.object.adjustSize()
if self.increm != 0:
print self.object.temp'''
breve.Test = Test
breve.CustomObject = CustomObject
breve.myCustomShape = myCustomShape
Test() | 23.9125 | 102 | 0.639833 | import breve
class Test( breve.Control ):
def __init__( self ):
breve.Control.__init__( self )
self.object = None
self.increm = 0.1
Test.init( self )
def init( self ):
self.setBackgroundColor( breve.vector( 0, 0, 0 ) )
self.setDisplayTextColor( breve.vector( 1, 1, 1 ) )
self.setIterationStep(1.0)
self.object = breve.createInstances( breve.CustomObject, 1)
'''def iterate( self ):
self.object.energy -= self.increm
if 0 >= self.object.energy or self.object.energy >= 1:
self.increm = 0
self.object.adjustSize()
if self.increm != 0:
print self.object.temp'''
breve.Test = Test
class CustomObject(breve.Stationary ):
def __init__( self ):
breve.Stationary.__init__( self )
self.shape = None
self.lastScale = 1
self.energy = 1
self.temp = 1
CustomObject.init( self )
def adjustSize( self ):
newScale = ( ( (self.energy+0.5) * 10 ) + 0.500000 )
self.temp *= (newScale / self.lastScale)
self.shape.scale( breve.vector( ( newScale / self.lastScale ), 1, ( newScale / self.lastScale ) ) )
self.lastScale = newScale
def init( self ):
self.shape = breve.createInstances( breve.myCustomShape, 1 )
self.setShape( self.shape )
#self.adjustSize()
self.temp = 1
shape = 4.5
self.shape.scale( breve.vector( shape , 1, shape ) )
breve.CustomObject = CustomObject
class myCustomShape( breve.CustomShape ):
def __init__( self ):
breve.CustomShape.__init__( self )
self.vertices = breve.objectList()
myCustomShape.init( self )
def init( self ):
self.vertices[ 0 ] = breve.vector( 0.1, 0, 0 )
self.vertices[ 1 ] = breve.vector( -0.1, 0, 0 )
self.vertices[ 2 ] = breve.vector( 0, 0.5, 0 )
self.addFace( [ self.vertices[ 0 ], self.vertices[ 1 ], self.vertices[ 2 ] ] )
self.finishShape( 1.000000 )
breve.myCustomShape = myCustomShape
Test() | 1,264 | 37 | 226 |
a166d7c4f62757432e3b3ef641cd72b383c41da8 | 1,925 | py | Python | development/analyze_table.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | [
"Apache-2.0"
] | null | null | null | development/analyze_table.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | [
"Apache-2.0"
] | null | null | null | development/analyze_table.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""
Tool to analyze some datalogger raw data
"""
from __future__ import print_function
import os
import sys
import argparse
import json
parser = argparse.ArgumentParser(description="Tool to analyze some datalogger raw data")
parser.add_argument("-i", "--input-file", help="file to read from", required=True)
options = parser.parse_args("-i /var/rrd/snmp/raw/ifTable_2017-11-15.csv".split())
if not os.path.isfile(options.input_file):
print("file %s does not exist" % options.input_file)
sys.exit(1)
data = {}
meta = {}
meta["delimiter"] = "\t"
meta["index_keynames"] = ("hostname", "ifDescr")
meta["ts_keyname"] = "ts"
meta["interval"] = 300
headers = None
with open(options.input_file, "rt") as infile:
for line in infile.read().split("\n"):
if line == "" or line == "\n":
continue
if headers is None:
headers = line.split(meta["delimiter"])
meta["headers"] = headers
data["length"] = len(headers)
for header in headers:
data[header] = {
"isnumeric" : True,
"interval" : 0
}
assert meta["ts_keyname"] in headers
assert all((index_key in headers for index_key in meta["index_keynames"]))
else:
columns = line.split(meta["delimiter"])
assert len(columns) == data["length"]
for index, column in enumerate(columns):
data[headers[index]]["isnumeric"] = all((data[headers[index]]["isnumeric"], column.isnumeric()))
print(line)
meta["value_keynames"] = dict([(header, "asis") for header in headers if data[header]["isnumeric"] == True])
meta["blacklist"] = [header for header in headers if (data[header]["isnumeric"] == False) and (header not in meta["index_keynames"]) and (header != meta["ts_keyname"])]
print(json.dumps(meta, indent=4, sort_keys=True))
| 40.104167 | 168 | 0.618701 | #!/usr/bin/python
"""
Tool to analyze some datalogger raw data
"""
from __future__ import print_function
import os
import sys
import argparse
import json
parser = argparse.ArgumentParser(description="Tool to analyze some datalogger raw data")
parser.add_argument("-i", "--input-file", help="file to read from", required=True)
options = parser.parse_args("-i /var/rrd/snmp/raw/ifTable_2017-11-15.csv".split())
if not os.path.isfile(options.input_file):
print("file %s does not exist" % options.input_file)
sys.exit(1)
data = {}
meta = {}
meta["delimiter"] = "\t"
meta["index_keynames"] = ("hostname", "ifDescr")
meta["ts_keyname"] = "ts"
meta["interval"] = 300
headers = None
with open(options.input_file, "rt") as infile:
for line in infile.read().split("\n"):
if line == "" or line == "\n":
continue
if headers is None:
headers = line.split(meta["delimiter"])
meta["headers"] = headers
data["length"] = len(headers)
for header in headers:
data[header] = {
"isnumeric" : True,
"interval" : 0
}
assert meta["ts_keyname"] in headers
assert all((index_key in headers for index_key in meta["index_keynames"]))
else:
columns = line.split(meta["delimiter"])
assert len(columns) == data["length"]
for index, column in enumerate(columns):
data[headers[index]]["isnumeric"] = all((data[headers[index]]["isnumeric"], column.isnumeric()))
print(line)
meta["value_keynames"] = dict([(header, "asis") for header in headers if data[header]["isnumeric"] == True])
meta["blacklist"] = [header for header in headers if (data[header]["isnumeric"] == False) and (header not in meta["index_keynames"]) and (header != meta["ts_keyname"])]
print(json.dumps(meta, indent=4, sort_keys=True))
| 0 | 0 | 0 |
3f075b961c96ec481153e8a963e1124cc22fa1f5 | 640 | py | Python | .hooks/python/this.py | Matej-Chmel/pwman | 6ca7aff2ebe51be703647d67fc87ac4b3862e68a | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | .hooks/python/this.py | Matej-Chmel/pwman | 6ca7aff2ebe51be703647d67fc87ac4b3862e68a | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | .hooks/python/this.py | Matej-Chmel/pwman | 6ca7aff2ebe51be703647d67fc87ac4b3862e68a | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | from os.path import *
REPO_DIR = abspath(join(dirname(realpath(__file__)), pardir, pardir))
TOKEN_PATH = join(REPO_DIR, '.hooks', '.token')
VERSION_PATH = join(REPO_DIR, 'res', 'version.txt')
| 29.090909 | 80 | 0.723438 | from os.path import *
REPO_DIR = abspath(join(dirname(realpath(__file__)), pardir, pardir))
TOKEN_PATH = join(REPO_DIR, '.hooks', '.token')
VERSION_PATH = join(REPO_DIR, 'res', 'version.txt')
class this:
version = None
def read_local_version(reason=None):
if reason is not None:
print(f'{reason}\nReading latest version from local file.')
try:
with open(VERSION_PATH) as file:
this.version = int(file.read())
except OSError:
print('File res/version.txt not found. Assumed version 0.')
this.version = 0
except ValueError:
print('File content could not be converted to an integer. Assumed version 0.')
this.version = 0
| 395 | 6 | 46 |
5010b73121964b1e2039f7c97a78c65b09242001 | 21,089 | py | Python | CalibTracker/SiStripChannelGain/test/Cosmic_B38/InputFiles_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CalibTracker/SiStripChannelGain/test/Cosmic_B38/InputFiles_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CalibTracker/SiStripChannelGain/test/Cosmic_B38/InputFiles_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | 'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/80C4285C-779E-DD11-9889-001617E30CA4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/A83BF5EE-6E9E-DD11-8082-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8266853E-999E-DD11-8B73-001D09F2432B.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2AAFE9A9-A19E-DD11-821B-000423D99F3E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/067E98F3-489F-DD11-B309-000423D996B4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/64C1D6F5-489F-DD11-90B7-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/3C084F93-679C-DD11-A361-000423D9989E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9C14E69F-069D-DD11-AC41-001617DBCF1E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5439B4CA-309D-DD11-84E5-000423D944F8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/D20BB375-AE9D-DD11-BF49-000423D944FC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/E2E8FE03-A69D-DD11-8699-000423D98750.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/B40C29EC-B69D-DD11-A665-000423D6A6F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/843C7874-1F9F-DD11-8E03-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7EB3DF8E-0E9F-DD11-A451-001D09F29146.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CEB001F9-169F-DD11-A5E6-000423D94494.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/382BAEE2-D39E-DD11-A0A4-000423D98EC8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CC5B37A1-A99E-DD11-816F-001617DBD230.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/6EDD168B-2F9F-DD11-ADF5-001617C3B79A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EE4B4C82-999C-DD11-86EC-000423D99F3E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/1CC8332F-459E-DD11-BFE1-001617C3B65A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7A6A133C-999E-DD11-9155-001D09F2462D.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F292BE7F-409F-DD11-883A-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B870AA81-409F-DD11-B549-001617C3B78C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9003F328-899C-DD11-83D7-000423D986C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/500B13D3-6F9C-DD11-8745-001617DC1F70.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/4CBAEDCC-309D-DD11-A617-001617E30D06.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/AED19458-399D-DD11-B9AC-000423D9A2AE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/A6688D1F-959D-DD11-B5B7-000423D6A6F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F0076F20-F59E-DD11-8B57-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EC6C6EA4-499D-DD11-AC7D-000423D98DB4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/DA099105-639D-DD11-9C3E-001617E30F50.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/2E40EDED-1A9E-DD11-9014-001617DBD5AC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/7647004C-F19D-DD11-8BAA-001617DBD224.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/38881706-5E9E-DD11-B487-000423D98868.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/1098901B-569E-DD11-BE60-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5E4E7508-919C-DD11-AEB1-000423D9853C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/060DD475-179D-DD11-A003-000423D94908.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/8A563E55-289D-DD11-BA24-000423D6BA18.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/545F9B54-D09D-DD11-A58B-000423D6B5C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/68795DEE-D79D-DD11-ADB7-000423D98DB4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3AD49E1B-F59E-DD11-81C4-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/548891AB-8C9D-DD11-8989-001617C3B69C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/745CD91D-529D-DD11-8908-000423D6B48C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3EF1CC87-2F9F-DD11-9EFC-001617DF785A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FCB4F2BA-3C9E-DD11-82C7-000423D99160.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/ECC4D018-569E-DD11-80C4-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/20C97175-669E-DD11-8ADD-00161757BF42.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/52683098-A99E-DD11-BCD0-000423D94AA8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F6C17BA7-A19E-DD11-B57C-000423D98634.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/D844B765-519F-DD11-96F9-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/02EB3FD3-6F9C-DD11-8C35-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0EB355C8-309D-DD11-85B7-001617C3B64C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8E478481-BA9E-DD11-9573-000423D6B358.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/A4775BE3-739D-DD11-843D-001617C3B778.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/8E8B21C6-F99D-DD11-BF05-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0EF20D52-139E-DD11-9473-000423D6B5C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7C00B404-389F-DD11-AB81-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/AE67CFF1-279F-DD11-B6DC-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7400A101-389F-DD11-B540-000423D60FF6.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2A630CF2-279F-DD11-942A-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/1CD3DEA6-F59C-DD11-986D-000423D98BC4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/D809EECD-7F9E-DD11-B4D7-00161757BF42.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/64451F65-779E-DD11-869D-001617E30D40.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/BA532E6C-519F-DD11-8DE7-000423D98FBC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/021AEBFE-7A9F-DD11-863E-0019DB29C620.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CA5A90F6-489F-DD11-8F60-000423D6B2D8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/028578E0-809C-DD11-AF7D-001617C3B6E8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/08A6038E-679C-DD11-A4B9-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/18BA3290-679C-DD11-B9A1-001617C3B77C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B0CD45DB-D39E-DD11-BC03-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/125FE86B-CB9E-DD11-B054-000423DD2F34.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/6E783236-849D-DD11-A9FF-001617C3B654.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/800FA4BD-5A9D-DD11-ACBB-001617DBD5AC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/760FB963-7C9D-DD11-B812-001D09F231C9.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/52CBD0DE-0A9E-DD11-B583-000423D6B358.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/905B1953-349E-DD11-8022-001D09F2AD7F.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7A7A6D05-389F-DD11-9D08-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/C223029D-E59C-DD11-A125-001617E30D40.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3293D2A6-4D9E-DD11-81D1-000423D98B5C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4E5AEDFC-5D9E-DD11-BD7D-001617C3B5F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2A9CA4B8-909E-DD11-857B-001617E30D38.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/9E30F47D-409F-DD11-A947-001617C3B6E8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/745BB069-519F-DD11-A8F9-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/4493CD28-899C-DD11-AF14-000423D6CA02.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0085D14F-289D-DD11-862E-000423D6006E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/841A2D63-E09D-DD11-BDA5-001617DF785A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5658C98B-9D9D-DD11-9B46-000423D99F1E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3ABEFDFB-169F-DD11-94E3-000423D98BC4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FC20EEFC-059F-DD11-A7CA-001617C3B5F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CE7B883A-ED9E-DD11-A737-0019DB29C614.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4261BA6C-CB9E-DD11-AE94-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9A134C3C-849D-DD11-8A1C-000423D98C20.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FE7A7A73-1F9F-DD11-A841-001617DBD230.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/6097BB22-FE9C-DD11-AA3C-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/F4AE9DE3-DC9C-DD11-9223-000423D6B42C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/3CA664CA-309D-DD11-A642-000423D951D4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/2C9D3EE0-C79D-DD11-AAF0-000423D94534.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/7E81C76A-BF9D-DD11-9970-001617E30F50.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4EA98C8F-0E9F-DD11-A48E-001D09F253FC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/0A4BBAF5-C29E-DD11-967D-0016177CA778.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4253601B-B29E-DD11-9725-001617DBD224.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/E00BC4D5-D39E-DD11-861A-001617C3B5E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EA733333-419D-DD11-9B49-000423D99660.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/40A87664-239E-DD11-8ABC-000423D944F8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/DA448F60-239E-DD11-8347-000423D98DD4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/0E9D6303-389F-DD11-8C22-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8A40053E-889E-DD11-9442-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/082ED767-999E-DD11-962C-0019B9F70607.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/205DAE07-0F9D-DD11-9FD4-000423D9890C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/041B05FD-059F-DD11-871E-001617E30D52.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FE7823F2-C29E-DD11-81F1-0019DB29C614.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/BC834BD5-2B9E-DD11-A8D9-001617C3B706.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/966DFADC-E89D-DD11-A90E-000423D99264.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B4FD3F7C-409F-DD11-8F2B-001617DBCF90.root'
| 183.382609 | 184 | 0.843283 | 'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/80C4285C-779E-DD11-9889-001617E30CA4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/A83BF5EE-6E9E-DD11-8082-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8266853E-999E-DD11-8B73-001D09F2432B.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2AAFE9A9-A19E-DD11-821B-000423D99F3E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/067E98F3-489F-DD11-B309-000423D996B4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/64C1D6F5-489F-DD11-90B7-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/3C084F93-679C-DD11-A361-000423D9989E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9C14E69F-069D-DD11-AC41-001617DBCF1E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5439B4CA-309D-DD11-84E5-000423D944F8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/D20BB375-AE9D-DD11-BF49-000423D944FC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/E2E8FE03-A69D-DD11-8699-000423D98750.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/B40C29EC-B69D-DD11-A665-000423D6A6F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/843C7874-1F9F-DD11-8E03-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7EB3DF8E-0E9F-DD11-A451-001D09F29146.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CEB001F9-169F-DD11-A5E6-000423D94494.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/382BAEE2-D39E-DD11-A0A4-000423D98EC8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CC5B37A1-A99E-DD11-816F-001617DBD230.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/6EDD168B-2F9F-DD11-ADF5-001617C3B79A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EE4B4C82-999C-DD11-86EC-000423D99F3E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/1CC8332F-459E-DD11-BFE1-001617C3B65A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7A6A133C-999E-DD11-9155-001D09F2462D.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F292BE7F-409F-DD11-883A-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B870AA81-409F-DD11-B549-001617C3B78C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9003F328-899C-DD11-83D7-000423D986C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/500B13D3-6F9C-DD11-8745-001617DC1F70.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/4CBAEDCC-309D-DD11-A617-001617E30D06.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/AED19458-399D-DD11-B9AC-000423D9A2AE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/A6688D1F-959D-DD11-B5B7-000423D6A6F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F0076F20-F59E-DD11-8B57-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EC6C6EA4-499D-DD11-AC7D-000423D98DB4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/DA099105-639D-DD11-9C3E-001617E30F50.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/2E40EDED-1A9E-DD11-9014-001617DBD5AC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/7647004C-F19D-DD11-8BAA-001617DBD224.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/38881706-5E9E-DD11-B487-000423D98868.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/1098901B-569E-DD11-BE60-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5E4E7508-919C-DD11-AEB1-000423D9853C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/060DD475-179D-DD11-A003-000423D94908.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/8A563E55-289D-DD11-BA24-000423D6BA18.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/545F9B54-D09D-DD11-A58B-000423D6B5C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/68795DEE-D79D-DD11-ADB7-000423D98DB4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3AD49E1B-F59E-DD11-81C4-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/548891AB-8C9D-DD11-8989-001617C3B69C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/745CD91D-529D-DD11-8908-000423D6B48C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3EF1CC87-2F9F-DD11-9EFC-001617DF785A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FCB4F2BA-3C9E-DD11-82C7-000423D99160.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/ECC4D018-569E-DD11-80C4-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/20C97175-669E-DD11-8ADD-00161757BF42.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/52683098-A99E-DD11-BCD0-000423D94AA8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F6C17BA7-A19E-DD11-B57C-000423D98634.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/D844B765-519F-DD11-96F9-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/02EB3FD3-6F9C-DD11-8C35-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0EB355C8-309D-DD11-85B7-001617C3B64C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8E478481-BA9E-DD11-9573-000423D6B358.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/A4775BE3-739D-DD11-843D-001617C3B778.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/8E8B21C6-F99D-DD11-BF05-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0EF20D52-139E-DD11-9473-000423D6B5C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7C00B404-389F-DD11-AB81-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/AE67CFF1-279F-DD11-B6DC-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7400A101-389F-DD11-B540-000423D60FF6.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2A630CF2-279F-DD11-942A-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/1CD3DEA6-F59C-DD11-986D-000423D98BC4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/D809EECD-7F9E-DD11-B4D7-00161757BF42.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/64451F65-779E-DD11-869D-001617E30D40.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/BA532E6C-519F-DD11-8DE7-000423D98FBC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/021AEBFE-7A9F-DD11-863E-0019DB29C620.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CA5A90F6-489F-DD11-8F60-000423D6B2D8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/028578E0-809C-DD11-AF7D-001617C3B6E8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/08A6038E-679C-DD11-A4B9-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/18BA3290-679C-DD11-B9A1-001617C3B77C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B0CD45DB-D39E-DD11-BC03-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/125FE86B-CB9E-DD11-B054-000423DD2F34.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/6E783236-849D-DD11-A9FF-001617C3B654.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/800FA4BD-5A9D-DD11-ACBB-001617DBD5AC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/760FB963-7C9D-DD11-B812-001D09F231C9.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/52CBD0DE-0A9E-DD11-B583-000423D6B358.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/905B1953-349E-DD11-8022-001D09F2AD7F.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7A7A6D05-389F-DD11-9D08-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/C223029D-E59C-DD11-A125-001617E30D40.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3293D2A6-4D9E-DD11-81D1-000423D98B5C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4E5AEDFC-5D9E-DD11-BD7D-001617C3B5F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2A9CA4B8-909E-DD11-857B-001617E30D38.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/9E30F47D-409F-DD11-A947-001617C3B6E8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/745BB069-519F-DD11-A8F9-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/4493CD28-899C-DD11-AF14-000423D6CA02.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0085D14F-289D-DD11-862E-000423D6006E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/841A2D63-E09D-DD11-BDA5-001617DF785A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5658C98B-9D9D-DD11-9B46-000423D99F1E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3ABEFDFB-169F-DD11-94E3-000423D98BC4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FC20EEFC-059F-DD11-A7CA-001617C3B5F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CE7B883A-ED9E-DD11-A737-0019DB29C614.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4261BA6C-CB9E-DD11-AE94-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9A134C3C-849D-DD11-8A1C-000423D98C20.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FE7A7A73-1F9F-DD11-A841-001617DBD230.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/6097BB22-FE9C-DD11-AA3C-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/F4AE9DE3-DC9C-DD11-9223-000423D6B42C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/3CA664CA-309D-DD11-A642-000423D951D4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/2C9D3EE0-C79D-DD11-AAF0-000423D94534.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/7E81C76A-BF9D-DD11-9970-001617E30F50.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4EA98C8F-0E9F-DD11-A48E-001D09F253FC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/0A4BBAF5-C29E-DD11-967D-0016177CA778.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4253601B-B29E-DD11-9725-001617DBD224.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/E00BC4D5-D39E-DD11-861A-001617C3B5E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EA733333-419D-DD11-9B49-000423D99660.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/40A87664-239E-DD11-8ABC-000423D944F8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/DA448F60-239E-DD11-8347-000423D98DD4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/0E9D6303-389F-DD11-8C22-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8A40053E-889E-DD11-9442-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/082ED767-999E-DD11-962C-0019B9F70607.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/205DAE07-0F9D-DD11-9FD4-000423D9890C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/041B05FD-059F-DD11-871E-001617E30D52.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FE7823F2-C29E-DD11-81F1-0019DB29C614.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/BC834BD5-2B9E-DD11-A8D9-001617C3B706.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/966DFADC-E89D-DD11-A90E-000423D99264.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B4FD3F7C-409F-DD11-8F2B-001617DBCF90.root'
| 0 | 0 | 0 |
86cb5b787a863245630cfb96d1a30d227547d206 | 51 | py | Python | py_prod_bootstrap/__init__.py | fgka/python-bootstrap | b48b647a14c387b7fb73bc03e12bc32ab19a340d | [
"MIT"
] | null | null | null | py_prod_bootstrap/__init__.py | fgka/python-bootstrap | b48b647a14c387b7fb73bc03e12bc32ab19a340d | [
"MIT"
] | null | null | null | py_prod_bootstrap/__init__.py | fgka/python-bootstrap | b48b647a14c387b7fb73bc03e12bc32ab19a340d | [
"MIT"
] | null | null | null | # vim: ai:sw=4:ts=4:sta:et:fo=croql
# coding=utf-8
| 17 | 35 | 0.647059 | # vim: ai:sw=4:ts=4:sta:et:fo=croql
# coding=utf-8
| 0 | 0 | 0 |
ac36ee9e654ae7571a290beabcd12dba9a6a0e1c | 2,977 | py | Python | impc_etl/jobs/extract/colony_tracking_extractor.py | ficolo/impc-etl | 3ca0fadaaa2b6e5d6fc424f949a9faa7680cd5f5 | [
"Apache-2.0"
] | 4 | 2021-04-14T09:28:51.000Z | 2022-02-07T10:52:14.000Z | impc_etl/jobs/extract/colony_tracking_extractor.py | ficolo/impc-etl | 3ca0fadaaa2b6e5d6fc424f949a9faa7680cd5f5 | [
"Apache-2.0"
] | 85 | 2018-10-30T10:49:28.000Z | 2022-03-25T13:51:31.000Z | impc_etl/jobs/extract/colony_tracking_extractor.py | ficolo/impc-etl | 3ca0fadaaa2b6e5d6fc424f949a9faa7680cd5f5 | [
"Apache-2.0"
] | 7 | 2018-10-30T11:36:57.000Z | 2021-07-15T15:36:14.000Z | from typing import Type
from pyspark.sql.functions import col, lit, when
from impc_etl.shared import utils
from impc_etl.workflow.config import ImpcConfig
from pyspark.sql.session import SparkSession
import luigi
from luigi.contrib.spark import PySparkTask
from pyspark.sql.types import StringType
| 41.929577 | 88 | 0.658381 | from typing import Type
from pyspark.sql.functions import col, lit, when
from impc_etl.shared import utils
from impc_etl.workflow.config import ImpcConfig
from pyspark.sql.session import SparkSession
import luigi
from luigi.contrib.spark import PySparkTask
from pyspark.sql.types import StringType
class ColonyTrackingExtractor(PySparkTask):
    """Luigi/Spark task that merges the iMits and GenTaR colony tracking
    reports into a single parquet dataset of colonies.
    """

    # Task name shown in the luigi scheduler.
    name = "IMPC_Colony_Tracking_Extractor"
    imits_colonies_tsv_path = luigi.Parameter()
    gentar_colonies_tsv_path = luigi.Parameter()
    output_path = luigi.Parameter()

    def output(self):
        """Target location of the merged colonies parquet output."""
        return ImpcConfig().get_target(f"{self.output_path}all_colonies_parquet")

    def app_options(self):
        """Positional CLI arguments handed to :meth:`main` as ``args``."""
        return [
            self.imits_colonies_tsv_path,
            self.gentar_colonies_tsv_path,
            self.output().path,
        ]

    def main(self, sc, *args):
        """Load both TSV reports, normalise their column names, drop iMits
        colonies superseded by GenTaR, align the schemas and write the union.
        """
        spark = SparkSession(sc)
        imits_tsv_path, gentar_tsv_path, output_path = args[0], args[1], args[2]

        imits_df = utils.extract_tsv(spark, imits_tsv_path)
        gentar_df = utils.extract_tsv(spark, gentar_tsv_path)

        # GenTaR report headers -> canonical colony column names.
        gentar_col_mapping = {
            "Phenotyping External Reference": "colony_name",
            "Background Strain": "colony_background_strain",
            "Mutation Symbol": "allele_symbol",
            "Gene Symbol": "marker_symbol",
            "MGI Gene Accession ID": "mgi_accession_id",
            "MGI Strain Accession ID": "mgi_strain_accession_id",
            "Phenotyping Work Unit": "phenotyping_centre",
            "Phenotyping Work Group": "phenotyping_consortium",
            "Production Work Unit": "production_centre",
            "Production Work Group": "production_consortium",
        }

        # Mapped headers take their canonical name; every other header is
        # snake_cased.
        gentar_df = gentar_df.toDF(
            *[
                gentar_col_mapping[column]
                if column in gentar_col_mapping
                else column.replace(" ", "_").lower()
                for column in gentar_df.columns
            ]
        )
        imits_df = imits_df.toDF(
            *[column.replace(" ", "_").lower() for column in imits_df.columns]
        )

        # Anti-join: keep only the iMits colonies that GenTaR does not
        # already track (null marker_symbol on the GenTaR side of the join).
        imits_df = imits_df.alias("imits")
        gentar_tmp_df = gentar_df.alias("gentar")
        imits_df = (
            imits_df.join(gentar_tmp_df, "colony_name", "left_outer")
            .where(col("gentar.marker_symbol").isNull())
            .select("imits.*")
            .drop_duplicates()
        )

        # Pad each side with NULL string columns until both schemas match.
        for column in imits_df.columns:
            if column not in gentar_df.columns:
                gentar_df = gentar_df.withColumn(column, lit(None).cast(StringType()))
        for column in gentar_df.columns:
            if column not in imits_df.columns:
                imits_df = imits_df.withColumn(column, lit(None).cast(StringType()))

        # union() matches columns by position, so project gentar_df onto the
        # imits column order before combining.
        colonies_df = imits_df.union(gentar_df.select(*imits_df.columns))
        colonies_df.write.parquet(output_path)
5d774960bd0333c394d01e191b493b0b28d848dd | 847 | py | Python | move_ave.py | emguse/nov-2021 | 56cf6a6548801f433b820372ef67971794912d8a | [
"MIT"
] | null | null | null | move_ave.py | emguse/nov-2021 | 56cf6a6548801f433b820372ef67971794912d8a | [
"MIT"
] | null | null | null | move_ave.py | emguse/nov-2021 | 56cf6a6548801f433b820372ef67971794912d8a | [
"MIT"
] | null | null | null | from collections import deque
if __name__ == "__main__":
main() | 27.322581 | 58 | 0.551358 | from collections import deque
class MovingAverage():
    """Streaming moving-average calculator over a fixed-size window.

    Values are fed in one at a time; only the most recent ``length``
    samples are retained (in a deque) and averaged.
    """

    def __init__(self, length: int) -> None:
        """Create a window holding ``abs(length)`` samples (minimum 1).

        :param length: desired window size; negative values are taken as
            their absolute value and 0 is coerced to 1.
        """
        self.length = abs(length)
        if self.length == 0:
            self.length = 1  # a zero-size window would divide by zero
        # maxlen makes the deque silently drop the oldest value when full.
        self.dq: deque = deque([], maxlen=self.length)

    def simple_moving_average(self, new_value) -> float:
        """Append *new_value* and return the arithmetic mean of the window.

        While the window is still filling, the mean is taken over the
        samples received so far. (The previous implementation divided by
        the window capacity, which under-reported the average during the
        warm-up period.)
        """
        self.dq.append(new_value)
        return sum(self.dq) / len(self.dq)

    def weighted_moving_average(self, new_value) -> float:
        """Append *new_value* and return the linearly weighted mean.

        The oldest sample has weight 1 and the newest has weight
        ``len(self.dq)``, so recent values dominate the result.
        """
        self.dq.append(new_value)
        weighted_sum = sum(value * (i + 1) for i, value in enumerate(self.dq))
        # Sum of weights 1..n in closed form.
        total_weight = len(self.dq) * (len(self.dq) + 1) // 2
        return weighted_sum / total_weight
def main():
    """Demo: print the weighted moving average of 1..10 (window size 5)."""
    averager = MovingAverage(5)
    for sample in range(1, 11):
        print(averager.weighted_moving_average(sample))
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
b5604486231496348086a272db2cac36cec603f9 | 1,314 | py | Python | setup.py | RobertoPrevato/Like-a-sir | 64f83955748583734d35c2d24489d4b839d3a2a2 | [
"MIT"
] | 1 | 2022-03-01T12:51:07.000Z | 2022-03-01T12:51:07.000Z | setup.py | RobertoPrevato/Like-a-sir | 64f83955748583734d35c2d24489d4b839d3a2a2 | [
"MIT"
] | 2 | 2022-03-17T06:26:12.000Z | 2022-03-24T17:11:21.000Z | setup.py | RobertoPrevato/Like-a-srt | 64f83955748583734d35c2d24489d4b839d3a2a2 | [
"MIT"
] | null | null | null | from setuptools import setup
from likeasrt import VERSION
# Distribution metadata for the `like-a-srt` CLI
# (generates SRT subtitles from audio via Azure Speech).
setup(
    name="like-a-srt",
    version=VERSION,
    description=(
        "CLI to generate SRT subtitles automatically from audio files, "
        "using Azure Speech"
    ),
    long_description=readme(),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
        "Operating System :: OS Independent",
    ],
    url="https://github.com/RobertoPrevato/Like-a-srt",
    author="RobertoPrevato",
    author_email="roberto.prevato@gmail.com",
    keywords="azure speech srt subtitles automatic generation",
    license="MIT",
    packages=[
        "likeasrt",
        "likeasrt.commands",
        "likeasrt.domain",
    ],
    # Two console entry points: the full name and the short alias `las`.
    entry_points={
        "console_scripts": ["like-a-srt=likeasrt.main:main", "las=likeasrt.main:main"]
    },
    # Runtime dependencies are pinned to exact versions.
    install_requires=[
        "click==8.0.3",
        "essentials==1.1.4",
        "azure-cognitiveservices-speech==1.19.0",
        "python-dotenv==0.19.2",
    ],
    include_package_data=True,
)
| 27.375 | 86 | 0.619482 | from setuptools import setup
from likeasrt import VERSION
def readme():
    """Return the contents of README.md for use as the long description."""
    with open("README.md", encoding="utf8") as readme_file:
        contents = readme_file.read()
    return contents
# Distribution metadata for the `like-a-srt` CLI
# (generates SRT subtitles from audio via Azure Speech).
setup(
    name="like-a-srt",
    version=VERSION,
    description=(
        "CLI to generate SRT subtitles automatically from audio files, "
        "using Azure Speech"
    ),
    long_description=readme(),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
        "Operating System :: OS Independent",
    ],
    url="https://github.com/RobertoPrevato/Like-a-srt",
    author="RobertoPrevato",
    author_email="roberto.prevato@gmail.com",
    keywords="azure speech srt subtitles automatic generation",
    license="MIT",
    packages=[
        "likeasrt",
        "likeasrt.commands",
        "likeasrt.domain",
    ],
    # Two console entry points: the full name and the short alias `las`.
    entry_points={
        "console_scripts": ["like-a-srt=likeasrt.main:main", "las=likeasrt.main:main"]
    },
    # Runtime dependencies are pinned to exact versions.
    install_requires=[
        "click==8.0.3",
        "essentials==1.1.4",
        "azure-cognitiveservices-speech==1.19.0",
        "python-dotenv==0.19.2",
    ],
    include_package_data=True,
)
| 86 | 0 | 23 |
c18b848e547a708124076b10a8187d217bd04785 | 117 | py | Python | test/factories.py | Unholster/django-lookup | 566590540b27270020ba3728f88f52f8a03d1e03 | [
"MIT"
] | null | null | null | test/factories.py | Unholster/django-lookup | 566590540b27270020ba3728f88f52f8a03d1e03 | [
"MIT"
] | 2 | 2020-06-05T23:24:16.000Z | 2021-06-10T22:03:16.000Z | test/factories.py | Unholster/django-lookup | 566590540b27270020ba3728f88f52f8a03d1e03 | [
"MIT"
] | 1 | 2015-10-22T15:35:44.000Z | 2015-10-22T15:35:44.000Z | from .testapp.models import Thing
| 16.714286 | 33 | 0.65812 | from .testapp.models import Thing
def ThingFactory():  # noqa
    """Create, persist and return a new ``Thing`` model instance."""
    instance = Thing()
    instance.save()
    return instance
| 60 | 0 | 23 |
eee0684bb32299e587765326efa040f5dccabe3d | 4,489 | py | Python | pyjj/__init__.py | achooan/pyjj | deebdd1389b43194d7d02980a50e8cef38c67bb6 | [
"MIT"
] | 4 | 2019-07-01T12:36:32.000Z | 2019-07-28T16:03:57.000Z | pyjj/__init__.py | ach0o/pyjj | deebdd1389b43194d7d02980a50e8cef38c67bb6 | [
"MIT"
] | 2 | 2021-02-02T22:21:15.000Z | 2021-06-02T00:12:11.000Z | pyjj/__init__.py | ach0o/pyjj | deebdd1389b43194d7d02980a50e8cef38c67bb6 | [
"MIT"
] | null | null | null | import click
from .config import PyjjConfig
from .database import Database as Db
from .messages import msg, header, content, division
from .utils import validate_url
pass_config = click.make_pass_decorator(PyjjConfig, ensure=True)
@click.group(help="A CLI tool for bookmark management")
@pass_config
def pyjj(config):
    """A CLI tool for bookmark management

    Root command group: runs before every sub-command to load the stored
    configuration, open the bookmark database for the active division and
    print the division banner.

    :param object config: an object with the current context
    """
    config.parse()
    # Each division has its own table; the Database is (re)opened per run.
    config.db = Db(division=config.division)
    config.db.setup()
    click.echo(division(config.division))
@pyjj.command(help="Switch to a different table")
@click.argument("division")
@pass_config
def use(config, division: str):
    """Switch to a different table

    :param object config: an object with the current context
    :param str division: a name of the division
    """
    # Fix: the old signature was `division=str`, which accidentally made the
    # `str` builtin the *default value* instead of a type annotation. Click
    # always supplies the required argument, so the annotation is correct.
    config.update(division=division)
    click.echo(f"Switched to {division}")
@pyjj.command(help="Show a list of bookmarks")
@click.option("--tag", "-t")
@pass_config
def list(config, tag: str):
    """Show a list of bookmarks, optionally filtered by tag.

    :param object config: an object with the current context
    :param str tag: a tag of urls
    """
    status, urls = config.db.list_urls(tag=tag)
    if status:
        click.echo(header("Bookmarks", f"{'ID':^7} {'URL':60} {'TAGS':20} DATE"))
        for url, tags in urls:
            click.echo(content(f"{url[0]:^7} {url[1]:60} {','.join(tags):20} {url[2]}"))
    else:
        # The lookup failed; surface the error message from the database.
        click.echo(msg(status, urls))
# TODO: Pagination
@pyjj.command(help="Add a new bookmark")
@click.argument("url")
@click.option("--tags", "-t")
@pass_config
def add(config, tags: str, url: str):
    """Add a new bookmark, optionally tagged.

    :param object config: an object with the current context
    :param str tags: optional comma-separated tags to attach
    :param str url: an url to add to the database
    """
    try:
        validated = validate_url(url)
        tag_list = tags.split(",") if tags else None
        if tag_list is None:
            result = config.db.add_url(validated)
        else:
            result = config.db.add_url(validated, tags=tag_list)
        click.echo(msg(*result))
    except Exception as e:
        # Report validation/database failures instead of crashing the CLI.
        click.echo(msg(False, str(e)))
@pyjj.command(help="Edit a bookmark")
@click.argument("id")
@click.argument("url")
@pass_config
def edit(config, id: int, url: str):
    """Edit a bookmark

    Replaces the stored url for the given id after validating the new url.
    Errors (including validation failures) are reported, not raised.

    :param object config: an object with the current context
    :param int id: an id of url to edit
    :param str url: an url to add to the database
    """
    try:
        validated = validate_url(url)
        result = config.db.get_url(id)
        if result[0]:
            # The id exists: perform the actual update.
            result = config.db.edit_url(id, validated)
        click.echo(msg(*result))
    except Exception as e:
        click.echo(msg(False, str(e)))
@pyjj.command(help="Remove a bookmark")
@click.argument("id")
@click.option("--tag", "-t")
@pass_config
def remove(config, id, tag):
    """Remove a bookmark. When given option `-t`, only the tag
    associated with the url gets removed.

    :param object config: an object with the current context
    :param int id: an id of url to delete
    :param str tag: a tag of url to delete
    """
    result = config.db.get_url(id)
    if result[0]:
        # The id exists: drop just the tag, or delete the whole bookmark
        # after an interactive confirmation.
        if tag:
            result = config.db.remove_url_tag(id, tag)
        elif click.confirm(f"Wish to delete {result[1]} ?"):
            result = config.db.remove_url(id)
        else:
            result = (False, "aborted.")
    click.echo(msg(*result))
@pyjj.command(help="Get a random bookmark")
@click.option("--tag", "-t")
@pass_config
def eureka(config, tag=None):
"""Get a random bookmark. When given option `-t`, returns
a randome bookmark with the given tag.
:param object config: an object with the current context
:param str tag: a tag of a random url
"""
_, url_tags = config.db.get_random_url(tag)
url, tags = url_tags
click.echo(header("Eureka!", f"{'ID':^7} {'URL':60} {'TAGS':20} DATE"))
click.echo(content(f"{url[0]:^7} {url[1]:60} {','.join(tags):20} {url[2]}"))
@pyjj.command(help="Show a list of tags")
@pass_config
def tags(config):
"""Show a list of tags.
:param object config: an object with the current context
"""
status, tags = config.db.list_tags()
click.echo(header("Tags", f"{'ID':^7} {'TAGS':20} DATE"))
if status:
for index, tag in tags:
click.echo(content(f"{index:^7} {tag[0]:20} {tag[1]}"))
# Allow running the CLI directly as a script.
if __name__ == "__main__":
    pyjj()
| 28.411392 | 88 | 0.63043 | import click
from .config import PyjjConfig
from .database import Database as Db
from .messages import msg, header, content, division
from .utils import validate_url
pass_config = click.make_pass_decorator(PyjjConfig, ensure=True)
@click.group(help="A CLI tool for bookmark management")
@pass_config
def pyjj(config):
    """Entry group for the bookmark-management CLI.

    Parses the persisted configuration, opens the database for the
    configured division and announces which division is active.

    :param object config: an object with the current context
    """
    config.parse()
    database = Db(division=config.division)
    database.setup()
    config.db = database
    click.echo(division(config.division))
@pyjj.command(help="Switch to a different table")
@click.argument("division")
@pass_config
def use(config, division=str):
"""Switch to a different table
:param object config: an object with the current context
:param str division: a name of the division
"""
config.update(division=division)
click.echo(f"Switched to {division}")
@pyjj.command(help="Show a list of bookmarks")
@click.option("--tag", "-t")
@pass_config
def list(config, tag: str):
"""Show a list of bookmarks
:param object config: an object with the current context
:param str tag: a tag of urls
"""
status, urls = config.db.list_urls(tag=tag)
if not status:
click.echo(msg(status, urls))
else:
click.echo(header("Bookmarks", f"{'ID':^7} {'URL':60} {'TAGS':20} DATE"))
for url, tags in urls:
click.echo(content(f"{url[0]:^7} {url[1]:60} {','.join(tags):20} {url[2]}"))
# TODO: Pagination
@pyjj.command(help="Add a new bookmark")
@click.argument("url")
@click.option("--tags", "-t")
@pass_config
def add(config, tags: str, url: str):
"""Add a new bookmark
:param object config: an object with the current context
:param str url: an url to add to the database
"""
try:
_url = validate_url(url)
if tags:
result = config.db.add_url(_url, tags=tags.split(","))
else:
result = config.db.add_url(_url)
click.echo(msg(*result))
except Exception as e:
click.echo(msg(False, str(e)))
@pyjj.command(help="Edit a bookmark")
@click.argument("id")
@click.argument("url")
@pass_config
def edit(config, id: int, url: str):
"""Edit a bookmark
:param object config: an object with the current context
:param int id: an id of url to edit
:param str url: an url to add to the database
"""
try:
_url = validate_url(url)
result = config.db.get_url(id)
if result[0]: # Edit url as id exists
result = config.db.edit_url(id, _url)
click.echo(msg(*result))
except Exception as e:
click.echo(msg(False, str(e)))
@pyjj.command(help="Remove a bookmark")
@click.argument("id")
@click.option("--tag", "-t")
@pass_config
def remove(config, id, tag):
"""Remove a bookmark. When given option `-t`, only the tag
associated with the url gets removed.
:param object config: an object with the current context
:param int id: an id of url to delete
:param str tag: a tag of url to delete
"""
result = config.db.get_url(id)
if result[0]: # Remove url as id exists
if tag:
result = config.db.remove_url_tag(id, tag)
else:
is_confirmed = click.confirm(f"Wish to delete {result[1]} ?")
if is_confirmed:
result = config.db.remove_url(id)
else:
result = (False, "aborted.")
click.echo(msg(*result))
@pyjj.command(help="Get a random bookmark")
@click.option("--tag", "-t")
@pass_config
def eureka(config, tag=None):
"""Get a random bookmark. When given option `-t`, returns
a randome bookmark with the given tag.
:param object config: an object with the current context
:param str tag: a tag of a random url
"""
_, url_tags = config.db.get_random_url(tag)
url, tags = url_tags
click.echo(header("Eureka!", f"{'ID':^7} {'URL':60} {'TAGS':20} DATE"))
click.echo(content(f"{url[0]:^7} {url[1]:60} {','.join(tags):20} {url[2]}"))
@pyjj.command(help="Show a list of tags")
@pass_config
def tags(config):
"""Show a list of tags.
:param object config: an object with the current context
"""
status, tags = config.db.list_tags()
click.echo(header("Tags", f"{'ID':^7} {'TAGS':20} DATE"))
if status:
for index, tag in tags:
click.echo(content(f"{index:^7} {tag[0]:20} {tag[1]}"))
# Allow running the CLI directly as a script.
if __name__ == "__main__":
    pyjj()
| 0 | 0 | 0 |
8c1f646db29f15d989fdd2c1cec89576b5179c77 | 7,546 | py | Python | roblopy/user.py | jackprogramsjp/Roblopy | 8190a467edbf27e16de7cf78c0a51b7950085425 | [
"MIT"
] | 1 | 2020-07-07T00:20:10.000Z | 2020-07-07T00:20:10.000Z | roblopy/user.py | jackprogramsjp/Roblopy | 8190a467edbf27e16de7cf78c0a51b7950085425 | [
"MIT"
] | 3 | 2020-04-05T07:15:10.000Z | 2020-07-06T21:21:59.000Z | roblopy/user.py | jackprogramsjp/Roblopy | 8190a467edbf27e16de7cf78c0a51b7950085425 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from .utils.request import get
from typing import Optional
class User:
    """
    Represents a Roblox user.

    Fetches the public profile and status of the given user id at
    construction time and exposes them as plain attributes.
    """

    def __init__(self, user_id: int):
        """
        Construct a new user class.

        :param user_id: The User's ID.
        """
        profile = get(f"https://users.roblox.com/v1/users/{user_id}").json()
        user_status = get(f"https://users.roblox.com/v1/users/{user_id}/status").json()["status"]
        self.name: str = profile["name"]
        self.display_name: str = profile["displayName"]
        self.id: int = profile["id"]
        self.is_banned: bool = profile["isBanned"]
        self.created: str = profile["created"]
        # Empty strings from the API are normalised to None.
        self.description: str = profile["description"] if profile["description"] else None
        self.status: str = user_status if user_status else None
| 33.990991 | 119 | 0.555924 | from bs4 import BeautifulSoup
from .utils.request import get
from typing import Optional
class User:
    """
    Represents a Roblox user.

    Fetches the public profile and status of the given user id at
    construction time and exposes them as plain attributes.
    """

    def __init__(self, user_id: int):
        """
        Construct a new user class.

        :param user_id: The User's ID.
        """
        profile = get(f"https://users.roblox.com/v1/users/{user_id}").json()
        user_status = get(f"https://users.roblox.com/v1/users/{user_id}/status").json()["status"]
        self.name: str = profile["name"]
        self.display_name: str = profile["displayName"]
        self.id: int = profile["id"]
        self.is_banned: bool = profile["isBanned"]
        self.created: str = profile["created"]
        # Empty strings from the API are normalised to None.
        self.description: str = profile["description"] if profile["description"] else None
        self.status: str = user_status if user_status else None
class Users:
    """Static helpers around the public Roblox user web APIs.

    All methods issue HTTP requests through the module-level ``get``
    helper and return plain Python values decoded from the JSON
    responses. The four group-membership helpers share ``_groups`` so
    the endpoint URL and response shape live in one place.
    """

    @staticmethod
    def _groups(user_id: int) -> list:
        """Fetch the raw group-membership records for a user.

        :param user_id: The User's ID.
        :return: A list of group dicts (keys include Id, Rank, Role, IsPrimary).
        """
        return get(f"https://api.roblox.com/users/{user_id}/groups").json()

    @staticmethod
    def get_username_from_id(user_id: int) -> str:
        """
        Gets the User's name from their User ID.

        :param user_id: The User ID.
        :return: The User's name.
        """
        return get(f"http://api.roblox.com/users/{user_id}").json()["Username"]

    @staticmethod
    def get_id_from_username(username: str) -> int:
        """
        Gets the User's ID from their username.

        :param username: The User's name.
        :return: The User's ID.
        """
        return get(f"http://api.roblox.com/users/get-by-username?username={username}").json()["Id"]

    @staticmethod
    def is_in_group(user_id: int, group_id: int) -> bool:
        """
        Checks if the User is in a specific group.

        :param user_id: The User's ID to check for.
        :param group_id: The Group's ID to check for.
        :return: True or False.
        """
        return any(group["Id"] == group_id for group in Users._groups(user_id))

    @staticmethod
    def get_rank_in_group(user_id: int, group_id: int) -> int:
        """
        Gets the User's rank of the specific group.

        :param user_id: The User's ID.
        :param group_id: The Group's ID.
        :return: The rank of the User in the group, or None if not a member.
        """
        return next(
            (group["Rank"] for group in Users._groups(user_id) if group["Id"] == group_id),
            None,
        )

    @staticmethod
    def get_role_in_group(user_id: int, group_id: int) -> str:
        """
        Gets the User's role of the specific group.

        :param user_id: The User's ID.
        :param group_id: The Group's ID.
        :return: The role of the User in the group, or None if not a member.
        """
        return next(
            (group["Role"] for group in Users._groups(user_id) if group["Id"] == group_id),
            None,
        )

    @staticmethod
    def group_is_primary(user_id: int, group_id: int) -> bool:
        """
        Checks if User's specific group is primary.

        :param user_id: The User's ID to check for.
        :param group_id: The Group's ID to check for.
        :return: True or False.
        """
        return any(
            group["Id"] == group_id and group["IsPrimary"]
            for group in Users._groups(user_id)
        )

    @staticmethod
    def get_profile_description(user_id: int) -> Optional[str]:
        """
        Gets the User's bio / description.

        :param user_id: The User's ID.
        :return: The User's bio / description, but will return None if no bio / description.
        """
        description = get(f"https://users.roblox.com/v1/users/{user_id}").json()["description"]
        return description if description != "" else None

    @staticmethod
    def get_profile_status(user_id: int) -> Optional[str]:
        """
        Gets the User's status.

        :param user_id: The User's ID.
        :return: The User's status, but will return None if no status.
        """
        status = get(f"https://users.roblox.com/v1/users/{user_id}/status").json()["status"]
        return status if status != "" else None

    @staticmethod
    def get_avatar_image(user_id: int) -> Optional[str]:
        """
        Gets the User's avatar image.

        :param user_id: The User's ID.
        :return: The User's avatar image URL, or None if it cannot be found.
        """
        data = get(
            f"https://thumbnails.roblox.com/v1/users/avatar-headshot"
            f"?userIds={user_id}&size=420x420&format=Png&isCircular=false"
        ).json()["data"]
        return data[0]["imageUrl"] if data else None

    @staticmethod
    def is_banned(user_id: int) -> bool:
        """
        Checks if User is banned.

        :param user_id: The User's ID to check for.
        :return: True or False.
        """
        return get(f"https://users.roblox.com/v1/users/{user_id}").json()["isBanned"]

    @staticmethod
    def is_online(user_id: int) -> bool:
        """
        Checks if User is currently online.

        :param user_id: The User's ID to check for.
        :return: True or False.
        """
        return get(f"https://api.roblox.com/users/{user_id}/onlinestatus/").json()["IsOnline"]

    @staticmethod
    def get_online_status(user_id: int) -> dict:
        """
        Get's the User's online status.

        :param user_id: The User's ID.
        :return: A dictionary of the User's online status.
        """
        return get(f"https://api.roblox.com/users/{user_id}/onlinestatus/").json()

    @staticmethod
    def can_manage_asset(user_id: int, asset_id: int) -> bool:
        """
        Checks if the User can manage a given asset.

        :param user_id: The User's ID to check for.
        :param asset_id: The Asset's ID to check for.
        :return: True or False.
        """
        return get(f"http://api.roblox.com/users/{user_id}/canmanage/{asset_id}").json()["CanManage"]
| 0 | 6,651 | 25 |
d7f6fd72cf75693b0e429580feefa645d9e58921 | 16,043 | py | Python | tests/test_openvas_lib_data.py | abhishekvasishtb/openvas_lib | 8525e05860e012e30ff633652cb5d20050baa032 | [
"BSD-3-Clause"
] | 77 | 2015-04-22T10:43:15.000Z | 2021-02-21T04:14:46.000Z | tests/test_openvas_lib_data.py | zeinlol/openvas_lib | 326bde506ccb2b98fe52f30bfc623e30f4525194 | [
"BSD-3-Clause"
] | 39 | 2015-03-18T04:33:15.000Z | 2022-03-17T06:52:00.000Z | tests/test_openvas_lib_data.py | zeinlol/openvas_lib | 326bde506ccb2b98fe52f30bfc623e30f4525194 | [
"BSD-3-Clause"
] | 100 | 2015-01-18T16:42:55.000Z | 2021-12-01T23:57:14.000Z | import unittest
# def test___init__(self):
# # open_vas_override = OpenVASOverride()
# assert False # TODO: implement your test here
#
# def test_make_object(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.make_object(oid, name, text, text_is_excerpt, threat, new_threat, orphan))
# assert False # TODO: implement your test here
#
# def test_name(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.name())
# assert False # TODO: implement your test here
#
# def test_name_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.name(val))
# assert False # TODO: implement your test here
#
# def test_new_threat(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.new_threat())
# assert False # TODO: implement your test here
#
# def test_new_threat_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.new_threat(val))
# assert False # TODO: implement your test here
#
# def test_oid(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.oid())
# assert False # TODO: implement your test here
#
# def test_oid_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.oid(val))
# assert False # TODO: implement your test here
#
# def test_orphan(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.orphan())
# assert False # TODO: implement your test here
#
# def test_orphan_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.orphan(val))
# assert False # TODO: implement your test here
#
# def test_text(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text())
# assert False # TODO: implement your test here
#
# def test_text_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text(val))
# assert False # TODO: implement your test here
#
# def test_text_is_excerpt(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text_is_excerpt())
# assert False # TODO: implement your test here
#
# def test_text_is_excerpt_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text_is_excerpt(val))
# assert False # TODO: implement your test here
#
# def test_threat(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.threat())
# assert False # TODO: implement your test here
#
# def test_threat_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.threat(val))
# assert False # TODO: implement your test here
# def test___init__(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# assert False # TODO: implement your test here
#
# def test_name(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.name())
# assert False # TODO: implement your test here
#
# def test_oid(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.oid())
# assert False # TODO: implement your test here
#
# def test_orphan(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.orphan())
# assert False # TODO: implement your test here
#
# def test_text(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.text())
# assert False # TODO: implement your test here
#
# def test_text_is_excerpt(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.text_is_excerpt())
# assert False # TODO: implement your test here
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 40.718274 | 189 | 0.640778 | import unittest
class TestOpenVASPort(unittest.TestCase):
    """Placeholder suite for OpenVASPort.

    TODO: implement tests for ``__init__`` and the ``number``,
    ``port_name`` and ``proto`` accessors. The previous body carried the
    same plan as commented-out stub code; it is recorded here instead.
    """
class TestOpenVASNVT(unittest.TestCase):
    """Placeholder suite for OpenVASNVT.

    TODO: implement tests for ``__init__``, ``make_object`` and each
    accessor both as getter and setter: ``bid``, ``bugtraq``,
    ``category``, ``cve``, ``cvss_base``, ``raw_description``,
    ``family``, ``fingerprints``, ``name``, ``oid``, ``risk_factor``,
    ``summary``, ``tags``, ``xrefs``. The previous body carried the same
    plan as commented-out stub code; it is recorded here instead.
    """
class TestOpenVASOverride(unittest.TestCase):
    """Placeholder suite for OpenVASOverride.

    TODO: implement tests for ``__init__``, ``make_object`` and each
    accessor both as getter and setter: ``name``, ``new_threat``,
    ``oid``, ``orphan``, ``text``, ``text_is_excerpt``, ``threat``.
    The previous body carried the same plan as commented-out stub code;
    it is recorded here instead.
    """
class TestOpenVASNotes(unittest.TestCase):
    """Placeholder suite for OpenVASNotes.

    TODO: implement tests for ``__init__`` and the ``name``, ``oid``,
    ``orphan``, ``text`` and ``text_is_excerpt`` accessors. The previous
    body carried the same plan as commented-out stub code; it is
    recorded here instead.
    """
class TestOpenVASResult(unittest.TestCase):
    """Placeholder suite for OpenVASResult.

    TODO: implement tests for ``__init__``, ``make_object`` and each
    accessor both as getter and setter: ``raw_description``, ``host``,
    ``id``, ``notes``, ``nvt``, ``overrides``, ``port``, ``subnet``,
    ``threat``. The previous body carried the same plan as
    commented-out stub code; it is recorded here instead.
    """
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 0 | 11,149 | 115 |
4db4785bd46c81d6467fd0fe7cfc8660df756c1c | 452 | py | Python | migrations/data/manitobarydercup/tournament.py | travisbale/scorecard | c131a87bb9552b2bde2b1adb68ce7a440cb32278 | [
"MIT"
] | null | null | null | migrations/data/manitobarydercup/tournament.py | travisbale/scorecard | c131a87bb9552b2bde2b1adb68ce7a440cb32278 | [
"MIT"
] | null | null | null | migrations/data/manitobarydercup/tournament.py | travisbale/scorecard | c131a87bb9552b2bde2b1adb68ce7a440cb32278 | [
"MIT"
] | null | null | null | """Create a new tournament."""
from scorecard import db
from scorecard.models.tournament import Tournament
def create(name, start_date, end_date):
    """Create a tournament, reusing an existing identical one if present."""
    existing = Tournament.query.filter_by(
        name=name, start_date=start_date, end_date=end_date
    ).first()
    if existing is not None:
        # An identical tournament already exists; do not insert a duplicate.
        return existing
    tournament = Tournament(name, start_date, end_date)
    tournament.save()
    db.session.commit()
    return tournament
| 25.111111 | 104 | 0.712389 | """Create a new tournament."""
from scorecard import db
from scorecard.models.tournament import Tournament
def create(name, start_date, end_date):
    """Create a tournament.

    Idempotent: an existing tournament with the same name, start date and
    end date is reused instead of creating a duplicate.
    """
    tournament = Tournament.query.filter_by(name=name, start_date=start_date, end_date=end_date).first()
    if tournament is None:
        tournament = Tournament(name, start_date, end_date)
        tournament.save()
    # Commit even when nothing new was created (flushes any pending session state).
    db.session.commit()
    return tournament
| 0 | 0 | 0 |
503a69376129ef2d85059273118ef42eef987c2d | 5,087 | py | Python | models/common/base.py | LukasHedegaard/co3d | b95ca94ee6c01448bd292a6ec8cc6e3d606c74c4 | [
"Apache-2.0"
] | 13 | 2021-06-02T09:44:49.000Z | 2022-03-29T08:41:40.000Z | models/common/base.py | LukasHedegaard/co3d | b95ca94ee6c01448bd292a6ec8cc6e3d606c74c4 | [
"Apache-2.0"
] | null | null | null | models/common/base.py | LukasHedegaard/co3d | b95ca94ee6c01448bd292a6ec8cc6e3d606c74c4 | [
"Apache-2.0"
] | 2 | 2021-10-10T09:56:09.000Z | 2021-12-03T17:31:30.000Z | from operator import attrgetter
import torch
from continual import CoModule
from pytorch_lightning.utilities.parsing import AttributeDict
from ride.core import Configs, RideMixin
from ride.utils.logging import getLogger
from ride.utils.utils import name
logger = getLogger("co3d")
| 35.082759 | 175 | 0.574209 | from operator import attrgetter
import torch
from continual import CoModule
from pytorch_lightning.utilities.parsing import AttributeDict
from ride.core import Configs, RideMixin
from ride.utils.logging import getLogger
from ride.utils.utils import name
logger = getLogger("co3d")
class Co3dBase(RideMixin):
    """Ride mixin that wires a Continual 3D network (``self.module``) into training.

    Declares the ``co3d_*`` hyperparameters, derives how many frames to request
    from the dataloader, and implements clip-style and frame-style forwards.
    """
    # Provided by the surrounding ride/continual lifecycle; declared for type checkers.
    hparams: AttributeDict
    module: CoModule
    def validate_attributes(self):
        """Fail fast if any declared hyperparameter is missing from ``self.hparams``."""
        for hparam in self.configs().names:
            # attrgetter raises AttributeError if the hyperparameter was not set.
            attrgetter(f"hparams.{hparam}")(self)
    @staticmethod
    def configs() -> Configs:
        """Declare the co3d-specific hyperparameters (each carries its own description)."""
        c = Configs()
        c.add(
            name="co3d_temporal_fill",
            type=str,
            default="zeros",
            choices=["zeros", "replicate"],
            strategy="choice",
            description="Fill mode for samples along temporal dimension. This is used at state initialisation and in `forward_steps` as padding along the temporal dimension.",
        )
        c.add(
            name="co3d_forward_mode",
            type=str,
            default="init_frame",
            choices=["clip", "frame", "init_frame", "init_clip", "clip_init_frame"],
            strategy="choice",
            description="Whether to compute clip or frame during forward. If 'clip_init_frame', the network is initialised with a clip and then frame forwards are applied.",
        )
        c.add(
            name="co3d_num_forward_frames",
            type=int,
            default=1,
            description="The number of frames to predict over",
        )
        c.add(
            name="co3d_forward_frame_delay",
            type=int,
            default=-1,
            strategy="choice",
            description="Number of frames forwards prior to final prediction in 'clip_init_frame' mode. If '-1', a delay of clip_length - 1 is used",
        )
        c.add(
            name="co3d_forward_prediction_delay",
            type=int,
            default=0,
            strategy="choice",
            description="Number of steps to delay the prediction relative to the frames",
        )
        c.add(
            name="temporal_window_size",
            type=int,
            default=8,
            strategy="choice",
            description="Temporal window size for global average pool.",
        )
        return c
    def __init__(self, hparams: AttributeDict, *args, **kwargs):
        # NOTE(review): super().__init__ is not invoked here — confirm the ride
        # mixin chain populates self.hparams before this constructor runs.
        self.dim_in = 3  # input channels; presumably RGB — TODO confirm
        self.hparams.frames_per_clip = self.hparams.temporal_window_size
    def on_init_end(self, hparams: AttributeDict, *args, **kwargs):
        """Finalize setup once all mixins are initialised.

        Computes ``frames_per_clip`` for the dataloader, fixes ``input_shape``,
        selects the module call mode, and optionally warms the model up for
        profiling. The ``hparams`` argument is unused; ``self.hparams`` is read
        instead (presumably set by the ride lifecycle — TODO confirm).
        """
        # Determine the frames_per_clip to ask from dataloader
        self.hparams.frames_per_clip = self.hparams.temporal_window_size
        if "init" in self.hparams.co3d_forward_mode:
            # Frames consumed while priming the continual state before the
            # frames that actually produce predictions.
            num_init_frames = max(
                self.module.receptive_field - self.module.padding - 1,
                self.hparams.co3d_forward_frame_delay - 1,
            )
            self.hparams.frames_per_clip = (
                num_init_frames
                + self.hparams.co3d_num_forward_frames
                + self.hparams.co3d_forward_prediction_delay
            )
        # From ActionRecognitionDatasets
        if self.hparams.co3d_forward_mode == "frame":
            self.hparams.frames_per_clip = 1
        # (channels, time, height, width) — batch dimension excluded.
        self.input_shape = (
            self.dim_in,
            self.hparams.frames_per_clip,
            self.hparams.image_size,
            self.hparams.image_size,
        )
        # Decide inference mode
        if "frame" in self.hparams.co3d_forward_mode:
            self.module.call_mode = "forward_steps"  # default = "forward"
        logger.info(f"Model receptive field: {self.module.receptive_field} frames")
        logger.info(f"Training loss: {name(self.loss)}")
        # If conducting profiling, ensure that the model has been warmed up
        # so that it doesn't output placeholder values
        if self.hparams.profile_model:
            logger.info("Warming model up")
            self.module(
                torch.randn(
                    (
                        self.hparams.batch_size,
                        self.dim_in,
                        self.module.receptive_field,
                        self.hparams.image_size,
                        self.hparams.image_size,
                    )
                )
            )
            # Reset continual-module bookkeeping so profiling starts from a
            # clean state after the warm-up pass.
            for m in self.module.modules():
                if hasattr(m, "state_index"):
                    m.state_index = 0
                if hasattr(m, "stride_index"):
                    m.stride_index = 0
    def forward(self, x):
        """Run the module on a batch and average over the temporal output dim.

        ``x`` is presumably (batch, channels, time, height, width) — the slices
        below operate on dim 2 — TODO confirm against the dataloader.
        """
        result = None
        if "init" in self.hparams.co3d_forward_mode:
            # Re-initialise the continual state, prime it with the leading
            # frames, then predict on the remainder.
            self.module.clean_state()
            num_init_frames = max(
                self.module.receptive_field - self.module.padding - 1,
                self.hparams.co3d_forward_frame_delay - 1,
            )
            self.module(x[:, :, :num_init_frames])  # = forward_steps don't save
            result = self.module(x[:, :, num_init_frames:])
        else:
            result = self.module(x)
        result = result.mean(dim=-1)
        return result
| 4,574 | 206 | 23 |
b1664cffd968bf82490505f180f2edab3159b619 | 1,105 | py | Python | api/api/migrations/0016_many_prepared_pdf_per_application.py | marzmehr/family-law-act-app | f036a23657e117eb04a8a0014e0153654ee97696 | [
"Apache-2.0"
] | 4 | 2020-04-06T23:42:41.000Z | 2022-03-20T18:32:59.000Z | api/api/migrations/0016_many_prepared_pdf_per_application.py | marzmehr/family-law-act-app | f036a23657e117eb04a8a0014e0153654ee97696 | [
"Apache-2.0"
] | 86 | 2020-03-11T01:33:07.000Z | 2022-03-31T21:45:04.000Z | api/api/migrations/0016_many_prepared_pdf_per_application.py | marzmehr/family-law-act-app | f036a23657e117eb04a8a0014e0153654ee97696 | [
"Apache-2.0"
] | 10 | 2020-01-22T17:28:35.000Z | 2021-07-29T20:42:22.000Z | # Generated by Django 3.1.7 on 2021-04-13 20:33
from django.db import migrations, models
| 29.864865 | 125 | 0.59276 | # Generated by Django 3.1.7 on 2021-04-13 20:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow several prepared PDFs per application.

    Removes the single ``prepared_pdf`` field from ``application`` and keys
    prepared PDFs by ``(application_id, pdf_type)``, adding version fields to
    both models.
    """
    dependencies = [
        ('api', '0015_application_id_to_prepared_pdf'),
    ]
    operations = [
        # The single prepared-PDF pointer is dropped; PDFs are now looked up
        # from the preparedpdf side by application id and type.
        migrations.RemoveField(
            model_name='application',
            name='prepared_pdf',
        ),
        migrations.AddField(
            model_name='application',
            name='version',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        migrations.AddField(
            model_name='preparedpdf',
            name='pdf_type',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        migrations.AddField(
            model_name='preparedpdf',
            name='version',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        # At most one prepared PDF of a given type per application.
        migrations.AddConstraint(
            model_name='preparedpdf',
            constraint=models.UniqueConstraint(fields=('application_id', 'pdf_type'), name='unique_pdf_type_application_id'),
        ),
    ]
| 0 | 991 | 23 |
6e420ed3b6ec4029fdd935f505d00b2f84d651d4 | 588 | py | Python | opencv/pysource/08.trackbars.py | hainguyenvan/images-processing | c7e9701b2261573ad244c0ebc669d60bd3f0a1a9 | [
"MIT"
] | null | null | null | opencv/pysource/08.trackbars.py | hainguyenvan/images-processing | c7e9701b2261573ad244c0ebc669d60bd3f0a1a9 | [
"MIT"
] | null | null | null | opencv/pysource/08.trackbars.py | hainguyenvan/images-processing | c7e9701b2261573ad244c0ebc669d60bd3f0a1a9 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
def nothing(x):
    """No-op callback: cv2.createTrackbar requires a callable, but the
    slider values are polled with getTrackbarPos instead."""
    pass

# Bug fix: `nothing` was referenced below but never defined in this copy,
# which raised NameError on the first cv2.createTrackbar call.
cap = cv2.VideoCapture(0)
cv2.namedWindow("frame")
cv2.createTrackbar("test", "frame", 50, 500, nothing)
cv2.createTrackbar("color/gray", "frame", 0, 1, nothing)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Overlay the current "test" slider value on the frame.
    test = cv2.getTrackbarPos("test", "frame")
    font = cv2.FONT_HERSHEY_COMPLEX
    cv2.putText(frame, str(test), (50, 150), font, 4, (0, 0, 255))
    # Slider position 1 switches the frame to grayscale.
    s = cv2.getTrackbarPos("color/gray", "frame")
    if s != 0:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): the frame is never displayed and cv2.waitKey is never
    # called, so the window cannot refresh — confirm whether
    # cv2.imshow/waitKey lines were dropped from this snippet.
| 21 | 66 | 0.62585 | import cv2
import numpy as np
def nothing(x):
    """No-op trackbar callback; slider values are polled via getTrackbarPos."""
    pass
# Open the default camera and attach two trackbars to the "frame" window:
# "test" (a number overlaid on the image) and "color/gray" (a mode toggle).
cap = cv2.VideoCapture(0)
cv2.namedWindow("frame")
cv2.createTrackbar("test", "frame", 50, 500, nothing)
cv2.createTrackbar("color/gray", "frame", 0, 1, nothing)
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera stream ended or frame grab failed.
        break
    test = cv2.getTrackbarPos("test", "frame")
    font = cv2.FONT_HERSHEY_COMPLEX
    cv2.putText(frame, str(test), (50, 150), font, 4, (0, 0, 255))
    s = cv2.getTrackbarPos("color/gray", "frame")
    if s != 0:
        # Toggle set: convert the annotated frame to grayscale.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
| 3 | 0 | 23 |
bb15b0bcc5bb96af358e4b9a602b2adad72a8a8e | 32,496 | py | Python | src/test/xos/xosTest.py | huseyinbolt/cord-tester | ed9b79916e6326a45bfaf3227b8ff922d76df4f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/test/xos/xosTest.py | huseyinbolt/cord-tester | ed9b79916e6326a45bfaf3227b8ff922d76df4f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/test/xos/xosTest.py | huseyinbolt/cord-tester | ed9b79916e6326a45bfaf3227b8ff922d76df4f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import subprocess
from docker import Client
from itertools import chain
from nose.tools import *
from CordContainer import *
from CordTestUtils import log_test as log
import threading
import time
import os
import json
import pexpect
import urllib
log.setLevel('INFO')
# Flatten one level of nesting. Alias the C-implemented itertools helper
# directly instead of wrapping it in a redundant lambda.
flatten = chain.from_iterable
| 60.066543 | 177 | 0.75597 |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import subprocess
from docker import Client
from itertools import chain
from nose.tools import *
from CordContainer import *
from CordTestUtils import log_test as log
import threading
import time
import os
import json
import pexpect
import urllib
log.setLevel('INFO')
# Flatten one level of nesting. Alias the C-implemented itertools helper
# directly instead of wrapping it in a redundant lambda.
flatten = chain.from_iterable
class xos_exchange(unittest.TestCase):
dckr = Client()
test_path = os.path.dirname(os.path.realpath(__file__))
XOS_BASE_CONTAINER_IMAGE = 'xosproject/xos-base:latest'
XOS_BASE_CONTAINER_NAME = 'xos-base'
XOS_BASE_CONTAINER_PORTS = [8000]
XOS_SYN_OPENSTACK_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-openstack'
XOS_SYN_OPENSTACK_CONTAINER_NAME = 'xos-synchronizer'
XOS_SYN_OPENSTACK_CONTAINER_PORTS = [8000]
XOS_POSTGRESQL_CONTAINER_IMAGE = 'xosproject/xos-postgres'
XOS_POSTGRESQL_CONTAINER_NAME = 'xos-db-postgres'
XOS_POSTGRESQL_CONTAINER_PORTS = [5432]
XOS_SYNDICATE_MS_CONTAINER_IMAGE = 'xosproject/syndicate-ms'
XOS_SYNDICATE_MS_CONTAINER_NAME = 'xos-syndicate-ms'
XOS_SYNDICATE_MS_CONTAINER_PORTS = [8080]
XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-vtr'
XOS_SYNCHRONIZER_VTR_CONTAINER_NAME = 'xos-synchronizer-vtr'
XOS_SYNCHRONIZER_VTR_CONTAINER_PORTS = [8080]
XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-vsg'
XOS_SYNCHRONIZER_VSG_CONTAINER_NAME = 'xos-synchronizer-vsg'
XOS_SYNCHRONIZER_VSG_CONTAINER_PORTS = [8080]
XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-onos'
XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME = 'xos-synchronizer-onos'
XOS_SYNCHRONIZER_ONOS_CONTAINER_PORTS = [8080]
XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-fabric'
XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME = 'xos-synchronizer-fabric'
XOS_SYNCHRONIZER_FABRIC_CONTAINER_PORTS = [8080]
XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-vtn'
XOS_SYNCHRONIZER_VTN_CONTAINER_NAME = 'xos-synchronizer-vtn'
XOS_SYNCHRONIZER_VTN_CONTAINER_PORTS = [8080]
XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-onboarding'
XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_NAME = 'xos-synchronizer-onboarding'
XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_PORTS = [8080]
XOS_API_ERROR_STRING_MATCH_1 = 'The resource you\'re looking for doesn\'t exist'
XOS_API_ERROR_STRING_MATCH_2 = 'Application Error'
XOS_API_UTILS_POST_LOGIN = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/login/'
#XOS_API_UTILS_GET_PORTFORWARDING = 'https://private-anon-873978896e-xos.apiary-mock.com/api/portforwarding/port'
XOS_API_UTILS_GET_PORT_FORWARDING = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/portforwarding/'
XOS_API_UTILS_GET_SLICES_PLUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/slicesplus/'
XOS_API_UTILS_GET_SYNCHRONIZER = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/synchronizer/'
XOS_API_UTILS_GET_ONBOARDING_STATUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/onboarding/service/ready'
XOS_API_UTILS_POST_TOSCA_RECIPE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/tosca/run/'
XOS_API_UTILS_GET_SSH_KEYS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/sshkeys/'
XOS_API_TENANT_GET_ALL_SUBSCRIBERS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/'
XOS_API_TENANT_GET_SUBSCRIBER_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/'
XOS_API_TENANT_DELETE_SUBSCRIBER = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/'
XOS_API_TENANT_GET_SUBSCRIBER_FEATURE_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/'
XOS_API_TENANT_GET_READ_SUBSCRIBER_UPLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uplink_speed/'
XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_UPLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uplink_speed/'
XOS_API_TENANT_GET_READ_SUBSCRIBER_DOWNLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/downlink_speed/'
XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_DOWNLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/downlink_speed/'
XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_CDN = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/cdn/'
XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_CDN = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/cdn/'
XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_UVERSE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uverse/'
XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_UVERSE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uverse/'
XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_STATUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/status/'
XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_STATUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/status/'
XOS_API_TENANT_GET_ALL_TRUCKROLL = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
XOS_API_TENANT_POST_CREATE_TRUCKROLL = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
XOS_API_TENANT_GET_TRUCKROLL_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
XOS_API_TENANT_DELETE_TRUCKROLL_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
XOS_API_TENANT_GET_ALL_vOLT = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/volt/volt_id/'
XOS_API_TENANT_POST_CREATE_vOLT = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/volt/volt_id/'
XOS_API_TENANT_GET_vOLT_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/volt/volt_id/'
XOS_API_TENANT_GET_ALL_ONOS_APPS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/onos/app/'
XOS_API_SERVICE_GET_ALL_EXAMPLE_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/service/exampleservice/'
XOS_API_SERVICE_GET_ALL_ONOS_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/service/onos/'
XOS_API_SERVICE_GET_ALL_vSG = 'https://private-anon-873978896e-xos.apiary-mock.com/api/service/vsg/'
XOS_API_CORE_GET_ALL_DEPLOYMENTS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
XOS_API_CORE_POST_CREATE_DEPLOYMENTS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
XOS_API_CORE_GET_DEPLOYMENT_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
XOS_API_CORE_DELETE_DEPLOYMENTS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
XOS_API_CORE_GET_ALL_FLAVORS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavoryys/id/'
XOS_API_CORE_POST_CREATE_FLAVORS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavors/id/'
XOS_API_CORE_GET_FLAVOR_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavors/id/'
XOS_API_CORE_DELETE_FLAVORS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavors/id/'
XOS_API_CORE_GET_ALL_INSTANCES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/'
XOS_API_CORE_POST_CREATE_INSTANCES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/?no_hyperlinks=1'
XOS_API_CORE_GET_INSTANCE_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/id/'
XOS_API_CORE_DELETE_INSTANCES= 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/id/'
XOS_API_CORE_GET_ALL_NODES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/nodes/id/'
XOS_API_CORE_GET_ALL_SERVICES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
XOS_API_CORE_POST_CREATE_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
XOS_API_CORE_GET_SERVICE_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
XOS_API_CORE_DELETE_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
XOS_API_CORE_GET_ALL_SITES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/sites/'
XOS_API_CORE_GET_SITES_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/sites/id/'
XOS_API_CORE_GET_ALL_SLICES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/slices/id/'
XOS_API_CORE_GET_ALL_USERS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/users/id/'
    def setUp(self):
        '''Per-test setup; containers themselves are started lazily by the checks.'''
        self.maxDiff = None ##for assert_equal compare outputs on failure
    def tearDown(self):
        '''Per-test teardown; containers are intentionally left running.'''
        log.info('Tear down setup')
        # NOTE(review): CURRENT_PORT_NUM is reset here but never read in the
        # visible code -- confirm it is used elsewhere in the suite.
        self.CURRENT_PORT_NUM = 4
def exists(self, name):
return '/{0}'.format(name) in list(flatten(n['Names'] for n in self.dckr.containers()))
def img_exists(self, image):
cnt = filter(lambda c: c['Image'] == image, self.dckr.containers())
return image in [ctn['RepoTags'][0] if ctn['RepoTags'] else '' for ctn in self.dckr.images()]
def xos_containers_check(self, name, image):
if self.exists(name) != True:
if name == self.XOS_BASE_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosBase = Xos_base(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYN_OPENSTACK_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSynOpenstack = XosSynchronizerOpenstack(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_POSTGRESQL_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosPostgresql = XosPostgresql(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYNDICATE_MS_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSyndicateMs = XosSyndicateMs(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSynOpenstack = XosSyncVtr(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSynOpenstack = XosSyncVsg(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSynOpenstack = XosSyncOnos(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSynOpenstack = XosSyncFabric(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSynOpenstack = XosSyncVtn(prefix = Container.IMAGE_PREFIX, update = False)
if name == self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_NAME:
log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
xosSynOpenstack = XosSynchronizerOnboarding(prefix = Container.IMAGE_PREFIX, update = False)
if self.img_exists(image) != True:
log.info('%s container image is not built on host' %name)
assert_equal(False, True)
if self.exists(name) != True:
log.info('%s container image is build on host' %name)
assert_equal(False, True)
def container_status(self, image, name):
''' This function is checking that container is up and running'''
self.xos_containers_check(name, image)
container_info = self.dckr.containers(filters ={'name':name, 'status':'running'})
log.info('Xos container info= %s' %container_info)
if not container_info:
## forcely failing test case
log.info('%s container is not running, container info %s' %(name,container_info))
assert_equal(False, True)
else:
container_status = container_info[0]['Status']
log.info('Xos container status= %s' %container_status)
assert_equal(container_status.split(' ')[0], 'Up')
return container_info
def container_ping(self, image, name):
''' This function is checking if container is reachable '''
container_info = self.container_status(image= image, name= name)
container_ip = container_info[0]['NetworkSettings']['Networks']['bridge']['IPAddress']
ping_status = os.system('ping {} -c 3'.format(container_ip))
if ping_status != 0:
log.info('%s container is not reachable, response %s = '%(name,ping_status))
assert_equal(ping_status, 0)
log.info('%s container is not reachable, response = %s'%(name,ping_status))
assert_equal(ping_status, 0)
def container_listening_ports_info(self, image, name, ports_list):
''' This function is checking that container ports are as excpeted '''
container_public_ports = []
container_info = self.container_status(image= image, name= name)
container_ports = container_info[0]['Ports']
container_public_ports.append(container_ports[0]['PublicPort'])
log.info('%s container is listening on these ports = %s'%(name,container_ports))
log.info('%s container is listening on these public ports = %s'%(name,container_public_ports))
for n in range(0,len(ports_list)):
port = ports_list[n]
if port in container_public_ports:
assert_equal(True, True)
else:
log.info('%s container is not listening on %s port which is not expected' %(name,n))
assert_equal(False, True)
    def container_stop_start(self):
        ''' This function is checking if container is stopped and started running again'''
        # NOTE(review): unimplemented stub -- the docstring describes intended
        # behavior only; no stop/start logic exists yet.
def validate_url_response_data(self, url):
''' This function is checking url responce and cross check errors on it output '''
response = urllib.urlopen(url)
data = response.read()
log.info('This is PORT FORWARDING URL reponse data {}'.format(data))
if not data:
log.info('{} Url did not returned any output from opencloud setup'.format(url))
assert_equal(True, False)
if self.XOS_API_ERROR_STRING_MATCH_1 in data:
log.info('Not an expected output from url'.format(url))
assert_equal(True, False)
if self.XOS_API_ERROR_STRING_MATCH_2 in data:
log.info('Not an expected output from url'.format(url))
assert_equal(True, False)
    @nottest
    def test_xos_base_container_status(self):
        '''Check the xos-base container is up and running.'''
        self.container_status(image = self.XOS_BASE_CONTAINER_IMAGE, name = self.XOS_BASE_CONTAINER_NAME)
    @nottest
    def test_xos_base_container_ping(self):
        '''Check the xos-base container is reachable over the bridge network.'''
        self.container_ping(image = self.XOS_BASE_CONTAINER_IMAGE, name = self.XOS_BASE_CONTAINER_NAME)
    @nottest
    def test_xos_base_container_listening_ports(self):
        '''Check the xos-base container publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_BASE_CONTAINER_IMAGE, name = self.XOS_BASE_CONTAINER_NAME,
                                          ports_list = self.XOS_BASE_CONTAINER_PORTS)
    def test_xos_sync_openstack_container_status(self):
        '''Check the openstack synchronizer container is up and running.'''
        self.container_status(image = self.XOS_SYN_OPENSTACK_CONTAINER_IMAGE, name = self.XOS_SYN_OPENSTACK_CONTAINER_NAME)
    def test_xos_sync_openstack_container_ping(self):
        '''Check the openstack synchronizer container is reachable.'''
        self.container_ping(image = self.XOS_SYN_OPENSTACK_CONTAINER_IMAGE, name = self.XOS_SYN_OPENSTACK_CONTAINER_NAME)
    def test_xos_sync_openstack_container_listening_ports(self):
        '''Check the openstack synchronizer publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_SYN_OPENSTACK_CONTAINER_IMAGE,
                                          name = self.XOS_SYN_OPENSTACK_CONTAINER_NAME,
                                          ports_list = self.XOS_SYN_OPENSTACK_CONTAINER_PORTS)
    def test_xos_postgresql_container_status(self):
        '''Check the postgres container is up and running.'''
        self.container_status(image = self.XOS_POSTGRESQL_CONTAINER_IMAGE, name = self.XOS_POSTGRESQL_CONTAINER_NAME)
    def test_xos_postgresql_container_ping(self):
        '''Check the postgres container is reachable.'''
        self.container_ping(image = self.XOS_POSTGRESQL_CONTAINER_IMAGE, name = self.XOS_POSTGRESQL_CONTAINER_NAME)
    def test_xos_postgresql_container_listening_ports(self):
        '''Check the postgres container publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_POSTGRESQL_CONTAINER_IMAGE,
                                          name = self.XOS_POSTGRESQL_CONTAINER_NAME,
                                          ports_list = self.XOS_POSTGRESQL_CONTAINER_PORTS)
    def test_xos_syndicate_ms_container_status(self):
        '''Check the syndicate-ms container is up and running.'''
        self.container_status(image = self.XOS_SYNDICATE_MS_CONTAINER_IMAGE, name = self.XOS_SYNDICATE_MS_CONTAINER_NAME)
    def test_xos_syndicate_ms_container_ping(self):
        '''Check the syndicate-ms container is reachable.'''
        self.container_ping(image = self.XOS_SYNDICATE_MS_CONTAINER_IMAGE, name = self.XOS_SYNDICATE_MS_CONTAINER_NAME)
    def test_xos_syndicate_ms_container_listening_ports(self):
        '''Check the syndicate-ms container publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_SYNDICATE_MS_CONTAINER_IMAGE,
                                          name = self.XOS_SYNDICATE_MS_CONTAINER_NAME,
                                          ports_list = self.XOS_SYNDICATE_MS_CONTAINER_PORTS)
    @nottest
    def test_xos_sync_vtr_container_status(self):
        '''Check the VTR synchronizer container is up and running.'''
        self.container_status(image = self.XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME)
    @nottest
    def test_xos_sync_vtr_container_ping(self):
        '''Check the VTR synchronizer container is reachable.'''
        self.container_ping(image = self.XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME)
    @nottest
    def ztest_xos_sync_vtr_container_listening_ports(self):
        '''Check the VTR synchronizer publishes the expected ports (disabled).'''
        # NOTE(review): the 'ztest_' prefix (plus @nottest) keeps nose from ever
        # collecting this test -- confirm whether the rename was intentional.
        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE,
                                          name = self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME,
                                          ports_list = self.XOS_SYNCHRONIZER_VTR_CONTAINER_PORTS)
    @nottest
    def test_xos_sync_vsg_container_status(self):
        '''Check the vSG synchronizer container is up and running.'''
        self.container_status(image = self.XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME)
    @nottest
    def test_xos_sync_vsg_container_ping(self):
        '''Check the vSG synchronizer container is reachable.'''
        self.container_ping(image = self.XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME)
    @nottest
    def test_xos_sync_vsg_container_listening_ports(self):
        '''Check the vSG synchronizer publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE,
                                          name = self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME,
                                          ports_list = self.XOS_SYNCHRONIZER_VSG_CONTAINER_PORTS)
    @nottest
    def test_xos_sync_onos_container_status(self):
        '''Check the ONOS synchronizer container is up and running.'''
        self.container_status(image = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME)
    @nottest
    def test_xos_sync_onos_container_ping(self):
        '''Check the ONOS synchronizer container is reachable.'''
        self.container_ping(image = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME)
    @nottest
    def test_xos_sync_onos_container_listening_ports(self):
        '''Check the ONOS synchronizer publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE,
                                          name = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME,
                                          ports_list = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_PORTS)
    @nottest
    def test_xos_sync_fabric_container_status(self):
        '''Check the fabric synchronizer container is up and running.'''
        self.container_status(image = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME)
    @nottest
    def test_xos_sync_fabric_container_ping(self):
        '''Check the fabric synchronizer container is reachable.'''
        self.container_ping(image = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME)
    @nottest
    def test_xos_sync_fabric_container_listening_ports(self):
        '''Check the fabric synchronizer publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE,
                                          name = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME,
                                          ports_list = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_PORTS)
    @nottest
    def test_xos_sync_vtn_container_status(self):
        '''Check the VTN synchronizer container is up and running.'''
        self.container_status(image = self.XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME)
    @nottest
    def test_xos_sync_vtn_container_ping(self):
        '''Check the VTN synchronizer container is reachable.'''
        self.container_ping(image = self.XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME)
    @nottest
    def test_xos_sync_vtn_container_listening_ports(self):
        '''Check the VTN synchronizer publishes the expected ports.'''
        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE,
                                          name = self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME,
                                          ports_list = self.XOS_SYNCHRONIZER_VTN_CONTAINER_PORTS)
def test_xos_sync_onboarding_container_status(self):
self.container_status(image = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE)
def test_xos_sync_onboarding_container_ping(self):
self.container_ping(image = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE)
    # Verify the onboarding synchronizer container is listening on its
    # expected ports (delegates to the shared listening-ports helper).
    def test_xos_sync_onboarding_container_listening_ports(self):
        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE,
                                            name = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_NAME,
                                            ports_list = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_PORTS)
    # Exercise the XOS login endpoint. Unlike the sibling tests this one uses
    # urllib.urlopen (Python 2) directly rather than
    # validate_url_response_data.
    # NOTE(review): the response body is read into `data` but never asserted
    # on, so this test only verifies the request does not raise — confirm
    # whether a content check was intended.
    def test_xos_api_post_login(self):
        response = urllib.urlopen(self.XOS_API_UTILS_POST_LOGIN)
        data = response.read()
    # ------------------------------------------------------------------
    # XOS REST API smoke tests. Every test below delegates to
    # validate_url_response_data with one endpoint URL constant; the helper
    # performs the request and validates the response.
    # ------------------------------------------------------------------
    # --- utils endpoints ---
    def test_xos_api_get_utils_port_forwarding(self):
        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_PORT_FORWARDING)
    def test_xos_api_get_utils_slices_plus(self):
        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_SLICES_PLUS)
    def test_xos_api_get_utils_synchronizer(self):
        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_SYNCHRONIZER)
    def test_xos_api_get_utils_onboarding_status(self):
        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_ONBOARDING_STATUS)
    def test_xos_api_post_utils_tosca_recipe(self):
        self.validate_url_response_data(url = self.XOS_API_UTILS_POST_TOSCA_RECIPE)
    def test_xos_api_get_utils_ssh_keys(self):
        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_SSH_KEYS)
    # --- tenant endpoints: subscribers and per-subscriber features ---
    def test_xos_api_get_tenant_all_subscribers(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_SUBSCRIBERS)
    def test_xos_api_get_tenant_subscribers_details(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_SUBSCRIBER_DETAILS)
    def test_xos_api_get_tenant_subscriber_delete(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_DELETE_SUBSCRIBER)
    def test_xos_api_get_tenant_subscribers_feature_details(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_SUBSCRIBER_FEATURE_DETAILS)
    def test_xos_api_get_tenant_read_subscribers_feature_uplink_speed(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_UPLINK_SPEED)
    def test_xos_api_tenant_put_update_subscribers_feature_uplink_speed(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_UPLINK_SPEED)
    def test_xos_api_get_tenant_read_subscribers_feature_downlink_speed(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_DOWNLINK_SPEED)
    def test_xos_api_tenant_put_update_subscribers_feature_downlink_speed(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_DOWNLINK_SPEED)
    def test_xos_api_get_tenant_read_subscribers_feature_cdn(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_CDN)
    def test_xos_api_tenant_put_update_subscribers_feature_cdn(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_CDN)
    def test_xos_api_get_tenant_read_subscribers_feature_uverse(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_UVERSE)
    def test_xos_api_tenant_put_update_subscribers_feature_uverse(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_UVERSE)
    def test_xos_api_get_tenant_read_subscribers_feature_status(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_STATUS)
    def test_xos_api_tenant_put_update_subscribers_feature_status(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_STATUS)
    # --- tenant endpoints: truckroll, vOLT, ONOS apps ---
    def test_xos_api_tenant_get_all_truckroll(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_TRUCKROLL)
    def test_xos_api_tenant_post_create_truckroll(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_POST_CREATE_TRUCKROLL)
    def test_xos_api_tenant_get_truckroll_details(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_TRUCKROLL_DETAILS)
    def test_xos_api_tenant_delete_trucroll(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_DELETE_TRUCKROLL_DETAILS)
    def test_xos_api_tenant_get_all_volt(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_vOLT)
    def test_xos_api_tenant_post_create_vOLT(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_POST_CREATE_vOLT)
    def test_xos_api_tenant_get_volt_details(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_vOLT_DETAILS)
    def test_xos_api_tenant_get_all_onos_apps(self):
        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_ONOS_APPS)
    # --- service endpoints ---
    def test_xos_api_service_get_all_example_service(self):
        self.validate_url_response_data(url = self.XOS_API_SERVICE_GET_ALL_EXAMPLE_SERVICE)
    def test_xos_api_service_get_all_onos_service(self):
        self.validate_url_response_data(url = self.XOS_API_SERVICE_GET_ALL_ONOS_SERVICE)
    def test_xos_api_service_get_all_vsg(self):
        self.validate_url_response_data(url = self.XOS_API_SERVICE_GET_ALL_vSG)
    # --- core endpoints: deployments, flavors, instances, nodes,
    #     services, sites, slices, users ---
    def test_xos_api_core_get_all_deployments(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_DEPLOYMENTS)
    def test_xos_api_core_post_create_deployments(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_DEPLOYMENTS)
    def test_xos_api_core_get_deployment_details(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_DEPLOYMENT_DETAILS)
    def test_xos_api_core_delete_deployment(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_DEPLOYMENTS)
    def test_xos_api_core_get_all_flavors(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_FLAVORS)
    def test_xos_api_core_post_create_flavors(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_FLAVORS)
    def test_xos_api_core_get_flavor_details(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_FLAVOR_DETAILS)
    def test_xos_api_core_delete_flavors(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_FLAVORS)
    def test_xos_api_core_get_all_instances(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_INSTANCES)
    def test_xos_api_core_post_create_instances(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_INSTANCES)
    def test_xos_api_core_get_instance_details(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_INSTANCE_DETAILS)
    def test_xos_api_core_delete_instance(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_INSTANCES)
    def test_xos_api_core_get_all_nodes(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_NODES)
    def test_xos_api_core_get_all_services(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_SERVICES)
    def test_xos_api_core_post_create_service(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_SERVICE)
    def test_xos_api_core_get_service_details(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_SERVICE_DETAILS)
    def test_xos_api_core_delete_service(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_SERVICE)
    def test_xos_api_core_get_all_sites(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_SITES)
    def test_xos_api_core_get_site_details(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_SITES_DETAILS)
    def test_xos_api_core_get_all_slices(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_SLICES)
    def test_xos_api_core_get_all_users(self):
        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_USERS)
| 15,937 | 15,022 | 23 |
e143d64b0edf7e20756c5107443336d3756cb153 | 7,236 | py | Python | src/madpack/utilities.py | spring-operator/madlib | daf67f81b608396d8e3c04a9bf9890449a0a5b3c | [
"Apache-2.0"
] | null | null | null | src/madpack/utilities.py | spring-operator/madlib | daf67f81b608396d8e3c04a9bf9890449a0a5b3c | [
"Apache-2.0"
] | null | null | null | src/madpack/utilities.py | spring-operator/madlib | daf67f81b608396d8e3c04a9bf9890449a0a5b3c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Madpack utilities
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from itertools import izip_longest
import re
import unittest
def is_rev_gte(left, right):
    """ Return if left >= right

    Args:
        @param left: list. Revision numbers in a list form (as returned by
                           _get_rev_num).
        @param right: list. Revision numbers in a list form (as returned by
                            _get_rev_num).

    Returns:
        Boolean

    If left and right are all numeric then regular list comparison occurs.
    If either one contains a string, then comparison occurs till both have int.
    First list to have a string is considered smaller
    (including if the other does not have an element in corresponding index)

    Examples:
        [1, 9, 0] >= [1, 9, 0]
        [1, 9, 1] >= [1, 9, 0]
        [1, 9, 1] >= [1, 9]
        [1, 10] >= [1, 9, 1]
        [1, 9, 0] >= [1, 9, 0, 'dev']
        [1, 9, 1] >= [1, 9, 0, 'dev']
        [1, 9, 0] >= [1, 9, 'dev']
        [1, 9, 'rc'] >= [1, 9, 'dev']
        [1, 9, 'rc', 0] >= [1, 9, 'dev', 1]
        [1, 9, 'rc', '1'] >= [1, 9, 'rc', '1']
    """
    def all_numeric(l):
        # Bug fix: this nested helper was missing from this copy of the
        # function, so both calls below raised NameError. An empty list
        # counts as all-numeric.
        return not l or all(isinstance(i, int) for i in l)

    if all_numeric(left) and all_numeric(right):
        return left >= right
    else:
        for i, (l_e, r_e) in enumerate(izip_longest(left, right)):
            if isinstance(l_e, int) and isinstance(r_e, int):
                if l_e == r_e:
                    continue
                else:
                    return l_e > r_e
            elif isinstance(l_e, int) or isinstance(r_e, int):
                # a numeric element outranks a string or a missing element
                # at the same position:
                # [1, 9, 0] > [1, 9, 'dev']
                # [1, 9, 0] > [1, 9]
                return isinstance(l_e, int)
            else:
                # both are not int
                if r_e is None:
                    # [1, 9, 'dev'] < [1, 9]
                    return False
                else:
                    return l_e is None or left[i:] >= right[i:]
        return True
# ----------------------------------------------------------------------
def get_rev_num(rev):
    """
    Convert a version string into a list usable for ordering comparisons.

    @param rev version text, expected to follow Semantic Versioning
               (semver.org), e.g. '1.9.0', '1.0.0-alpha', '1.0.0-x.7.z.92',
               '1.0.0-beta+exp.sha.5114f85'.

    Returns:
        List. The dotted numeric part is parsed into ints and padded with
        zeros to length 3; any pre-release/build identifiers (split on
        '-', '+' or '_') are appended as strings. Invalid version strings
        yield [0].

    Examples:
        '1.9.0'            -> [1, 9, 0]
        '1.9'              -> [1, 9, 0]
        '1.9-alpha'        -> [1, 9, 0, 'alpha']
        '1.9-alpha+dc65ab' -> [1, 9, 0, 'alpha', 'dc65ab']
        'a.123'            -> [0]
    """
    try:
        pieces = re.split('[-+_]', rev)
        # numeric (dotted) portion, padded with zeros to length 3
        numeric = [int(part) for part in pieces[0].split('.')]
        while len(numeric) < 3:
            numeric.append(0)
        # pre-release / build identifiers kept as strings
        identifiers = [str(piece) for piece in pieces[1:]]
        result = numeric + identifiers
        return result if result else [0]
    except (ValueError, TypeError):
        # invalid revision (non-numeric core, or rev is not a string)
        return [0]
# ------------------------------------------------------------------------------
# -----------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------
# Run the unit tests when executed directly.
# NOTE(review): no TestCase subclasses are defined in this copy of the
# module, so discovery finds nothing here — confirm against the original
# file, which defines a RevTest class.
if __name__ == "__main__":
    unittest.main()
| 40.424581 | 87 | 0.52681 | #!/usr/bin/env python#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Madpack utilities
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from itertools import izip_longest
import re
import unittest
def is_rev_gte(left, right):
    """Return True when revision list ``left`` >= revision list ``right``.

    Both arguments are revision lists as produced by get_rev_num. Two
    purely numeric lists compare with ordinary list comparison. Otherwise
    the lists are walked element by element: equal ints advance the scan,
    unequal ints decide immediately, an int outranks a non-int at the same
    position, a non-int paired with a missing element makes its side the
    smaller one, and two non-ints fall back to comparing the remaining
    suffixes.

    Examples that hold:
        [1, 9, 1] >= [1, 9, 0]
        [1, 10] >= [1, 9, 1]
        [1, 9, 0] >= [1, 9, 0, 'dev']
        [1, 9, 'rc', 0] >= [1, 9, 'dev', 1]
    """
    def _only_ints(seq):
        # an empty list counts as purely numeric
        return not seq or all(isinstance(part, int) for part in seq)

    if _only_ints(left) and _only_ints(right):
        return left >= right

    for pos, (lhs, rhs) in enumerate(izip_longest(left, right)):
        lhs_is_int = isinstance(lhs, int)
        rhs_is_int = isinstance(rhs, int)
        if lhs_is_int and rhs_is_int:
            if lhs != rhs:
                return lhs > rhs
            # equal ints: keep scanning
        elif lhs_is_int or rhs_is_int:
            # an int beats a string/missing element at the same position:
            # [1, 9, 0] > [1, 9, 'dev'] and [1, 9, 0] > [1, 9]
            return lhs_is_int
        elif rhs is None:
            # left still has a non-int element: [1, 9, 'dev'] < [1, 9]
            return False
        else:
            # two non-int elements: decide on the remaining suffixes
            return lhs is None or left[pos:] >= right[pos:]
    return True
# ----------------------------------------------------------------------
def get_rev_num(rev):
    """Parse a semver-style version string into a comparison list.

    @param rev version text following Semantic Versioning (semver.org),
               e.g. '1.9.0', '1.0.0-alpha.1', '1.0.0-beta+exp.sha.5114f85'.

    Returns:
        List. Numeric components become ints, zero-padded to length 3;
        identifier components (split on '-', '+' or '_') are appended as
        strings. Anything unparseable yields [0].

    Examples:
        '1.9.0'            -> [1, 9, 0]
        '1.9'              -> [1, 9, 0]
        '1.9-alpha'        -> [1, 9, 0, 'alpha']
        '1.9-alpha+dc65ab' -> [1, 9, 0, 'alpha', 'dc65ab']
        'a.123'            -> [0]
    """
    try:
        chunks = re.split('[-+_]', rev)
        parsed = [int(token) for token in chunks[0].split('.')]
        # normalize the numeric head to exactly three entries
        parsed.extend([0] * (3 - len(parsed)))
        for identifier in chunks[1:]:
            parsed.append(str(identifier))
        return parsed or [0]
    except (ValueError, TypeError):
        # non-numeric core, or rev was not a string at all
        return [0]
# ------------------------------------------------------------------------------
# -----------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------
class RevTest(unittest.TestCase):
    """Unit tests for the get_rev_num / is_rev_gte revision helpers."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    # parsing of version strings into comparison lists
    def test_get_rev_num(self):
        # not using assertGreaterEqual to keep Python 2.6 compatibility
        self.assertTrue(get_rev_num('4.3.10') >= get_rev_num('4.3.5'))
        self.assertTrue(get_rev_num('1.9.10-dev') >= get_rev_num('1.9.9'))
        self.assertNotEqual(get_rev_num('1.9.10-dev'), get_rev_num('1.9.10'))
        self.assertEqual(get_rev_num('1.9.10'), [1, 9, 10])
        self.assertEqual(get_rev_num('abc1.9.10'), [0])
        self.assertEqual(get_rev_num('1.0.0+20130313144700'),
                         [1, 0, 0, '20130313144700'])
        self.assertNotEqual(get_rev_num('1.0.0+20130313144700'),
                            get_rev_num('1.0.0-beta+exp.sha.5114f85'))
    # ordering of parsed revision lists, including pre-release identifiers
    def test_is_rev_gte(self):
        # 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
        # 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0
        self.assertTrue(is_rev_gte([], []))
        self.assertTrue(is_rev_gte([1, 9], [1, None]))
        self.assertFalse(is_rev_gte([1, None], [1, 9]))
        self.assertTrue(is_rev_gte(get_rev_num('4.3.10'), get_rev_num('4.3.5')))
        self.assertTrue(is_rev_gte(get_rev_num('1.9.0'), get_rev_num('1.9.0')))
        self.assertTrue(is_rev_gte(get_rev_num('1.9.1'), get_rev_num('1.9.0')))
        self.assertTrue(is_rev_gte(get_rev_num('1.9.1'), get_rev_num('1.9')))
        self.assertTrue(is_rev_gte(get_rev_num('1.9.0'), get_rev_num('1.9.0-dev')))
        self.assertTrue(is_rev_gte(get_rev_num('1.9.1'), get_rev_num('1.9-dev')))
        self.assertTrue(is_rev_gte(get_rev_num('1.9.0-dev'), get_rev_num('1.9.0-dev')))
        self.assertTrue(is_rev_gte([1, 9, 'rc', 1], [1, 9, 'dev', 0]))
        self.assertFalse(is_rev_gte(get_rev_num('1.9.1'), get_rev_num('1.10')))
        self.assertFalse(is_rev_gte([1, 9, 'dev', 1], [1, 9, 'rc', 0]))
        self.assertFalse(is_rev_gte([1, 9, 'alpha'], [1, 9, 'alpha', 0]))
        self.assertFalse(is_rev_gte([1, 9, 'alpha', 1], [1, 9, 'alpha', 'beta']))
        self.assertFalse(is_rev_gte([1, 9, 'alpha.1'], [1, 9, 'alpha.beta']))
        self.assertFalse(is_rev_gte([1, 9, 'beta', 2], [1, 9, 'beta', 4]))
        self.assertFalse(is_rev_gte([1, 9, 'beta', '1'], [1, 9, 'rc', '0']))
        self.assertFalse(is_rev_gte([1, 9, 'rc', 1], [1, 9, 0]))
        self.assertFalse(is_rev_gte([1, 9, '0.2'], [1, 9, '0.3']))
        self.assertFalse(is_rev_gte([1, 9, 'build2'], [1, 9, 'build3']))
        self.assertFalse(is_rev_gte(get_rev_num('1.0.0+20130313144700'),
                                    get_rev_num('1.0.0-beta+exp.sha.5114f85')))
# Run the RevTest suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 2,587 | 12 | 156 |
c6e6b13899e482e6b8b0447f59dd9ecafd2a2cb4 | 1,060 | py | Python | python/atexit/offical_doc.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | python/atexit/offical_doc.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | python/atexit/offical_doc.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | """
The following simple example demonstrates how a module can initialize a counter from a file
when it is imported and save the counter's updated value automatically when the program terminates
without relying on the application making an explicit call into this module at termination
"""
# Restore the counter saved by a previous run; start from zero on the
# very first run (no counterfile yet).
try:
    with open("counterfile") as infile:
        _count = int(infile.read())
except FileNotFoundError:
    _count = 0
import atexit
# Persist the counter automatically when the interpreter exits.
# NOTE(review): `savecounter` is not defined in this copy of the example —
# it must be defined earlier in the file; confirm against the original.
atexit.register(savecounter)
"""
Positional and keyword arguments may also be passed to `register()`
"""
# Positional arguments
# NOTE(review): `goodbye` is not defined in this copy of the example — it
# must be defined earlier in the file; confirm against the original.
atexit.register(goodbye, 'Denny', 'nice')
# Keyword arguments
atexit.register(goodbye, adjective='nice', name='Donny')
"""
Usage as a decorator
"""
@atexit.register | 25.238095 | 98 | 0.718868 | """
The following simple example demonstrates how a module can initialize a counter from a file
when it is imported and save the counter's updated value automatically when the program terminates
without relying on the application making an explicit call into this module at termination
"""
# Restore the counter saved by a previous run; start from zero on the
# very first run (no counterfile yet).
try:
    with open("counterfile") as infile:
        _count = int(infile.read())
except FileNotFoundError:
    _count = 0
def incrcounter(n):
    """Advance the module-level counter by ``n``."""
    global _count
    _count += n
def savecounter():
    """Write the current counter value out to 'counterfile'."""
    with open('counterfile', 'w') as fh:
        fh.write('{:d}'.format(_count))
import atexit
# Persist the counter automatically when the interpreter exits, so the
# application never has to call this module explicitly at shutdown.
atexit.register(savecounter)
"""
Positional and keyword arguments may also be passed to `register()`
"""
def goodbye(name, adjective):
    """Print a farewell for *name*, described by *adjective*."""
    message = 'Goodbye, %s, it was %s to meet you.' % (name, adjective)
    print(message)
# Positional arguments
atexit.register(goodbye, 'Denny', 'nice')
# Keyword arguments
atexit.register(goodbye, adjective='nice', name='Donny')
"""
Usage as a decorator
"""
@atexit.register
def goodbye2():
print('You are now leaving the Python selector') | 244 | 0 | 90 |
485eee6e44c1156b88a4e8aaf153e8f3f0b30f8a | 31,914 | py | Python | pandas/tools/plotting.py | takluyver/pandas | 6c820b4b1a3b945d52cffbd9a4d40a582c077b5d | [
"BSD-3-Clause"
] | null | null | null | pandas/tools/plotting.py | takluyver/pandas | 6c820b4b1a3b945d52cffbd9a4d40a582c077b5d | [
"BSD-3-Clause"
] | null | null | null | pandas/tools/plotting.py | takluyver/pandas | 6c820b4b1a3b945d52cffbd9a4d40a582c077b5d | [
"BSD-3-Clause"
] | null | null | null | # being a bit too dynamic
# pylint: disable=E1101
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import numpy as np
def scatter_matrix(frame, alpha=0.5, figsize=None, **kwds):
    """
    Draw a matrix of scatter plots.

    Parameters
    ----------
    frame : DataFrame
        Only its numeric columns are plotted (one row/column of subplots
        per numeric column).
    alpha : float, default 0.5
        Point transparency passed to each scatter call.
    figsize : tuple, optional
        Figure size forwarded to the subplot grid.
    kwds : other plotting keyword arguments
        To be passed to scatter function

    Examples
    --------
    >>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
    >>> scatter_matrix(df, alpha=0.2)
    """
    df = frame._get_numeric_data()
    n = df.columns.size
    fig, axes = _subplots(nrows=n, ncols=n, figsize=figsize)

    # no gaps between subplots
    fig.subplots_adjust(wspace=0, hspace=0)

    # cell (i, j) plots column b (x) against column a (y); all tick marks
    # start hidden and are re-enabled selectively below
    for i, a in zip(range(n), df.columns):
        for j, b in zip(range(n), df.columns):
            axes[i, j].scatter(df[b], df[a], alpha=alpha, **kwds)
            axes[i, j].yaxis.set_visible(False)
            axes[i, j].xaxis.set_visible(False)

            # setup labels: x labels alternate between the top and bottom
            # edges, y labels between the left and right edges, so adjacent
            # labels do not collide
            if i == 0 and j % 2 == 1:
                axes[i, j].set_xlabel(b, visible=True)
                axes[i, j].xaxis.set_visible(True)
                axes[i, j].xaxis.set_ticks_position('top')
                axes[i, j].xaxis.set_label_position('top')
            if i == n - 1 and j % 2 == 0:
                axes[i, j].set_xlabel(b, visible=True)
                axes[i, j].xaxis.set_visible(True)
                axes[i, j].xaxis.set_ticks_position('bottom')
                axes[i, j].xaxis.set_label_position('bottom')
            if j == 0 and i % 2 == 0:
                axes[i, j].set_ylabel(a, visible=True)
                axes[i, j].yaxis.set_visible(True)
                axes[i, j].yaxis.set_ticks_position('left')
                axes[i, j].yaxis.set_label_position('left')
            if j == n - 1 and i % 2 == 1:
                axes[i, j].set_ylabel(a, visible=True)
                axes[i, j].yaxis.set_visible(True)
                axes[i, j].yaxis.set_ticks_position('right')
                axes[i, j].yaxis.set_label_position('right')

    # ensure {x,y}lim off diagonal are the same as diagonal
    for i in range(n):
        for j in range(n):
            if i != j:
                axes[i, j].set_xlim(axes[j, j].get_xlim())
                axes[i, j].set_ylim(axes[i, i].get_ylim())

    return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, log=False,
                 figsize=None, layout=None, sharex=False, sharey=False,
                 rot=90):
    """
    Draw a histogram of `column` for each group produced by grouping
    `data` on `by`, one subplot per group.

    Returns
    -------
    fig : matplotlib.Figure
    """
    def plot_group(group, ax):
        # Draw one group's histogram on its subplot.
        # NOTE(review): reconstructed — this nested helper was missing from
        # this copy of the file (the call below raised NameError); confirm
        # against upstream pandas.tools.plotting.
        ax.hist(group.dropna().values, bins=bins, log=log)

    fig, axes = _grouped_plot(plot_group, data, column=column,
                              by=by, sharex=sharex, sharey=sharey,
                              figsize=figsize, layout=layout, rot=rot)
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
                        hspace=0.3, wspace=0.2)
    return fig
class MPLPlot(object):
    """
    Base class for assembling a pandas plot using matplotlib

    Parameters
    ----------
    data :

    NOTE(review): the methods of this class appear to have been stripped
    from this copy of the file — a bare ``@property`` / ``@cache_readonly``
    decorator pair was left dangling over the ``_need_to_set_index``
    assignment, which is a syntax error. The stray decorators are removed
    here so the class definition parses; restore the missing methods from
    upstream pandas.tools.plotting before relying on this class.
    """
    # default tick-label rotation for subclasses
    _default_rot = 0

    # constructor kwargs popped off into attributes, with their defaults
    _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog']
    _attr_defaults = {'logy': False, 'logx': False, 'loglog': False}

    # whether the index needs special handling when plotting
    _need_to_set_index = False
def plot_frame(frame=None, subplots=False, sharex=True, sharey=False,
               use_index=True,
               figsize=None, grid=True, legend=True, rot=None,
               ax=None, title=None,
               xlim=None, ylim=None, logy=False,
               xticks=None, yticks=None,
               kind='line',
               sort_columns=True, fontsize=None, **kwds):
    """
    Make a line or bar plot of a DataFrame's series, with the index on the
    x-axis, using matplotlib / pylab.

    Parameters
    ----------
    subplots : boolean, default False
        Make separate subplots for each time series
    sharex : boolean, default True
        In case subplots=True, share x axis
    sharey : boolean, default False
        In case subplots=True, share y axis
    use_index : boolean, default True
        Use index as ticks for x axis
    sort_columns: boolean, default True
        Sort column names to determine plot ordering
    title : string
        Title to use for the plot
    grid : boolean, default True
        Axis grid lines
    legend : boolean, default True
        Place legend on axis subplots
    ax : matplotlib axis object, default None
    kind : {'line', 'bar', 'barh'}
        bar : vertical bar plot
        barh : horizontal bar plot
    logy : boolean, default False
        For line plots, use log scaling on y axis
    xticks : sequence
        Values to use for the xticks
    yticks : sequence
        Values to use for the yticks
    xlim : 2-tuple/list
    ylim : 2-tuple/list
    rot : int, default None
        Rotation for ticks
    fontsize : int, default None
        Font size for ticks
    kwds : keywords
        Options to pass to matplotlib plotting method

    Returns
    -------
    ax_or_axes : matplotlib.AxesSubplot or list of them
    """
    # normalize the chart type and resolve the plot class for it
    kind = kind.lower().strip()
    if kind == 'line':
        klass = LinePlot
    elif kind == 'bar' or kind == 'barh':
        klass = BarPlot
    else:
        raise ValueError('Invalid chart type given %s' % kind)

    plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot,
                     legend=legend, ax=ax, fontsize=fontsize,
                     use_index=use_index, sharex=sharex, sharey=sharey,
                     xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
                     title=title, grid=grid, figsize=figsize, logy=logy,
                     sort_columns=sort_columns, **kwds)
    plot_obj.generate()
    plot_obj.draw()

    # with subplots, each series got its own axis; otherwise there is one
    return plot_obj.axes if subplots else plot_obj.axes[0]
def plot_series(series, label=None, kind='line', use_index=True, rot=None,
                xticks=None, yticks=None, xlim=None, ylim=None,
                ax=None, style=None, grid=True, logy=False, **kwds):
    """
    Plot the input series with the index on the x-axis using matplotlib

    Parameters
    ----------
    label : label argument to provide to plot
    kind : {'line', 'bar', 'barh'}
        bar : vertical bar plot
        barh : horizontal bar plot
    use_index : boolean, default True
        Plot index as axis tick labels
    rot : int, default None
        Rotation for tick labels
    xticks : sequence
        Values to use for the xticks
    yticks : sequence
        Values to use for the yticks
    xlim : 2-tuple/list
    ylim : 2-tuple/list
    ax : matplotlib axis object
        If not passed, uses gca()
    style : string, default matplotlib default
        matplotlib line style to use
    grid : boolean, default True
        Axis grid lines
    logy : boolean, default False
        For line plots, use log scaling on y axis
    kwds : keywords
        Options to pass to matplotlib plotting method

    Notes
    -----
    See matplotlib documentation online for more on this subject
    """
    if kind == 'line':
        klass = LinePlot
    elif kind in ('bar', 'barh'):
        klass = BarPlot
    else:
        # Bug fix: an unknown kind previously fell through and raised an
        # UnboundLocalError on `klass` below; fail loudly and consistently
        # with plot_frame instead.
        raise ValueError('Invalid chart type given %s' % kind)

    if ax is None:
        ax = _gca()

    # is there harm in this?
    if label is None:
        label = series.name

    plot_obj = klass(series, kind=kind, rot=rot, logy=logy,
                     ax=ax, use_index=use_index, style=style,
                     xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
                     legend=False, grid=grid, label=label, **kwds)
    plot_obj.generate()
    plot_obj.draw()

    return plot_obj.ax
# if use_index:
# # custom datetime/interval plotting
# from pandas import IntervalIndex, DatetimeIndex
# if isinstance(self.index, IntervalIndex):
# return tsp.tsplot(self)
# if isinstance(self.index, DatetimeIndex):
# offset = self.index.freq
# name = datetools._newOffsetNames.get(offset, None)
# if name is not None:
# try:
# code = datetools._interval_str_to_code(name)
# s_ = Series(self.values,
# index=self.index.to_interval(freq=code),
# name=self.name)
# tsp.tsplot(s_)
# except:
# pass
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
            rot=0, grid=True, figsize=None):
    """
    Make a box plot from DataFrame column optionally grouped by some
    columns or other inputs

    Parameters
    ----------
    data : DataFrame
    column : column name or list of names, or vector
        Can be any valid input to groupby
    by : string or sequence
        Column in the DataFrame to group by
    ax : matplotlib axis object, default None
    fontsize : int or string
    rot : int, default 0
        Tick label rotation
    grid : boolean, default True
        Axis grid lines
    figsize : tuple, optional

    Returns
    -------
    ax : matplotlib.axes.AxesSubplot (grouped case returns the subplot
        axes; ungrouped case returns the boxplot dict)
    """
    def plot_group(grouped, ax):
        # Draw the boxplots for one group of columns on the given subplot.
        # NOTE(review): reconstructed — this nested helper was missing from
        # this copy of the file (the _grouped_plot_by_column call below
        # raised NameError); confirm against upstream pandas.
        keys, values = zip(*grouped)
        keys = [_stringify(x) for x in keys]
        ax.boxplot(values)
        ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)

    # normalize `column` to a list (idiom fix: compare to None with `is`)
    if column is None:
        columns = None
    else:
        if isinstance(column, (list, tuple)):
            columns = column
        else:
            columns = [column]

    if by is not None:
        if not isinstance(by, (list, tuple)):
            by = [by]
        fig, axes = _grouped_plot_by_column(plot_group, data, columns=columns,
                                            by=by, grid=grid, figsize=figsize)
        # Return axes in multiplot case, maybe revisit later # 985
        ret = axes
    else:
        if ax is None:
            ax = _gca()
        fig = ax.get_figure()
        data = data._get_numeric_data()
        if columns:
            cols = columns
        else:
            cols = data.columns
        keys = [_stringify(x) for x in cols]

        # Return boxplot dict in single plot case
        bp = ax.boxplot(list(data[cols].values.T))
        ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
        ax.grid(grid)
        ret = bp

    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
    return ret
def scatter_plot(data, x, y, by=None, ax=None, figsize=None):
    """
    Make a scatter plot of DataFrame column `x` against column `y`,
    optionally with one subplot per group defined by `by`.

    Returns
    -------
    fig : matplotlib.Figure
    """
    import matplotlib.pyplot as plt

    def plot_group(group, ax):
        # Scatter one group's x/y values on the given axis.
        # NOTE(review): reconstructed — this nested helper was missing from
        # this copy of the file (both calls below raised NameError);
        # confirm against upstream pandas.
        xvals = group[x].values
        yvals = group[y].values
        ax.scatter(xvals, yvals)

    if by is not None:
        fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
    else:
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            fig = ax.get_figure()
        plot_group(data, ax)
        ax.set_ylabel(str(y))
        ax.set_xlabel(str(x))

    return fig
def hist_frame(data, grid=True, xlabelsize=None, xrot=None,
               ylabelsize=None, yrot=None, ax=None, **kwds):
    """
    Draw Histogram the DataFrame's series using matplotlib / pylab.

    Parameters
    ----------
    grid : boolean, default True
        Whether to show axis grid lines
    xlabelsize : int, default None
        If specified changes the x-axis label size
    xrot : float, default None
        rotation of x axis labels
    ylabelsize : int, default None
        If specified changes the y-axis label size
    yrot : float, default None
        rotation of y axis labels
    ax : matplotlib axes object, default None
    kwds : other plotting keyword arguments
        To be passed to hist function
    """
    import matplotlib.pyplot as plt

    # smallest k such that a k x k grid holds one subplot per column
    n = len(data.columns)
    k = 1
    while k ** 2 < n:
        k += 1
    _, axes = _subplots(nrows=k, ncols=k, ax=ax)

    for i, col in enumerate(com._try_sort(data.columns)):
        # Bug fix: use floor division so the row index stays an integer —
        # plain '/' yields a float under Python 3, which breaks indexing
        # (unchanged behavior under Python 2).
        ax = axes[i // k][i % k]
        ax.hist(data[col].dropna().values, **kwds)
        ax.set_title(col)
        ax.grid(grid)
        if xlabelsize is not None:
            plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
        if xrot is not None:
            plt.setp(ax.get_xticklabels(), rotation=xrot)
        if ylabelsize is not None:
            plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
        if yrot is not None:
            plt.setp(ax.get_yticklabels(), rotation=yrot)

    return axes
def hist_series(self, ax=None, grid=True, xlabelsize=None, xrot=None,
                ylabelsize=None, yrot=None, **kwds):
    """
    Draw a histogram of the input series using matplotlib.

    Parameters
    ----------
    ax : matplotlib axis object
        If not passed, uses gca()
    grid : boolean, default True
        Whether to show axis grid lines
    xlabelsize : int, default None
        If specified changes the x-axis label size
    xrot : float, default None
        rotation of x axis labels
    ylabelsize : int, default None
        If specified changes the y-axis label size
    yrot : float, default None
        rotation of y axis labels
    kwds : keywords
        To be passed to the actual plotting function

    Notes
    -----
    See matplotlib documentation online for more on this
    """
    import matplotlib.pyplot as plt

    if ax is None:
        ax = plt.gca()

    # drop NaNs before binning, then draw the histogram itself
    ax.hist(self.dropna().values, **kwds)
    ax.grid(grid)

    # optional tick-label styling, x axis first, then y axis
    if xlabelsize is not None:
        plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
    if xrot is not None:
        plt.setp(ax.get_xticklabels(), rotation=xrot)
    if ylabelsize is not None:
        plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
    if yrot is not None:
        plt.setp(ax.get_yticklabels(), rotation=yrot)

    return ax
# copied from matplotlib/pyplot.py for compatibility with matplotlib < 1.0
def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, **fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
nrows : int
Number of rows of the subplot grid. Defaults to 1.
ncols : int
Number of columns of the subplot grid. Defaults to 1.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharex : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array contaning Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
fig_kw : dict
Dict with keywords passed to the figure() call. Note that all keywords
not recognized above will be automatically included here.
ax : Matplotlib axis object, default None
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one supblot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
nplots = nrows*ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
axarr[i] = fig.add_subplot(nrows, ncols, i+1, **subplot_kw)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots==1:
return fig, axarr[0]
else:
return fig, axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
return fig, axarr.reshape(nrows, ncols)
if __name__ == '__main__':
# import pandas.rpy.common as com
# sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
# top10 = sales['zip'].value_counts()[:10].index
# sales2 = sales[sales.zip.isin(top10)]
# _ = scatter_plot(sales2, 'squarefeet', 'price', by='zip')
# plt.show()
import matplotlib.pyplot as plt
import pandas.tools.plotting as plots
import pandas.core.frame as fr
reload(plots)
reload(fr)
from pandas.core.frame import DataFrame
data = DataFrame([[3, 6, -5], [4, 8, 2], [4, 9, -6],
[4, 9, -3], [2, 5, -1]],
columns=['A', 'B', 'C'])
data.plot(kind='barh', stacked=True)
plt.show()
| 30.221591 | 79 | 0.567776 | # being a bit too dynamic
# pylint: disable=E1101
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import numpy as np
def scatter_matrix(frame, alpha=0.5, figsize=None, **kwds):
    """
    Draw a matrix of scatter plots (one panel per pair of numeric columns).

    Parameters
    ----------
    frame : DataFrame
    alpha : float, default 0.5
        Point transparency passed to scatter
    figsize : tuple, optional
        Figure size forwarded to _subplots
    kwds : other plotting keyword arguments
        To be passed to scatter function

    Examples
    --------
    >>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
    >>> scatter_matrix(df, alpha=0.2)
    """
    # Only numeric columns can be scatter-plotted.
    df = frame._get_numeric_data()
    n = df.columns.size
    fig, axes = _subplots(nrows=n, ncols=n, figsize=figsize)
    # no gaps between subplots
    fig.subplots_adjust(wspace=0, hspace=0)
    for i, a in zip(range(n), df.columns):
        for j, b in zip(range(n), df.columns):
            axes[i, j].scatter(df[b], df[a], alpha=alpha, **kwds)
            # Hide all axis labels by default; selected edge panels are
            # re-enabled below so labels alternate around the grid.
            axes[i, j].yaxis.set_visible(False)
            axes[i, j].xaxis.set_visible(False)
            # setup labels
            if i == 0 and j % 2 == 1:
                # top edge, odd columns: x label above
                axes[i, j].set_xlabel(b, visible=True)
                axes[i, j].xaxis.set_visible(True)
                axes[i, j].xaxis.set_ticks_position('top')
                axes[i, j].xaxis.set_label_position('top')
            if i == n - 1 and j % 2 == 0:
                # bottom edge, even columns: x label below
                axes[i, j].set_xlabel(b, visible=True)
                axes[i, j].xaxis.set_visible(True)
                axes[i, j].xaxis.set_ticks_position('bottom')
                axes[i, j].xaxis.set_label_position('bottom')
            if j == 0 and i % 2 == 0:
                # left edge, even rows: y label on the left
                axes[i, j].set_ylabel(a, visible=True)
                axes[i, j].yaxis.set_visible(True)
                axes[i, j].yaxis.set_ticks_position('left')
                axes[i, j].yaxis.set_label_position('left')
            if j == n - 1 and i % 2 == 1:
                # right edge, odd rows: y label on the right
                axes[i, j].set_ylabel(a, visible=True)
                axes[i, j].yaxis.set_visible(True)
                axes[i, j].yaxis.set_ticks_position('right')
                axes[i, j].yaxis.set_label_position('right')
    # ensure {x,y}lim off diagonal are the same as diagonal
    for i in range(n):
        for j in range(n):
            if i != j:
                axes[i, j].set_xlim(axes[j, j].get_xlim())
                axes[i, j].set_ylim(axes[i, i].get_ylim())
    return axes
def _gca():
    """Return matplotlib's current Axes (pyplot imported lazily)."""
    from matplotlib import pyplot
    return pyplot.gca()
def _gcf():
    """Return matplotlib's current Figure (pyplot imported lazily)."""
    from matplotlib import pyplot
    return pyplot.gcf()
def hist(data, column, by=None, ax=None, fontsize=None):
    # NOTE(review): despite the name, this draws a *boxplot* of `column`
    # grouped by `by` (one box per group), not a histogram.
    keys, values = zip(*data.groupby(by)[column])
    if ax is None:
        ax = _gca()
    ax.boxplot(values)
    # Group keys become the x tick labels.
    ax.set_xticklabels(keys, rotation=0, fontsize=fontsize)
    return ax
def grouped_hist(data, column=None, by=None, ax=None, bins=50, log=False,
                 figsize=None, layout=None, sharex=False, sharey=False,
                 rot=90):
    """
    Draw one histogram subplot per group of `by`.

    Returns
    -------
    fig : matplotlib.Figure
    """
    # NOTE(review): `ax` and `log` are accepted but never used below.
    # if isinstance(data, DataFrame):
    #     data = data[column]
    def plot_group(group, ax):
        # One histogram per group; NaNs dropped before binning.
        ax.hist(group.dropna(), bins=bins)
    fig, axes = _grouped_plot(plot_group, data, column=column,
                              by=by, sharex=sharex, sharey=sharey,
                              figsize=figsize, layout=layout, rot=rot)
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
                        hspace=0.3, wspace=0.2)
    return fig
class MPLPlot(object):
    """
    Base class for assembling a pandas plot using matplotlib.

    Subclasses implement _make_plot (and may override the other template
    hooks); generate() runs the whole pipeline in order.

    Parameters
    ----------
    data : Series, DataFrame or ndarray
    """
    _default_rot = 0
    # Style flags popped off **kwds and stored as attributes; everything
    # remaining in kwds is forwarded to the matplotlib plotting call.
    _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog']
    _attr_defaults = {'logy': False, 'logx': False, 'loglog': False}
    def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
                 sharey=False, use_index=True,
                 figsize=None, grid=True, legend=True, rot=None,
                 ax=None, fig=None, title=None, xlim=None, ylim=None,
                 xticks=None, yticks=None,
                 sort_columns=True, fontsize=None, **kwds):
        self.data = data
        self.by = by
        self.kind = kind
        self.sort_columns = sort_columns
        self.subplots = subplots
        self.sharex = sharex
        self.sharey = sharey
        self.figsize = figsize
        self.xticks = xticks
        self.yticks = yticks
        self.xlim = xlim
        self.ylim = ylim
        self.title = title
        self.use_index = use_index
        self.fontsize = fontsize
        self.rot = rot
        self.grid = grid
        self.legend = legend
        # Pull known style flags out of **kwds (see _pop_attributes).
        for attr in self._pop_attributes:
            value = kwds.pop(attr, self._attr_defaults.get(attr, None))
            setattr(self, attr, value)
        self.ax = ax
        self.fig = fig
        self.axes = None
        self.kwds = kwds
    def _iter_data(self):
        # Yield (label, values) for every series to be drawn.
        from pandas.core.frame import DataFrame
        from pandas.core.series import Series
        if isinstance(self.data, (Series, np.ndarray)):
            yield com._stringify(self.label), np.asarray(self.data)
        elif isinstance(self.data, DataFrame):
            df = self.data
            if self.sort_columns:
                columns = com._try_sort(df.columns)
            else:
                columns = df.columns
            for col in columns:
                empty = df[col].count() == 0
                # is this right?
                values = df[col].values if not empty else np.zeros(len(df))
                col = com._stringify(col)
                yield col, values
    @property
    def nseries(self):
        # Number of series drawn: 1 for a Series, ncols for a DataFrame.
        if self.data.ndim == 1:
            return 1
        else:
            return self.data.shape[1]
    def draw(self):
        self.plt.draw_if_interactive()
    def generate(self):
        # Template method: run the full plotting pipeline.
        self._args_adjust()
        self._compute_plot_data()
        self._setup_subplots()
        self._make_plot()
        self._post_plot_logic()
        self._adorn_subplots()
    def _args_adjust(self):
        # Hook: normalize constructor arguments (overridden by subclasses).
        pass
    def _setup_subplots(self):
        # Create the figure/axes: one subplot per series when subplots=True,
        # otherwise a single axis (reusing self.ax when supplied).
        if self.subplots:
            nrows, ncols = self._get_layout()
            if self.ax is None:
                fig, axes = _subplots(nrows=nrows, ncols=ncols,
                                      sharex=self.sharex, sharey=self.sharey,
                                      figsize=self.figsize)
            else:
                fig, axes = _subplots(nrows=nrows, ncols=ncols,
                                      sharex=self.sharex, sharey=self.sharey,
                                      figsize=self.figsize, ax=self.ax)
        else:
            if self.ax is None:
                fig = self.plt.figure(figsize=self.figsize)
                self.ax = fig.add_subplot(111)
            else:
                fig = self.ax.get_figure()
            axes = [self.ax]
        self.fig = fig
        self.axes = axes
    def _get_layout(self):
        # Default layout for subplots mode: one row per column.
        return (len(self.data.columns), 1)
    def _compute_plot_data(self):
        # Hook: preprocess self.data before plotting.
        pass
    def _make_plot(self):
        raise NotImplementedError
    def _post_plot_logic(self):
        # Hook: per-kind cleanup after the main plot call.
        pass
    def _adorn_subplots(self):
        # Apply ticks/limits/grid/legend/title to every created axis.
        if self.subplots:
            to_adorn = self.axes
        else:
            to_adorn = [self.ax]
        # todo: sharex, sharey handling?
        for ax in to_adorn:
            if self.yticks is not None:
                ax.set_yticks(self.yticks)
            if self.xticks is not None:
                ax.set_xticks(self.xticks)
            if self.ylim is not None:
                ax.set_ylim(self.ylim)
            if self.xlim is not None:
                ax.set_xlim(self.xlim)
            ax.grid(self.grid)
        if self.legend and not self.subplots:
            self.ax.legend(loc='best')
        if self.title:
            if self.subplots:
                self.fig.suptitle(self.title)
            else:
                self.ax.set_title(self.title)
        if self._need_to_set_index:
            xticklabels = [_stringify(key) for key in self.data.index]
            for ax_ in self.axes:
                # ax_.set_xticks(self.xticks)
                ax_.set_xticklabels(xticklabels, rotation=self.rot)
    @cache_readonly
    def plt(self):
        import matplotlib.pyplot as plt
        return plt
    # Set by _get_xticks when the index cannot be handed to matplotlib
    # directly; _adorn_subplots then installs string tick labels.
    _need_to_set_index = False
    def _get_xticks(self):
        index = self.data.index
        is_datetype = index.inferred_type in ('datetime', 'date',
                                              'datetime64')
        if self.use_index:
            if index.is_numeric() or is_datetype:
                """
                Matplotlib supports numeric values or datetime objects as
                xaxis values. Taking LBYL approach here, by the time
                matplotlib raises exception when using non numeric/datetime
                values for xaxis, several actions are already taken by plt.
                """
                x = index._mpl_repr()
            else:
                self._need_to_set_index = True
                x = range(len(index))
        else:
            x = range(len(index))
        return x
class LinePlot(MPLPlot):
    # Line chart; honors the logx/logy/loglog flags by selecting the
    # matching Axes plotting method.
    def __init__(self, data, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
    def _get_plot_function(self):
        # Pick the Axes method matching the requested scale flags.
        if self.logy:
            plotf = self.plt.Axes.semilogy
        elif self.logx:
            plotf = self.plt.Axes.semilogx
        elif self.loglog:
            plotf = self.plt.Axes.loglog
        else:
            plotf = self.plt.Axes.plot
        return plotf
    def _make_plot(self):
        # this is slightly deceptive
        x = self._get_xticks()
        plotf = self._get_plot_function()
        for i, (label, y) in enumerate(self._iter_data()):
            if self.subplots:
                ax = self.axes[i]
                style = 'k'
            else:
                style = '' # empty string ignored
                ax = self.ax
            if self.style:
                style = self.style
            plotf(ax, x, y, style, label=label, **self.kwds)
            ax.grid(self.grid)
    def _post_plot_logic(self):
        df = self.data
        if self.subplots and self.legend:
            self.axes[0].legend(loc='best')
        # Tilt/right-align x labels when plotting a date index on a single
        # axis, or on every axis when subplots share the x axis.
        condition = (df.index.is_all_dates
                     and not self.subplots
                     or (self.subplots and self.sharex))
        for ax in self.axes:
            if condition:
                format_date_labels(ax)
class BarPlot(MPLPlot):
    # Vertical ('bar') and horizontal ('barh') bar charts, with optional
    # stacking for multi-series input.
    _default_rot = {'bar' : 90, 'barh' : 0}
    def __init__(self, data, **kwargs):
        self.stacked = kwargs.pop('stacked', False)
        # Left/bottom position of each bar group (0.25 margin offset).
        self.ax_pos = np.arange(len(data)) + 0.25
        MPLPlot.__init__(self, data, **kwargs)
    def _args_adjust(self):
        # Default label rotation depends on orientation; shrink tick label
        # font when there are many categories.
        if self.rot is None:
            self.rot = self._default_rot[self.kind]
        if self.fontsize is None:
            if len(self.data) < 10:
                self.fontsize = 12
            else:
                self.fontsize = 10
    @property
    def bar_f(self):
        # Orientation-specific drawing primitive: `start` maps to `bottom`
        # for vertical bars and `left` for horizontal ones.
        if self.kind == 'bar':
            def f(ax, x, y, w, start=None, **kwds):
                return ax.bar(x, y, w, bottom=start, **kwds)
        elif self.kind == 'barh':
            def f(ax, x, y, w, start=None, **kwds):
                return ax.barh(x, y, w, left=start, **kwds)
        else:
            raise NotImplementedError
        return f
    def _make_plot(self):
        colors = 'brgyk'
        rects = []
        labels = []
        ax = self.axes[0]
        bar_f = self.bar_f
        # Running totals for stacked bars; positive and negative values
        # accumulate separately so stacks grow away from zero.
        pos_prior = neg_prior = np.zeros(len(self.data))
        K = self.nseries
        for i, (label, y) in enumerate(self._iter_data()):
            kwds = self.kwds.copy()
            if 'color' not in kwds:
                kwds['color'] = colors[i % len(colors)]
            if self.subplots:
                ax = self.axes[i]
                rect = bar_f(ax, self.ax_pos, y, 0.5, start=pos_prior,
                             linewidth=1, **kwds)
                ax.set_title(label)
            elif self.stacked:
                mask = y > 0
                start = np.where(mask, pos_prior, neg_prior)
                rect = bar_f(ax, self.ax_pos, y, 0.5, start=start,
                             label=label, linewidth=1, **kwds)
                pos_prior = pos_prior + np.where(mask, y, 0)
                neg_prior = neg_prior + np.where(mask, 0, y)
            else:
                # Grouped (side-by-side) bars: each series gets 0.75/K width.
                rect = bar_f(ax, self.ax_pos + i * 0.75 / K, y, 0.75 / K,
                             start=pos_prior, label=label, **kwds)
            rects.append(rect)
            labels.append(label)
        if self.legend and not self.subplots:
            patches =[r[0] for r in rects]
            # Legend to the right of the plot
            # ax.legend(patches, labels, bbox_to_anchor=(1.05, 1),
            #           loc=2, borderaxespad=0.)
            # self.fig.subplots_adjust(right=0.80)
            ax.legend(patches, labels, loc='best')
            self.fig.subplots_adjust(top=0.8)
    def _post_plot_logic(self):
        for ax in self.axes:
            str_index = [_stringify(key) for key in self.data.index]
            if self.kind == 'bar':
                ax.set_xlim([self.ax_pos[0] - 0.25, self.ax_pos[-1] + 1])
                ax.set_xticks(self.ax_pos + 0.375)
                ax.set_xticklabels(str_index, rotation=self.rot,
                                   fontsize=self.fontsize)
                # Zero reference line for mixed-sign data.
                ax.axhline(0, color='k', linestyle='--')
            else:
                # horizontal bars
                ax.set_ylim([self.ax_pos[0] - 0.25, self.ax_pos[-1] + 1])
                ax.set_yticks(self.ax_pos + 0.375)
                ax.set_yticklabels(str_index, rotation=self.rot,
                                   fontsize=self.fontsize)
                ax.axvline(0, color='k', linestyle='--')
class BoxPlot(MPLPlot):
    # Stub: no box-plot behavior implemented yet.
    pass
class HistPlot(MPLPlot):
    # Stub: no histogram behavior implemented yet.
    pass
def plot_frame(frame=None, subplots=False, sharex=True, sharey=False,
               use_index=True,
               figsize=None, grid=True, legend=True, rot=None,
               ax=None, title=None,
               xlim=None, ylim=None, logy=False,
               xticks=None, yticks=None,
               kind='line',
               sort_columns=True, fontsize=None, **kwds):
    """
    Make line or bar plot of DataFrame's series with the index on the x-axis
    using matplotlib / pylab.
    Parameters
    ----------
    subplots : boolean, default False
        Make separate subplots for each time series
    sharex : boolean, default True
        In case subplots=True, share x axis
    sharey : boolean, default False
        In case subplots=True, share y axis
    use_index : boolean, default True
        Use index as ticks for x axis
    stacked : boolean, default False
        If True, create stacked bar plot. Only valid for DataFrame input
    sort_columns: boolean, default True
        Sort column names to determine plot ordering
    title : string
        Title to use for the plot
    grid : boolean, default True
        Axis grid lines
    legend : boolean, default True
        Place legend on axis subplots
    ax : matplotlib axis object, default None
    kind : {'line', 'bar', 'barh'}
        bar : vertical bar plot
        barh : horizontal bar plot
    logy : boolean, default False
        For line plots, use log scaling on y axis
    xticks : sequence
        Values to use for the xticks
    yticks : sequence
        Values to use for the yticks
    xlim : 2-tuple/list
    ylim : 2-tuple/list
    rot : int, default None
        Rotation for ticks
    kwds : keywords
        Options to pass to matplotlib plotting method
    Returns
    -------
    ax_or_axes : matplotlib.AxesSubplot or list of them
    """
    kind = kind.lower().strip()
    if kind == 'line':
        klass = LinePlot
    elif kind in ('bar', 'barh'):
        klass = BarPlot
    else:
        raise ValueError('Invalid chart type given %s' % kind)
    # Extra keywords (e.g. `stacked`) flow through **kwds to the plot class.
    plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot,
                     legend=legend, ax=ax, fontsize=fontsize,
                     use_index=use_index, sharex=sharex, sharey=sharey,
                     xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
                     title=title, grid=grid, figsize=figsize, logy=logy,
                     sort_columns=sort_columns, **kwds)
    plot_obj.generate()
    plot_obj.draw()
    if subplots:
        return plot_obj.axes
    else:
        return plot_obj.axes[0]
def plot_series(series, label=None, kind='line', use_index=True, rot=None,
                xticks=None, yticks=None, xlim=None, ylim=None,
                ax=None, style=None, grid=True, logy=False, **kwds):
    """
    Plot the input series with the index on the x-axis using matplotlib

    Parameters
    ----------
    label : label argument to provide to plot
    kind : {'line', 'bar', 'barh'}
        bar : vertical bar plot
        barh : horizontal bar plot
    use_index : boolean, default True
        Plot index as axis tick labels
    ax : matplotlib axis object
        If not passed, uses gca()
    style : string, default matplotlib default
        matplotlib line style to use
    logy : boolean, default False
        For line plots, use log scaling on y axis
    xticks : sequence
        Values to use for the xticks
    yticks : sequence
        Values to use for the yticks
    xlim : 2-tuple/list
    ylim : 2-tuple/list
    rot : int, default None
        Rotation for tick labels
    kwds : keywords
        Options to pass to matplotlib plotting method

    Notes
    -----
    See matplotlib documentation online for more on this subject
    """
    if kind == 'line':
        klass = LinePlot
    elif kind in ('bar', 'barh'):
        klass = BarPlot
    else:
        # Mirror plot_frame: fail fast with a clear error instead of a
        # NameError from the unbound `klass` below.
        raise ValueError('Invalid chart type given %s' % kind)
    if ax is None:
        ax = _gca()
    # is there harm in this?
    if label is None:
        label = series.name
    plot_obj = klass(series, kind=kind, rot=rot, logy=logy,
                     ax=ax, use_index=use_index, style=style,
                     xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
                     legend=False, grid=grid, label=label, **kwds)
    plot_obj.generate()
    plot_obj.draw()
    return plot_obj.ax
# if use_index:
# # custom datetime/interval plotting
# from pandas import IntervalIndex, DatetimeIndex
# if isinstance(self.index, IntervalIndex):
# return tsp.tsplot(self)
# if isinstance(self.index, DatetimeIndex):
# offset = self.index.freq
# name = datetools._newOffsetNames.get(offset, None)
# if name is not None:
# try:
# code = datetools._interval_str_to_code(name)
# s_ = Series(self.values,
# index=self.index.to_interval(freq=code),
# name=self.name)
# tsp.tsplot(s_)
# except:
# pass
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
            rot=0, grid=True, figsize=None):
    """
    Make a box plot from DataFrame column optionally grouped by some columns
    or other inputs

    Parameters
    ----------
    data : DataFrame
    column : column name or list of names, or vector
        Can be any valid input to groupby
    by : string or sequence
        Column in the DataFrame to group by
    fontsize : int or string

    Returns
    -------
    ax : matplotlib.axes.AxesSubplot
    """
    def plot_group(grouped, ax):
        # One box per group; x tick labels are the stringified group keys.
        keys, values = zip(*grouped)
        keys = [_stringify(x) for x in keys]
        ax.boxplot(values)
        ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
    # Normalize `column` to a list (or None). Use `is None`: comparing with
    # == can trigger element-wise comparison when `column` is array-like.
    if column is None:
        columns = None
    else:
        if isinstance(column, (list, tuple)):
            columns = column
        else:
            columns = [column]
    if by is not None:
        if not isinstance(by, (list, tuple)):
            by = [by]
        fig, axes = _grouped_plot_by_column(plot_group, data, columns=columns,
                                            by=by, grid=grid, figsize=figsize)
        # Return axes in multiplot case, maybe revisit later # 985
        ret = axes
    else:
        if ax is None:
            ax = _gca()
        fig = ax.get_figure()
        data = data._get_numeric_data()
        if columns:
            cols = columns
        else:
            cols = data.columns
        keys = [_stringify(x) for x in cols]
        # Return boxplot dict in single plot case
        bp = ax.boxplot(list(data[cols].values.T))
        ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
        ax.grid(grid)
        ret = bp
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
    return ret
def _stringify(x):
if isinstance(x, tuple):
return '|'.join(str(y) for y in x)
else:
return str(x)
def format_date_labels(ax):
    # mini version of autofmt_xdate
    # Right-align and tilt the x tick labels and reserve room at the bottom
    # of the figure. Best-effort: any failure is deliberately ignored.
    try:
        for label in ax.get_xticklabels():
            label.set_ha('right')
            label.set_rotation(30)
        fig = ax.get_figure()
        fig.subplots_adjust(bottom=0.2)
    except Exception: # pragma: no cover
        pass
def scatter_plot(data, x, y, by=None, ax=None, figsize=None):
    """
    Scatter plot of column `y` against column `x`, optionally split into one
    subplot per group of `by`.

    Returns
    -------
    fig : matplotlib.Figure
    """
    import matplotlib.pyplot as plt
    def plot_group(group, ax):
        # Draw one group's points on the given axes.
        xvals = group[x].values
        yvals = group[y].values
        ax.scatter(xvals, yvals)
    if by is not None:
        # _grouped_plot returns (fig, axes); keep only the figure so this
        # function honors its documented Figure return value.
        fig, _ = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
    else:
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            fig = ax.get_figure()
        plot_group(data, ax)
        ax.set_ylabel(str(y))
        ax.set_xlabel(str(x))
    return fig
def hist_frame(data, grid=True, xlabelsize=None, xrot=None,
               ylabelsize=None, yrot=None, ax=None, **kwds):
    """
    Draw Histogram the DataFrame's series using matplotlib / pylab.

    Parameters
    ----------
    grid : boolean, default True
        Whether to show axis grid lines
    xlabelsize : int, default None
        If specified changes the x-axis label size
    xrot : float, default None
        rotation of x axis labels
    ylabelsize : int, default None
        If specified changes the y-axis label size
    yrot : float, default None
        rotation of y axis labels
    ax : matplotlib axes object, default None
    kwds : other plotting keyword arguments
        To be passed to hist function
    """
    import matplotlib.pyplot as plt
    # Smallest square grid (k x k) that holds one subplot per column.
    n = len(data.columns)
    k = 1
    while k ** 2 < n:
        k += 1
    _, axes = _subplots(nrows=k, ncols=k, ax=ax)
    for i, col in enumerate(com._try_sort(data.columns)):
        # Floor division for the row index: `i / k` yields a float on
        # Python 3, which is not a valid array index.
        ax = axes[i // k][i % k]
        ax.hist(data[col].dropna().values, **kwds)
        ax.set_title(col)
        ax.grid(grid)
        if xlabelsize is not None:
            plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
        if xrot is not None:
            plt.setp(ax.get_xticklabels(), rotation=xrot)
        if ylabelsize is not None:
            plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
        if yrot is not None:
            plt.setp(ax.get_yticklabels(), rotation=yrot)
    return axes
def hist_series(self, ax=None, grid=True, xlabelsize=None, xrot=None,
                ylabelsize=None, yrot=None, **kwds):
    """
    Draw histogram of the input series using matplotlib

    Parameters
    ----------
    ax : matplotlib axis object
        If not passed, uses gca()
    grid : boolean, default True
        Whether to show axis grid lines
    xlabelsize : int, default None
        If specified changes the x-axis label size
    xrot : float, default None
        rotation of x axis labels
    ylabelsize : int, default None
        If specified changes the y-axis label size
    yrot : float, default None
        rotation of y axis labels
    kwds : keywords
        To be passed to the actual plotting function

    Notes
    -----
    See matplotlib documentation online for more on this
    """
    # NOTE: written as a method (takes `self` and calls self.dropna());
    # presumably attached to Series elsewhere in pandas.
    import matplotlib.pyplot as plt
    if ax is None:
        ax = plt.gca()
    # NaNs are dropped before binning.
    values = self.dropna().values
    ax.hist(values, **kwds)
    ax.grid(grid)
    if xlabelsize is not None:
        plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
    if xrot is not None:
        plt.setp(ax.get_xticklabels(), rotation=xrot)
    if ylabelsize is not None:
        plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
    if yrot is not None:
        plt.setp(ax.get_yticklabels(), rotation=yrot)
    return ax
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
                  figsize=None, sharex=True, sharey=True, layout=None,
                  rot=0, ax=None):
    # Apply `plotf(group, ax)` on one subplot per group of `by`.
    # Returns (fig, axes).
    # NOTE(review): `rot` is accepted but never used in this body.
    from pandas.core.frame import DataFrame
    # allow to specify mpl default with 'default'
    if figsize is None or figsize == 'default':
        figsize = (10, 5) # our default
    grouped = data.groupby(by)
    if column is not None:
        grouped = grouped[column]
    ngroups = len(grouped)
    nrows, ncols = layout or _get_layout(ngroups)
    # NOTE(review): unreachable — figsize was already defaulted above.
    if figsize is None:
        # our favorite default beating matplotlib's idea of the
        # default size
        figsize = (10, 5)
    fig, axes = _subplots(nrows=nrows, ncols=ncols, figsize=figsize,
                          sharex=sharex, sharey=sharey, ax=ax)
    # Flatten the 2-d axes grid so groups can be assigned in order.
    ravel_axes = []
    for row in axes:
        ravel_axes.extend(row)
    for i, (key, group) in enumerate(grouped):
        ax = ravel_axes[i]
        if numeric_only and isinstance(group, DataFrame):
            group = group._get_numeric_data()
        plotf(group, ax)
        ax.set_title(str(key))
    return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
                            numeric_only=True, grid=False,
                            figsize=None, ax=None):
    # One subplot per entry in `columns`, each showing `plotf` applied to
    # that column grouped by `by`. Returns (fig, axes).
    # NOTE(review): `numeric_only` is accepted but never used in this body.
    import matplotlib.pyplot as plt
    grouped = data.groupby(by)
    if columns is None:
        columns = data._get_numeric_data().columns - by
    ngroups = len(columns)
    nrows, ncols = _get_layout(ngroups)
    fig, axes = _subplots(nrows=nrows, ncols=ncols,
                          sharex=True, sharey=True,
                          figsize=figsize, ax=ax)
    # Flatten: _subplots may return a lone Axes, a 1-d array, or a 2-d
    # array depending on the layout and squeezing.
    if isinstance(axes, plt.Axes):
        ravel_axes = [axes]
    else:
        ravel_axes = []
        for row in axes:
            if isinstance(row, plt.Axes):
                ravel_axes.append(row)
            else:
                ravel_axes.extend(row)
    for i, col in enumerate(columns):
        ax = ravel_axes[i]
        gp_col = grouped[col]
        plotf(gp_col, ax)
        ax.set_title(col)
        ax.set_xlabel(str(by))
        ax.grid(grid)
    byline = by[0] if len(by) == 1 else by
    fig.suptitle('Boxplot grouped by %s' % byline)
    return fig, axes
def _get_layout(nplots):
if nplots == 1:
return (1, 1)
elif nplots == 2:
return (1, 2)
elif nplots < 4:
return (2, 2)
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
# copied from matplotlib/pyplot.py for compatibility with matplotlib < 1.0
def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, **fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
nrows : int
Number of rows of the subplot grid. Defaults to 1.
ncols : int
Number of columns of the subplot grid. Defaults to 1.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharex : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array contaning Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
fig_kw : dict
Dict with keywords passed to the figure() call. Note that all keywords
not recognized above will be automatically included here.
ax : Matplotlib axis object, default None
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one supblot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
nplots = nrows*ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
axarr[i] = fig.add_subplot(nrows, ncols, i+1, **subplot_kw)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots==1:
return fig, axarr[0]
else:
return fig, axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
return fig, axarr.reshape(nrows, ncols)
if __name__ == '__main__':
    # Ad-hoc manual smoke test: draws a stacked horizontal bar chart.
    # import pandas.rpy.common as com
    # sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
    # top10 = sales['zip'].value_counts()[:10].index
    # sales2 = sales[sales.zip.isin(top10)]
    # _ = scatter_plot(sales2, 'squarefeet', 'price', by='zip')
    # plt.show()
    import matplotlib.pyplot as plt
    import pandas.tools.plotting as plots
    import pandas.core.frame as fr
    # NOTE: `reload` is the Python 2 builtin (importlib.reload on Python 3).
    reload(plots)
    reload(fr)
    from pandas.core.frame import DataFrame
    data = DataFrame([[3, 6, -5], [4, 8, 2], [4, 9, -6],
                      [4, 9, -3], [2, 5, -1]],
                     columns=['A', 'B', 'C'])
    data.plot(kind='barh', stacked=True)
    plt.show()
| 12,887 | 221 | 840 |
cc122db42745c9467f5007b75022190b121caef5 | 30 | py | Python | flask/src/flask_app/web_app/routes/__init__.py | AlTosterino/FlaskVsFastAPI | db826b1bd19216ff1ae7bdba518244178d8f59bf | [
"MIT"
] | 5 | 2021-04-16T20:00:09.000Z | 2022-01-23T23:39:03.000Z | flask/src/flask_app/web_app/routes/__init__.py | AlTosterino/FlaskVsFastAPI | db826b1bd19216ff1ae7bdba518244178d8f59bf | [
"MIT"
] | null | null | null | flask/src/flask_app/web_app/routes/__init__.py | AlTosterino/FlaskVsFastAPI | db826b1bd19216ff1ae7bdba518244178d8f59bf | [
"MIT"
] | null | null | null | from .news import news_router
| 15 | 29 | 0.833333 | from .news import news_router
| 0 | 0 | 0 |
87edcc5488a97578f51befd6a32d044abb6c5615 | 4,630 | py | Python | watchdog.py | tbma2014us/ops-tools | d6368fedbbf1fea06aea9fc657087b9371dd00cc | [
"Apache-2.0"
] | 2 | 2020-08-13T15:33:35.000Z | 2022-01-22T09:59:15.000Z | watchdog.py | tbma2014us/ops-tools | d6368fedbbf1fea06aea9fc657087b9371dd00cc | [
"Apache-2.0"
] | null | null | null | watchdog.py | tbma2014us/ops-tools | d6368fedbbf1fea06aea9fc657087b9371dd00cc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Monitors the availability of the TCP port, runs external process if port is unavailable,
but not more frequently than cooldown timeout. Persistent information is stored in /tmp
"""
import argparse
import contextlib
import datetime
import logging.handlers
import os
import random
import shelve
import shlex
import socket
import subprocess
import sys
import tempfile
import time
logger = logging.getLogger()
# noinspection PyTypeChecker
if __name__ == '__main__':
main()
| 37.33871 | 112 | 0.62743 | #!/usr/bin/env python
"""
Monitors the availability of the TCP port, runs external process if port is unavailable,
but not more frequently than cooldown timeout. Persistent information is stored in /tmp
"""
import argparse
import contextlib
import datetime
import logging.handlers
import os
import random
import shelve
import shlex
import socket
import subprocess
import sys
import tempfile
import time
logger = logging.getLogger()
# noinspection PyTypeChecker
class ArgsParser(argparse.ArgumentParser):
    """Command-line parser for the watchdog.

    Defines the service address/port to probe, the retry delay, the
    recovery command, and the exec cooldown (hours).
    """
    def __init__(self, *args, **kwargs):
        # Only default the description if the caller did not supply one.
        kwargs.setdefault(
            'description',
            'Monitors the availability of the TCP port, runs external process if port is unavailable,\n'
            'but not more frequently than cooldown timeout.\n')
        argparse.ArgumentParser.__init__(self, *args, **kwargs)
        # Raw formatter keeps the newlines in description/epilog intact.
        self.formatter_class = argparse.RawTextHelpFormatter
        self.epilog = '''
For example:
    {} -a 192.168.1.1 -p 80 -c "restart service"
'''.format(__file__)
        self.options = None
        self.add_argument('-a', '--address', dest='service_address', default='192.168.1.230')
        self.add_argument('-p', '--port', dest='service_port', type=int, default=22)
        self.add_argument('-r', '--retry_after', type=int, dest='seconds', default=61)
        self.add_argument('-c', '--command', dest='command', default='echo restarting')
        self.add_argument('-cd', '--cooldown', type=int, dest='hours', default=4)
    def error(self, message):
        # Print usage alongside the error instead of argparse's terse default.
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)
    def parse_args(self, *args, **kwargs):
        # Augment the parsed namespace with derived settings used by main().
        options = argparse.ArgumentParser.parse_args(self, *args, **kwargs)
        options.log_format = '%(filename)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'
        options.command_cooldown = datetime.timedelta(hours=options.hours)
        # Shelve file name: this script's path without the .py extension.
        options.name = os.path.splitext(__file__)[0]
        self.options = options
        return options
def execute(command):
    """Run *command* (shell-style string) and return the completion time.

    Logs on a zero exit status; exits the whole process when the command
    cannot be launched at all (OSError, e.g. executable missing).
    """
    argv = shlex.split(command)
    try:
        returncode = subprocess.call(argv)
    except OSError as err:
        logging.error('Exec error: %s' % err)
        raise SystemExit(1)
    if returncode == 0:
        logging.info('Watchdog executed "%s"' % argv)
    return datetime.datetime.now()
def start_logging(_log_format):
    """Configure the root logger at INFO level and return it.

    Syslog (via the /dev/log socket) is preferred; when that socket is
    unavailable the handler falls back to plain stdout.
    """
    root = logging.getLogger()
    try:
        sink = logging.handlers.SysLogHandler(address='/dev/log')
    except socket.error:
        # No local syslog socket -- log to the console instead.
        sink = logging.StreamHandler(sys.stdout)
    sink.setFormatter(logging.Formatter(_log_format))
    root.setLevel(logging.INFO)
    root.addHandler(sink)
    return root
def main(args=None):
    """One watchdog pass: probe the service port a few times, then run
    the recovery command (subject to the cooldown) if it is unreachable.
    """
    args = args or sys.argv[1:]
    my_parser = ArgsParser()
    options = my_parser.parse_args(args)
    global logger
    logger = start_logging(options.log_format)
    try:
        logging.info('Starting watchdog run')
        # Persistent state (timestamp of the last exec) lives in a shelf
        # file under the system temp directory.
        with contextlib.closing(shelve.open(os.path.join(tempfile.gettempdir(), options.name), 'c')) as shelf, \
                contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(5)
            connect_errors = None
            executed = shelf.get(options.name)
            # Randomized retry count (3-5) guards against one-off blips.
            # NOTE(review): the same socket object is reused for every
            # connect_ex() attempt -- confirm this is safe on all platforms.
            for _ in range(0, random.randint(3, 5)):
                connect_errors = sock.connect_ex(
                    (options.service_address, options.service_port))
                if not connect_errors:
                    break
                else:
                    logging.info('Trying to connect to %s:%s' % (
                        options.service_address, options.service_port))
                    time.sleep(options.seconds)
            else:
                # for/else: every connection attempt failed.
                logging.info('Cannot connect to %s:%s, issuing exec' %
                             (options.service_address, options.service_port))
            if connect_errors and not executed:
                # First recorded failure: run the command and remember when.
                shelf[options.name] = execute(options.command)
            elif connect_errors and executed:
                if datetime.datetime.now() - executed >= options.command_cooldown:
                    shelf[options.name] = execute(options.command)
                else:
                    next_run = (executed + options.command_cooldown).strftime('%Y-%m-%d %H:%M:%S')
                    logging.info('Watchdog exec cooldown is in effect until %s' % next_run)
            else:
                # Port reachable: clear any recorded exec timestamp.
                logging.info('%s:%s OK' % (options.service_address, options.service_port))
                shelf.clear()
    except KeyboardInterrupt:
        sys.exit(0)
if __name__ == '__main__':
main()
| 3,922 | 21 | 171 |
79bd071e364ccf486ca5c00e1267b45035455585 | 580 | py | Python | source/domain/__init__.py | seemir/stressa | 7c3b178cf13f74ee010dbd44ce99188de3862ef7 | [
"MIT"
] | null | null | null | source/domain/__init__.py | seemir/stressa | 7c3b178cf13f74ee010dbd44ce99188de3862ef7 | [
"MIT"
] | 337 | 2019-09-27T12:26:14.000Z | 2022-03-31T04:13:04.000Z | source/domain/__init__.py | seemir/stressa | 7c3b178cf13f74ee010dbd44ce99188de3862ef7 | [
"MIT"
] | 1 | 2019-11-08T13:40:30.000Z | 2019-11-08T13:40:30.000Z | # -*- coding: utf-8 -*-
"""
Domain model implementation
"""
__author__ = 'Samir Adrik'
__email__ = 'samir.adrik@gmail.com'
from .expenses import Expenses
from .currency import Currency
from .address import Address
from .percent import Percent
from .family import Family
from .entity import Entity
from .person import Person
from .female import Female
from .amount import Amount
from .mobile import Mobile
from .share import Share
from .money import Money
from .email import Email
from .value import Value
from .phone import Phone
from .male import Male
from .name import Name
| 20.714286 | 35 | 0.775862 | # -*- coding: utf-8 -*-
"""
Domain model implementation
"""
__author__ = 'Samir Adrik'
__email__ = 'samir.adrik@gmail.com'
from .expenses import Expenses
from .currency import Currency
from .address import Address
from .percent import Percent
from .family import Family
from .entity import Entity
from .person import Person
from .female import Female
from .amount import Amount
from .mobile import Mobile
from .share import Share
from .money import Money
from .email import Email
from .value import Value
from .phone import Phone
from .male import Male
from .name import Name
| 0 | 0 | 0 |
34e238b61d6b5235a3780f6205e3668b3046d1b3 | 11,994 | py | Python | src/experimental/analyse_results.py | neildhir/DCBO | dc8a1df096cc83b37f45f9e546ed7f59ad693f33 | [
"MIT"
] | 6 | 2021-11-07T08:08:55.000Z | 2022-01-27T18:21:34.000Z | src/experimental/analyse_results.py | neildhir/DCBO | dc8a1df096cc83b37f45f9e546ed7f59ad693f33 | [
"MIT"
] | null | null | null | src/experimental/analyse_results.py | neildhir/DCBO | dc8a1df096cc83b37f45f9e546ed7f59ad693f33 | [
"MIT"
] | 2 | 2021-12-19T18:09:30.000Z | 2022-01-13T03:04:39.000Z | from typing import Callable, Dict, Tuple
import numpy as np
from numpy import cumsum
from copy import deepcopy
from ..utils.utilities import calculate_best_intervention_and_effect
def get_relevant_results(results: Callable, replicates: int) -> Dict[str, tuple]:
    """
    When we get results from a notebook they are in a different format from when we pickle them. This function converts the results into the correct format so that we can analyse them.
    Parameters
    ----------
    results : Callable
        The results from running the function 'run_methods_replicates()'
    replicates : int
        How many replicates we used.
    Returns
    -------
    Dict[str, tuple]
        A dictionary with the methods on the keys with results from each replicates on the values.
    """
    data = {m: [] for m in results}
    for m in results:
        for r in range(replicates):
            # One 4-tuple of the tracked result attributes per replicate.
            data[m].append(
                (
                    results[m][r].per_trial_cost,
                    results[m][r].optimal_outcome_values_during_trials,
                    results[m][r].optimal_intervention_sets,
                    results[m][r].assigned_blanket,
                )
            )
    return data
| 36.678899 | 184 | 0.609221 | from typing import Callable, Dict, Tuple
import numpy as np
from numpy import cumsum
from copy import deepcopy
from ..utils.utilities import calculate_best_intervention_and_effect
def get_relevant_results(results: Callable, replicates: int) -> Dict[str, tuple]:
    """
    Convert in-notebook result objects to the pickled-results layout used by the analysis helpers.
    Parameters
    ----------
    results : Callable
        The results from running the function 'run_methods_replicates()'
    replicates : int
        How many replicates we used.
    Returns
    -------
    Dict[str, tuple]
        Methods on the keys; on the values, one tuple per replicate with
        (per_trial_cost, optimal_outcome_values_during_trials,
        optimal_intervention_sets, assigned_blanket).
    """
    return {
        method: [
            (
                results[method][rep].per_trial_cost,
                results[method][rep].optimal_outcome_values_during_trials,
                results[method][rep].optimal_intervention_sets,
                results[method][rep].assigned_blanket,
            )
            for rep in range(replicates)
        ]
        for method in results
    }
def get_mean_and_std(data, t_steps, repeats=5):
    """Aggregate replicate traces into per-time-step (mean, std) pairs.

    data maps model name -> list (one per replicate) of per-time-step
    arrays; for each model and each t the replicates are stacked and
    summarized element-wise.
    """
    summary = {model: [] for model in data}
    for model in data:
        for t in range(t_steps):
            stacked = np.vstack([data[model][rep][t] for rep in range(repeats)])
            summary[model].append((stacked.mean(axis=0), stacked.std(axis=0)))
    return summary
def get_cumulative_cost_mean_and_std(data, t_steps, repeats=5):
    """Average each model's per-trial costs over replicates, then return
    the cumulative sum of that mean trace, per time step.

    Unlike get_mean_and_std, the standard deviation is discarded here;
    only the running total of the mean cost is kept.
    """
    summary = {model: [] for model in data}
    for model in data:
        for t in range(t_steps):
            stacked = np.vstack([data[model][rep][t] for rep in range(repeats)])
            # Cumulative cost of the replicate-averaged trace.
            summary[model].append(np.cumsum(stacked.mean(axis=0)))
    return summary
def elaborate(
    number_of_interventions: int, n_replicates: int, data: dict, best_objective_values: list, T: int
) -> Tuple[dict, dict]:
    """Aggregate replicate results into per-method mean/std outcome curves
    and cumulative intervention costs.

    Steps: replace the 10000000.0 "no initial observation" sentinel,
    stack replicates, prepend the initial point for BO/ABO (which log one
    fewer trial than the causal methods), and clip outcomes from below at
    the known optimum for each time step.
    """
    # Replace initial data point: the sentinel means no observation at
    # trial 0, so reuse the value from trial 1.
    if number_of_interventions is None:
        for model in data:
            for r in range(n_replicates):
                for t in range(T):
                    if data[model][r][1][t][0] == 10000000.0:
                        data[model][r][1][t][0] = data[model][r][1][t][1]
    # Aggregate data
    per_trial_cost = {model: [] for model in data.keys()}
    optimal_outcome_values_during_trials = {model: [] for model in data.keys()}
    for i in range(n_replicates):
        for model in data:
            per_trial_cost[model].append(data[model][i][0])
            optimal_outcome_values_during_trials[model].append(data[model][i][1])
    # Aggregate data
    exp_per_trial_cost = get_cumulative_cost_mean_and_std(per_trial_cost, T, repeats=n_replicates)
    exp_optimal_outcome_values_during_trials = get_mean_and_std(
        optimal_outcome_values_during_trials, T, repeats=n_replicates
    )
    # BO/ABO traces are one entry short; duplicate the first value (and a
    # zero cost) at the front so all methods align on trial index.
    for model in exp_per_trial_cost:
        if model == "BO" or model == "ABO":
            costs = exp_per_trial_cost[model]
            values = exp_optimal_outcome_values_during_trials[model]
            for t in range(T):
                values_t = values[t]
                exp_per_trial_cost[model][t] = np.asarray([0] + list(costs[t]))
                exp_optimal_outcome_values_during_trials[model][t] = tuple(
                    [np.asarray([values_t[i][0]] + list(values_t[i])) for i in range(2)]
                )
    # Clip values so they are not lower than the min
    clip_max = 1000
    for model in exp_per_trial_cost:
        for t in range(T):
            clipped = np.clip(
                exp_optimal_outcome_values_during_trials[model][t][0], a_min=best_objective_values[t], a_max=clip_max
            )
            exp_optimal_outcome_values_during_trials[model][t] = (
                clipped,
                exp_optimal_outcome_values_during_trials[model][t][1],
            )
    return exp_optimal_outcome_values_during_trials, exp_per_trial_cost
def get_converge_trial(best_objective_values, exp_optimal_outcome_values_during_trials, n_trials, T, n_decimal=1):
    """Find, for each method and time step, the first trial index at
    which the rounded mean outcome equals the (rounded) optimum.

    best_objective_values is either a shared list indexed by t, or a
    dict keyed by method holding per-replicate optima (averaged here).
    Methods that never converge get n_trials as the index.
    """
    where_converge_dict = {method: [None] * T for method in list(exp_optimal_outcome_values_during_trials.keys())}
    for method in exp_optimal_outcome_values_during_trials.keys():
        for t in range(T):
            if isinstance(best_objective_values, dict):
                comparison_values = np.mean(np.vstack(best_objective_values[method])[:, t])
            else:
                comparison_values = best_objective_values[t]
            bool_results = np.round(exp_optimal_outcome_values_during_trials[method][t][0], n_decimal) == np.round(
                comparison_values, n_decimal
            )
            if np.all(~np.array(bool_results)):
                where_method = n_trials
            else:
                # argmax on a boolean array returns the first True index.
                where_method = np.argmax(bool_results)
            where_converge_dict[method][t] = where_method
    return where_converge_dict
def get_common_initial_values(
    T, data, n_replicates,
):
    """For every time step and replicate, take the worst (largest)
    initial outcome value across all methods.

    A stored value of 10000000.0 is the "no initial observation"
    sentinel; in that case the second trial value is used instead.
    Returns a T-long list of n_replicates-long lists.
    """
    def _initial(values):
        # Fall back to the second entry when the sentinel is present.
        return values[1] if values[0] == 10000000.0 else values[0]
    return [
        [
            np.max([_initial(data[method][r][1][t]) for method in data])
            for r in range(n_replicates)
        ]
        for t in range(T)
    ]
def get_table_values(dict_gap_summary, T, n_decimal_mean=2, n_decimal_std=2):
    """Format per-method [mean, std] summaries as paired table rows.

    For each method two rows are appended: one headed by the method name
    holding rounded means, and one headed by a blank cell holding the
    rounded stds in parentheses ("0.00" when the std rounds to zero).
    """
    rows = []
    for method, summary in dict_gap_summary.items():
        mean_row = [method]
        std_row = [" "]
        for t in range(T):
            mean_row.append(np.round(summary[t][0], n_decimal_mean))
            std = np.round(summary[t][1], n_decimal_std)
            if std == 0.0:
                # Keep a fixed-width zero rather than numpy's '0.0'.
                std = "0.00"
            std_row.append("(%s)" % str(std))
        rows.append(mean_row)
        rows.append(std_row)
    return rows
def count_optimal_intervention_set(n_replicates, T, data, optimal_set):
    """Count, per method and time step, how many replicates intervened
    on the truly optimal exploration set.

    optimal_set is either a single list indexed by t (shared ground
    truth) or a dict keyed by method holding per-replicate, per-t truth.
    """
    per_method_truth = isinstance(optimal_set, dict)
    counts = {}
    for method, replicate_results in data.items():
        per_t = []
        for t in range(T):
            hits = 0.0
            for r in range(n_replicates):
                chosen = replicate_results[r][2][t]
                truth = optimal_set[method][r][t] if per_method_truth else optimal_set[t]
                hits += int(truth == chosen)
            per_t.append(hits)
        counts[method] = per_t
    return counts
def gap_metric_standard(
    T, data, best_objective_values, total_initial_list, n_replicates, n_trials, where_converge_dict=None,
):
    """Compute a convergence-speed-weighted gap metric per method/time.

    The raw gap (last - initial) / (best - initial) is clipped to [0, 1]
    per replicate, then blended with the fraction of trials remaining
    after convergence and normalised back into [0, 1]. Returns, per
    method, a T-long list of [mean, std] over replicates.

    NOTE(review): where_converge_dict defaults to None but is subscripted
    unconditionally below -- callers must always pass it.
    """
    dict_gap = {method: [None] * T for method in list(data.keys())}
    for method in list(data.keys()):
        for t in range(T):
            for r in range(n_replicates):
                values = data[method][r][1][t]
                initial = total_initial_list[t][r]
                last = values[-1]
                if last - initial == 0.0:
                    # No movement at all; also avoids 0/0 when best == initial.
                    gap = 0.0
                else:
                    gap = np.clip((last - initial) / (best_objective_values[t] - initial), 0.0, 1.0)
                if dict_gap[method][t] is None:
                    dict_gap[method][t] = [gap]
                else:
                    dict_gap[method][t].append(gap)
    dict_gap_iters_summary = {method: [None] * T for method in list(data.keys())}
    for t in range(T):
        for method in data.keys():
            # Reward early convergence: fraction of the trial budget left.
            percent_iters = (n_trials - where_converge_dict[method][t]) / n_trials
            normaliser = 1.0 + (n_trials - 1) / n_trials
            values_gap_standard = list((np.asarray(dict_gap[method][t]) + percent_iters) / normaliser)
            dict_gap_iters_summary[method][t] = [np.mean(values_gap_standard), np.std(values_gap_standard)]
    return dict_gap_iters_summary
def get_stored_blanket(T, data, n_replicates, list_var):
    """Reconstruct, per model and replicate, the intervention blanket as
    it accumulated over time from the assigned blankets stored in *data*.

    Each time step copies the previous blanket and fills in the value
    assigned at t-1 for every variable. For CBO/DCBO an assigned "X" at
    t-1 clears "Z" at t-1 (the two are treated as mutually exclusive).
    """
    store_blankets = {
        model: [[{var: [None] * T for var in list_var} for _ in range(T)] for _ in range(n_replicates)]
        for model in data.keys()
    }
    for method in data.keys():
        for r in range(n_replicates):
            for t in range(1, T):
                # data[...][3] is the assigned blanket for this replicate.
                values = data[method][r][3]
                store_blankets[method][r][t] = deepcopy(store_blankets[method][r][t - 1])
                for var in list_var:
                    store_blankets[method][r][t][var][t - 1] = values[var][t - 1]
                if store_blankets[method][r][t]["X"][t - 1] is not None and method in ["CBO", "DCBO"]:
                    store_blankets[method][r][t]["Z"][t - 1] = None
    return store_blankets
def get_optimal_set_value(GT, T, exploration_sets_list):
    """Pick, per time step, the exploration set whose ground-truth causal
    effect attains the smallest value.

    GT is a T-long list of dicts mapping exploration set -> effect array.
    Returns (optimal set per t, corresponding minimum value per t); ties
    resolve to the first set in exploration_sets_list (np.argmin).
    """
    best_sets, best_values = [], []
    for t in range(T):
        per_set_minima = [np.min(GT[t][es]) for es in exploration_sets_list]
        winner = np.argmin(per_set_minima)
        best_sets.append(exploration_sets_list[winner])
        best_values.append(per_set_minima[winner])
    return best_sets, best_values
def get_average_performance_t(data, dict_values, T):
    """Average each method's per-time-step [mean, std] pairs over the T
    time steps.

    Methods present in *data* but missing from *dict_values* keep the
    [None, None] placeholder; every averaged entry is wrapped in a
    single-element list, mirroring the per-t summary layout.
    """
    averages = {method: [None, None] for method in data}
    for method in dict_values:
        total_mean = sum(dict_values[method][t][0] for t in range(T))
        total_std = sum(dict_values[method][t][1] for t in range(T))
        averages[method] = [[total_mean / T, total_std / T]]
    return averages
def store_optimal_set_values(
    store_blankets,
    data,
    n_replicates,
    T,
    init_sem,
    sem,
    exploration_sets,
    interventional_grids,
    intervention_domain,
    exploration_sets_dict,
):
    """For every model and replicate, recompute the ground-truth optimal
    intervention set and value at each time step, conditioned on the
    blankets that the method actually assigned.

    Delegates the ground-truth evaluation to get_ground_truth and the
    per-t minimisation to get_optimal_set_value. Returns
    (optimal_intervention_sets, optimal_intervention_values), each
    keyed by model with one list per replicate.
    """
    optimal_intervention_values = {model: [] for model in data.keys()}
    optimal_intervention_sets = {model: [] for model in data.keys()}
    for model in data.keys():
        for r in range(n_replicates):
            GT, _ = get_ground_truth(
                deepcopy(store_blankets[model][r]),
                T,
                init_sem,
                sem,
                exploration_sets,
                interventional_grids,
                intervention_domain,
            )
            # Restrict the argmin to the sets this model actually explored.
            opt_set_list, opt_values_list = get_optimal_set_value(GT, T, exploration_sets_dict[model])
            optimal_intervention_sets[model].append(opt_set_list)
            optimal_intervention_values[model].append(opt_values_list)
    return optimal_intervention_sets, optimal_intervention_values
def get_ground_truth(
    blanket, T, init_sem, sem, exploration_sets, interventional_grids, intervention_domain,
):
    """Evaluate the true causal effects under *blanket* at every time step.

    Returns (ground_truth, optimal_assigned_blankets): one effect mapping
    per t, and the blanket that the true optimum would assign to the NEXT
    time step (index t+1; index 0 stays None).
    """
    optimal_assigned_blankets = [None] * T
    ground_truth = []
    for t in range(T):
        new_blanket, true_causal_effect = calculate_best_intervention_and_effect(
            static_sem=init_sem,
            dynamic_sem=sem,
            exploration_sets=exploration_sets,
            interventional_grids=interventional_grids,
            time=t,
            intervention_domain=intervention_domain,
            blanket=blanket[t],
            T=T,
            plot=False,
        )
        if t < T - 1:
            optimal_assigned_blankets[t + 1] = new_blanket
        ground_truth.append(true_causal_effect)
    return ground_truth, optimal_assigned_blankets
| 10,464 | 0 | 299 |
3419bba1c75411baceb526966ccef4570e3255d9 | 30,401 | py | Python | VUsbTools/Log.py | scanlime/vusb-analyzer | 38397212fabf8c3f62503605b833e094a5f1183f | [
"Unlicense"
] | 34 | 2015-01-13T20:04:06.000Z | 2022-03-30T14:29:31.000Z | VUsbTools/Log.py | a-sf-mirror/vusb-analyzer | 23869c07bb0e6bc95946337ae320bf012e372bbc | [
"MIT"
] | 2 | 2015-01-13T20:09:12.000Z | 2017-02-27T12:36:51.000Z | VUsbTools/Log.py | a-sf-mirror/vusb-analyzer | 23869c07bb0e6bc95946337ae320bf012e372bbc | [
"MIT"
] | 15 | 2015-01-13T20:31:15.000Z | 2021-09-02T05:15:04.000Z | #
# VUsbTools.Log
# Micah Elizabeth Scott <micah@vmware.com>
#
# Implements parsers for USB log files. Currently
# this includes slurping usbAnalyzer data out of the
# VMX log, and parsing the XML logs exported by
# Ellisys Visual USB.
#
# Copyright (C) 2005-2010 VMware, Inc. Licensed under the MIT
# License, please see the README.txt. All rights reserved.
#
from __future__ import division
import sys, time, re, os, string, atexit
import xml.sax, Queue, threading, difflib
import gtk, gobject
import traceback, gzip, struct
from VUsbTools import Types
class UsbIOParser(Types.psyobj):
    """Parses USBIO log lines and generates Transaction objects appropriately.
    Finished transactions are pushed into the supplied queue.
    """
    # Callers feed this parser one log line at a time.
    lineOriented = True
    # NOTE(review): this copy defines no __init__/parse; self.current and
    # self.eventQueue are never assigned in what is shown here -- this
    # excerpt appears truncated, confirm against the complete definition.
    def flush(self):
        """Force any in-progress transactions to be completed. This should be
        called when you know the USB analyzer is finished outputting
        data, such as when a non-USBIO line appears in the log.
        """
        if self.current.dir:
            self.eventQueue.put(self.current)
            self.current = Types.Transaction()
class TimestampLogParser:
    """Parse a simple format which logs timestamps in nanosecond resolution.
    Lines are of the form:
       <timestamp> <name> args...
    The event name may be 'begin-foo' or 'end-foo' to indicate an event
    which executes over a span of time, or simply 'foo' to mark a single
    point.
    """
    # Callers feed this parser one log line at a time.
    # NOTE(review): no methods are visible in this excerpt -- it appears
    # truncated; confirm against the complete class definition.
    lineOriented = True
class VmxLogParser(UsbIOParser):
    """Read the VMX log, looking for new USBIO lines and parsing them.
    """
    # NOTE(review): this excerpt defines no parse(); the attributes below
    # are presumably maintained by methods not shown -- confirm against
    # the complete class definition.
    frame = None
    epoch = None
    lineNumber = 0
    def parseRelativeTime(self, line):
        """Start the clock when we see our first USB log line"""
        # The first parsed timestamp becomes the epoch; all later times
        # are reported relative to it.
        t = self.parseTime(line)
        if self.epoch is None:
            self.epoch = t
        return t - self.epoch
    # (stamp, parsed) pair memoizing the last strptime() result.
    _timeCache = (None, None)
    def parseTime(self, line):
        """Return a unix-style timestamp for the given line."""
        # Two timestamp layouts: "Mon DD HH:MM:SS.mmm" (no year) and an
        # ISO-like "YYYY-MM-DDTHH:MM:SS.mmm"; column 10 disambiguates.
        if line[10] != "T":
            """XXX: This assumes the current year, so logs that straddle
            years will have a giant discontinuity in timestamps.
            """
            timefmt = "%b %d %H:%M:%S"
            stamp = line[:15]
            usec = line[16:19]
        else:
            timefmt = "%Y-%m-%dT%H:%M:%S"
            stamp = line[:19]
            usec = line[20:23]
        # Cache the results of strptime. It only changes every
        # second, and this was taking more than 50% of our parsing time!
        savedStamp, parsed = self._timeCache
        if savedStamp != stamp:
            parsed = time.strptime(stamp, timefmt)
            self._timeCache = stamp, parsed
        now = time.localtime()
        try:
            usec = int(usec)
        except ValueError:
            usec = 0
        # 'usec' actually holds the 3-digit millisecond field (note the
        # division by 1000). Rebuild with the *current* year, see XXX above.
        return usec / 1000.0 + time.mktime((
            now.tm_year, parsed.tm_mon, parsed.tm_mday,
            parsed.tm_hour, parsed.tm_min, parsed.tm_sec,
            parsed.tm_wday, parsed.tm_yday, parsed.tm_isdst))
def parseInt(attrs, name, default=None):
    """Look up *name* in *attrs* and parse it as an int.

    Ellisys logs embed thousands separators (commas) in their integers,
    so commas are stripped first. Returns *default* when the attribute
    is missing or cannot be parsed.
    """
    try:
        raw = attrs[name]
        return int(raw.replace(",", ""))
    except (KeyError, ValueError):
        return default
def parseFloat(attrs, name, default=None):
    """Look up *name* in *attrs* and parse it as a float.

    Ellisys logs embed commas and spaces in their floating point
    numbers, so both are stripped first. Returns *default* when the
    attribute is missing or cannot be parsed.
    """
    try:
        cleaned = attrs[name].replace(",", "").replace(" ", "")
        return float(cleaned)
    except (KeyError, ValueError):
        return default
class EllisysXmlHandler(xml.sax.handler.ContentHandler):
    """Handles SAX events from an XML log exported by Ellisys
    Visual USB. The completed USB transactions are pushed into
    the provided completion queue.
    """
    # NOTE(review): this copy defines no __init__ or startElement; the
    # self.pipes/self.pending/self.timestamp/_frameAttrs state used below
    # is set up elsewhere -- this excerpt appears truncated.
    frameNumber = None
    device = None
    endpoint = None
    # Transaction currently receiving data, if any.
    current = None
    # Bound method that consumes character data, or None.
    characterHandler = None
    def beginUrb(self, pipe):
        """Simulate a new URB being created on the supplied pipe. This
        begins a Down transaction and makes it pending and current.
        """
        t = Types.Transaction()
        t.dir = 'Down'
        t.dev, t.endpt = pipe
        t.timestamp = self.timestamp
        t.frame = parseInt(self._frameAttrs, 'frameNumber')
        t.status = 0
        self.pipes[pipe] = t
        self.pending[pipe] = t
        self.current = t
    def flipUrb(self, pipe):
        """Begin the Up phase on a particular pipe. This
        completes the Down transaction, and makes an Up
        current (but not pending)
        """
        del self.pending[pipe]
        down = self.pipes[pipe]
        self.eventQueue.put(down)
        up = Types.Transaction()
        up.dir = 'Up'
        up.dev, up.endpt = pipe
        # Up and Down transactions share setup data, if applicable
        if down.hasSetupData():
            up.data = down.data[:8]
        self.pipes[pipe] = up
        self.current = up
    def completeUrb(self, pipe, id):
        """Complete the Up phase on a pipe"""
        if pipe in self.pending:
            self.flipUrb(pipe)
        assert pipe in self.pipes
        t = self.pipes[pipe]
        del self.pipes[pipe]
        self.current = None
        t.timestamp = self.timestamp
        t.frame = parseInt(self._frameAttrs, 'frameNumber')
        # ACK/NYET are success handshakes; anything else is kept as the
        # error status string.
        if id in ('ACK', 'NYET'):
            t.status = 0
        else:
            t.status = id
        self.eventQueue.put(t)
Types.psycoBind(EllisysXmlHandler)
class EllisysXmlParser:
    """Parses XML files exported from Ellisys Visual USB. This
    is just a glue object that sets up an XML parser and
    sends SAX events to the EllisysXmlHandler.
    """
    # Whole-buffer feeding; callers need not split on newlines.
    # NOTE(review): no methods are visible in this excerpt -- it appears
    # truncated; confirm against the complete class definition.
    lineOriented = False
class UsbmonLogParser:
    """Parses usbmon log lines and generates Transaction objects appropriately.
    Finished transactions are pushed into the supplied queue.
    This parser was originally contributed by Christoph Zimmermann.
    """
    # Callers feed this parser one log line at a time.
    lineOriented = True
    # Running count of lines consumed (methods not shown in this excerpt).
    # NOTE(review): this copy appears truncated; confirm against the
    # complete class definition.
    lineNumber = 0
class Follower(threading.Thread):
    """A thread that continuously scans a file, parsing each line"""
    # NOTE(review): only class-level tunables are visible in this excerpt;
    # the run loop itself is not shown, so the comments below are
    # inferences to confirm against the complete class.
    # Presumably the delay (seconds) between polls for new data.
    pollInterval = 0.1
    # Presumably a stop flag cleared to end the scan loop.
    running = True
    progressInterval = 0.2
    progressExpiration = 0
class QueueSink:
    """Polls a Queue for new items, via the Glib main loop.
    When they're available, calls a callback with them.
    """
    # NOTE(review): only tunables are visible here; the polling methods
    # are not shown. interval is presumably the glib timeout in
    # milliseconds and timeSlice a per-poll time budget in seconds --
    # confirm against the complete class.
    interval = 200
    timeSlice = 0.25
    maxsize = 512
    # Presumably bounds how many items are drained per poll iteration.
    batch = range(10)
def chooseParser(filename):
    """Return an appropriate log parser class for the provided filename.

    Only the filename's extension is consulted, never the content.
    A trailing '.gz' is peeled off and the inner extension classified;
    the VMX log parser is the catch-all default.
    """
    base, ext = os.path.splitext(filename)
    if ext == ".gz":
        # Compressed file: classify whatever is inside.
        return chooseParser(base)
    dispatch = {
        ".xml": EllisysXmlParser,
        ".tslog": TimestampLogParser,
        ".mon": UsbmonLogParser,
    }
    return dispatch.get(ext, VmxLogParser)
| 36.939247 | 87 | 0.542087 | #
# VUsbTools.Log
# Micah Elizabeth Scott <micah@vmware.com>
#
# Implements parsers for USB log files. Currently
# this includes slurping usbAnalyzer data out of the
# VMX log, and parsing the XML logs exported by
# Ellisys Visual USB.
#
# Copyright (C) 2005-2010 VMware, Inc. Licensed under the MIT
# License, please see the README.txt. All rights reserved.
#
from __future__ import division
import sys, time, re, os, string, atexit
import xml.sax, Queue, threading, difflib
import gtk, gobject
import traceback, gzip, struct
from VUsbTools import Types
class UsbIOParser(Types.psyobj):
    """Parses USBIO log lines and generates Transaction objects appropriately.
    Finished transactions are pushed into the supplied queue.
    """
    # Callers feed this parser one log line at a time.
    lineOriented = True
    def __init__(self, eventQueue):
        # eventQueue receives completed Transaction objects.
        self.current = Types.Transaction()
        self.eventQueue = eventQueue
    def parse(self, line, timestamp=None, frame=None, lineNumber=None):
        """Consume one USBIO log line (the 'USBIO:' prefix already stripped).

        'Up'/'Down' header lines flush the previous transaction and start
        a new one; hex-dump lines append payload data to the current one;
        anything else is kept as decoded annotation text.
        """
        tokens = line.split()
        if tokens[0] in ('Up', 'Down'):
            self.flush()
            self.current.dir = tokens[0]
            self.current.timestamp = timestamp
            self.current.frame = frame
            self.current.lineNumber = lineNumber
            self.parseKeyValuePairs(tokens[1:])
        # new Log_HexDump() format:
        # USBIO: 000: 80 06 ......
        elif (len(tokens) >= 2 and
              len(tokens[0]) >= 4 and
              tokens[0][-1] == ':' and
              len(tokens[1]) == 2):
            data = line.split(':')
            data = data[1].lstrip()
            self.current.appendHexData(data[:48])
        # old Log_HexDump() format:
        # USBIO: 80 06 ......
        elif len(tokens[0]) == 2:
            self.current.appendHexData(line[:48])
        else:
            self.flush()
            self.current.appendDecoded(line.strip())
    def parseKeyValuePairs(self, tokens):
        """Assign 'key=value' header fields onto the current transaction.

        Endpoint numbers are logged in hex; every other field is decimal.
        """
        for token in tokens:
            kv = token.split('=', 1)
            if len(kv) > 1:
                # BUG FIX: ('endpt') is just the string 'endpt', so the old
                # membership test matched ANY key that was a substring of
                # 'endpt' (e.g. 'end', 'pt', 'd'). Use a real 1-tuple.
                if kv[0] in ('endpt',):
                    base = 16
                else:
                    base = 10
                setattr(self.current, kv[0], int(kv[1], base))
    def flush(self):
        """Force any in-progress transactions to be completed. This should be
        called when you know the USB analyzer is finished outputting
        data, such as when a non-USBIO line appears in the log.
        """
        if self.current.dir:
            self.eventQueue.put(self.current)
            self.current = Types.Transaction()
class TimestampLogParser:
    """Parse a simple format which logs timestamps in nanosecond resolution.
    Lines are of the form:
       <timestamp> <name> args...
    The event name may be 'begin-foo' or 'end-foo' to indicate an event
    which executes over a span of time, or simply 'foo' to mark a single
    point.
    """
    # Callers feed this parser one log line at a time.
    lineOriented = True
    def __init__(self, eventQueue):
        # First nanosecond timestamp seen; all times are relative to it.
        self.epoch = None
        # Maps event name -> synthetic endpoint number.
        self.nameEndpoints = {}
        self.nextEp = 1
        self.lineNumber = 0
        self.eventQueue = eventQueue
    def flush(self):
        # Nothing buffered; every line is emitted immediately in parse().
        pass
    def parse(self, line):
        """Convert one log line into Down and/or Up Transaction events."""
        self.lineNumber += 1
        tokens = line.split()
        try:
            # Extract the time, convert to seconds
            nanotime = int(tokens[0])
            if not self.epoch:
                self.epoch = nanotime
            timestamp = (nanotime - self.epoch) / 1000000000.0
            # Detect the start- or end- prefix
            name = tokens[1]
            if name.startswith("begin-"):
                name = name.split('-', 1)[1]
                dirs = ('Down',)
            elif name.startswith("end-"):
                name = name.split('-', 1)[1]
                dirs = ('Up',)
            else:
                # A point event is both the start and the end of a span.
                dirs = ('Down', 'Up')
            # Generate an 'endpoint' for the event name
            # (each distinct name gets its own small integer).
            try:
                endpoint = self.nameEndpoints[name]
            except KeyError:
                endpoint = self.nextEp
                self.nameEndpoints[name] = endpoint
                self.nextEp = endpoint + 1
            for dir in dirs:
                trans = Types.Transaction()
                trans.dir = dir
                trans.timestamp = timestamp
                trans.lineNumber = self.lineNumber
                trans.endpt = endpoint
                trans.dev = 0
                trans.status = 0
                trans.datalen = 0x1000
                trans.appendDecoded(" ".join(tokens[1:]))
                self.eventQueue.put(trans)
        except:
            # Malformed lines are reported but never abort the scan.
            print "Error on line %d:" % self.lineNumber
            traceback.print_exc()
class VmxLogParser(UsbIOParser):
    """Read the VMX log, looking for new USBIO lines and parsing them.
    """
    # Most recent USB frame number parsed from a UHCI/EHCI line.
    frame = None
    # Timestamp of the first USB log line; later times are relative to it.
    epoch = None
    lineNumber = 0
    def parse(self, line):
        """Dispatch one VMX log line: UHCI/EHCI frame markers become
        SOFMarker events, USBIO lines go to the USBIO parser, and any
        other line flushes the transaction in progress."""
        self.lineNumber += 1
        # Local to the UHCI core
        l = line.split("UHCI:")
        if len(l) == 2:
            m = re.search("- frame ([0-9]+) -", l[1])
            if m:
                self.frame = int(m.group(1))
                # Don't let SOF markers start the clock
                if self.epoch is not None:
                    self.eventQueue.put(Types.SOFMarker(self.parseRelativeTime(line),
                                                        self.frame, self.lineNumber))
                return
        # Local to the EHCI core
        l = line.split("EHCI:")
        if len(l) == 2:
            m = re.search("Execute frame ([0-9]+)[\. ]", l[1])
            if m:
                self.frame = int(m.group(1))
                # Don't let SOF markers start the clock
                if self.epoch is not None:
                    self.eventQueue.put(Types.SOFMarker(self.parseRelativeTime(line),
                                                        self.frame, self.lineNumber))
                return
        # Generic analyzer URBs
        l = line.split("USBIO:")
        if len(l) == 2:
            UsbIOParser.parse(self, l[1][:-1], self.parseRelativeTime(line),
                              self.frame, self.lineNumber)
        else:
            self.flush()
    def parseRelativeTime(self, line):
        """Start the clock when we see our first USB log line"""
        # The first parsed timestamp becomes the epoch.
        t = self.parseTime(line)
        if self.epoch is None:
            self.epoch = t
        return t - self.epoch
    # (stamp, parsed) pair memoizing the last strptime() result.
    _timeCache = (None, None)
    def parseTime(self, line):
        """Return a unix-style timestamp for the given line."""
        # Two timestamp layouts: "Mon DD HH:MM:SS.mmm" (no year) and an
        # ISO-like "YYYY-MM-DDTHH:MM:SS.mmm"; column 10 disambiguates.
        if line[10] != "T":
            """XXX: This assumes the current year, so logs that straddle
            years will have a giant discontinuity in timestamps.
            """
            timefmt = "%b %d %H:%M:%S"
            stamp = line[:15]
            usec = line[16:19]
        else:
            timefmt = "%Y-%m-%dT%H:%M:%S"
            stamp = line[:19]
            usec = line[20:23]
        # Cache the results of strptime. It only changes every
        # second, and this was taking more than 50% of our parsing time!
        savedStamp, parsed = self._timeCache
        if savedStamp != stamp:
            parsed = time.strptime(stamp, timefmt)
            self._timeCache = stamp, parsed
        now = time.localtime()
        try:
            usec = int(usec)
        except ValueError:
            usec = 0
        # 'usec' actually holds the 3-digit millisecond field (note the
        # division by 1000). Rebuild with the *current* year, see XXX above.
        return usec / 1000.0 + time.mktime((
            now.tm_year, parsed.tm_mon, parsed.tm_mday,
            parsed.tm_hour, parsed.tm_min, parsed.tm_sec,
            parsed.tm_wday, parsed.tm_yday, parsed.tm_isdst))
def parseInt(attrs, name, default=None):
    """Fetch attribute *name* and convert it to an int, tolerating the
    comma thousands-separators that Ellisys logs embed in integers.

    Missing or unparsable attributes yield *default*.
    """
    try:
        return int(attrs[name].replace(",", ""))
    except KeyError:
        return default
    except ValueError:
        return default
def parseFloat(attrs, name, default=None):
    """Fetch attribute *name* and convert it to a float, tolerating the
    commas and spaces Ellisys logs embed in floating point numbers.

    Missing or unparsable attributes yield *default*.
    """
    try:
        return float(attrs[name].replace(",", "").replace(" ", ""))
    except KeyError:
        return default
    except ValueError:
        return default
class EllisysXmlHandler(xml.sax.handler.ContentHandler):
    """Handles SAX events from an XML log exported by Ellisys
    Visual USB. The completed USB transactions are pushed into
    the provided completion queue.
    """
    frameNumber = None
    # Device/endpoint of the enclosing <Transaction> element.
    device = None
    endpoint = None
    # Transaction currently receiving data, if any.
    current = None
    # Bound method that consumes character data, or None.
    characterHandler = None
    def __init__(self, eventQueue):
        # pipe (dev, endpt) -> in-flight Transaction; 'pending' holds the
        # subset still in their Down phase.
        self.pipes = {}
        self.pending = {}
        self.eventQueue = eventQueue
        self._frameAttrs = {}
    def startElement(self, name, attrs):
        # This will always call self.startElement_%s where %s is the
        # element name, but the profiler showed us spending quite a lot
        # of time just figuring out who to call, even if this was cached
        # in a dictionary. The tests below are ordered to keep very
        # frequent elements running fast.
        if name == "StartOfFrame":
            # Just stow the SOF attributes, decode them if we end up
            # actually needing them later.
            self._frameAttrs = attrs
        elif name == "data":
            self.characterHandler = self.current.appendHexData
        elif name == "Packet":
            self.startElement_Packet(attrs)
        elif name == "Transaction":
            self.startElement_Transaction(attrs)
        elif name == "Reset":
            self.startElement_Reset(attrs)
    def endElement(self, name):
        self.characterHandler = None
        if name == 'Document':
            # End of document: flush every still-open URB.
            for pipe in self.pipes.keys():
                self.completeUrb(pipe, 'End of Log')
    def startElement_Transaction(self, attrs):
        """Remember which device/endpoint the child packets belong to."""
        self.device = parseInt(attrs, 'device', 0)
        self.endpoint = parseInt(attrs, 'endpoint')
    def startElement_Reset(self, attrs):
        # Error out any transactions that are active during a reset
        for pipe in self.pipes.keys():
            self.completeUrb(pipe, 'Bus Reset')
    def beginUrb(self, pipe):
        """Simulate a new URB being created on the supplied pipe. This
        begins a Down transaction and makes it pending and current.
        """
        t = Types.Transaction()
        t.dir = 'Down'
        t.dev, t.endpt = pipe
        t.timestamp = self.timestamp
        t.frame = parseInt(self._frameAttrs, 'frameNumber')
        t.status = 0
        self.pipes[pipe] = t
        self.pending[pipe] = t
        self.current = t
    def flipUrb(self, pipe):
        """Begin the Up phase on a particular pipe. This
        completes the Down transaction, and makes an Up
        current (but not pending)
        """
        del self.pending[pipe]
        down = self.pipes[pipe]
        self.eventQueue.put(down)
        up = Types.Transaction()
        up.dir = 'Up'
        up.dev, up.endpt = pipe
        # Up and Down transactions share setup data, if applicable
        if down.hasSetupData():
            up.data = down.data[:8]
        self.pipes[pipe] = up
        self.current = up
    def completeUrb(self, pipe, id):
        """Complete the Up phase on a pipe"""
        if pipe in self.pending:
            self.flipUrb(pipe)
        assert pipe in self.pipes
        t = self.pipes[pipe]
        del self.pipes[pipe]
        self.current = None
        t.timestamp = self.timestamp
        t.frame = parseInt(self._frameAttrs, 'frameNumber')
        # ACK/NYET are success handshakes; anything else is kept as the
        # error status string.
        if id in ('ACK', 'NYET'):
            t.status = 0
        else:
            t.status = id
        self.eventQueue.put(t)
    def startElement_Packet(self, attrs):
        """Drive the URB state machine from one low-level USB packet."""
        id = attrs['id']
        # Fast exit for common packets we don't care about
        if id in ('SOF', 'DATA0', 'DATA1'):
            return
        self.timestamp = parseFloat(attrs, 'time')
        if self.endpoint is None:
            return
        if self.endpoint == 0:
            # EP0 is a special case for us, since its transactions
            # consist of several phases. We always begin with SETUP.
            # If the request has an input stage, we'll see an OUT after
            # that as a handshake. If not, the handshake is an empty
            # IN stage.
            pipe = self.device, 0
            if id == 'SETUP':
                self.beginUrb(pipe)
                self.ep0FinalStage = False
            elif id == 'IN':
                if pipe in self.pending:
                    self.flipUrb(pipe)
                else:
                    self.current = self.pipes[pipe]
                if self.current.data and (ord(self.current.data[0]) & 0x80) == 0:
                    # This is an output request, IN is our last stage
                    self.ep0FinalStage = True
            elif id == 'OUT':
                self.current = self.pipes[pipe]
                if self.current.data and (ord(self.current.data[0]) & 0x80):
                    # This is an input request, OUT is our last stage
                    self.ep0FinalStage = True
            elif id == 'PING':
                # An acknowledged PING packet should never end a control transfer
                self.ep0FinalStage = False
            elif pipe in self.pipes and (
                id == 'STALL' or (id == 'ACK' and self.ep0FinalStage)):
                self.completeUrb(pipe, id)
        else:
            # It's really annoying that the Ellisys logs strip the
            # direction bit from the endpoint number. We have to recover
            # this ourselves.
            if id == 'IN':
                self.endpoint = self.endpoint | 0x80
            pipe = self.device, self.endpoint
            if id in ('OUT', 'IN', 'PING'):
                # These packets indicate that we'd like to be transmitting
                # data to a particular endpoint- so the operating system must
                # now have an active URB.
                if pipe in self.pipes:
                    # Finish a previous packet that wasn't acknowledged.
                    # This will be frequent if isochronous transfers are involved!
                    self.completeUrb(pipe, 'No Handshake')
                self.beginUrb(pipe)
            if pipe in self.pending and id in ('NAK', 'NYET', 'STALL', 'IN'):
                self.flipUrb(pipe)
            if pipe in self.pipes:
                if id == 'ACK':
                    # This accounts for combining individual low-level USB packets
                    # into the larger packets that should be associated with a URB.
                    # We only end a URB when a short packet is transferred.
                    #
                    # FIXME: Determine the real max packet size, rather than
                    # using this hardcoded nonsense.
                    if len(self.current.data) & 0x3F:
                        self.completeUrb(pipe, id)
                elif id in ('NYET', 'STALL'):
                    # Always complete on an error condition
                    self.completeUrb(pipe, id)
    def characters(self, content):
        # This extra level of indirection seems to be necessary, I guess Expat is
        # binding our functions once at initialization.
        if self.characterHandler:
            self.characterHandler(content)
# Register the handler class with Types.psycoBind -- presumably enables
# psyco JIT specialization of its methods when psyco is available
# (helper defined in Types; exact semantics not visible here -- confirm).
Types.psycoBind(EllisysXmlHandler)
class EllisysXmlParser:
    """Parses XML files exported from Ellisys Visual USB. This
    is just a glue object that sets up an XML parser and
    sends SAX events to the EllisysXmlHandler.
    """
    # The Follower feeds us fixed-size chunks rather than whole lines.
    lineOriented = False

    def __init__(self, eventQueue):
        self.eventQueue = eventQueue
        sax_parser = xml.sax.make_parser()
        sax_parser.setContentHandler(EllisysXmlHandler(eventQueue))
        self.xmlParser = sax_parser

    def parse(self, line):
        # Despite the name, 'line' is an arbitrary chunk of XML text.
        self.xmlParser.feed(line)
class UsbmonLogParser:
"""Parses usbmon log lines and generates Transaction objects appropriately.
Finished transactions are pushed into the supplied queue.
This parser was originally contributed by Christoph Zimmermann.
"""
lineOriented = True
lineNumber = 0
def __init__(self, eventQueue):
self.epoch = None
self.trans = Types.Transaction()
self.setupData = None
self.eventQueue = eventQueue
def parse(self, line, timestamp=None):
self.lineNumber += 1
tokens = line.split()
iso = 0
try:
# Do a small stupid sanity check if this is a correct usbmon log line
try:
if len(tokens) < 4:
return
if not(int(tokens[0],16) and int(tokens[1]) and
(tokens[2] in ('S', 'C', 'E'))):
return
except:
print "Error on line %d:" % self.lineNumber
return
# Copied log file format description of the usbmon kernel
# facility You can find the original manual including how
# to use usbmon in your kernel sources: <linux
# sources>/Documentation/usb/usbmon.txt
#
# Copied text starts here:
# Any text format data consists of a stream of events,
# such as URB submission, URB callback, submission
# error. Every event is a text line, which consists of
# whitespace separated words. The number or position of
# words may depend on the event type, but there is a set
# of words, common for all types.
# Here is the list of words, from left to right:
# - URB Tag. This is used to identify URBs, and is
# normally an in-kernel address of the URB structure in
# hexadecimal, but can be a sequence number or any other
# unique string, within reason.
self.trans.lineNumber = self.lineNumber
# TODO Usbmon's timestamps can wrap. Annoyingly, they can wrap
# at either every 4096 seconds or (about) every 4296 seconds, see
# bugzilla.redhat.com/show_bug.cgi?id=574024 Let's wait for some
# feedback on that bugreport before adding (a possibly trivial)
# way to handle that.
# - Timestamp in microseconds, a decimal number. The
# timestamp's resolution depends on available clock, and
# so it can be much worse than a microsecond (if the
# implementation uses jiffies, for example).
# Extract the time, convert to seconds
microtime = int(tokens[1])
if not self.epoch:
self.epoch = microtime
timestamp = (microtime - self.epoch) / 1000000.0
self.trans.timestamp = timestamp
# - Event Type. This type refers to the format of the
# event, not URB type. Available types are: S -
# submission, C - callback, E - submission error.
if tokens[2] == 'S':
self.trans.dir = 'Down'
else:
self.trans.dir = 'Up'
# - "Address" word (formerly a "pipe"). It consists of
# four fields, separated by colons: URB type and
# direction, Bus number, Device address, Endpoint
# number. Type and direction are encoded with two bytes
# in the following manner:
#
# Ci Co Control input and output
# Zi Zo Isochronous input and output
# Ii Io Interrupt input and output
# Bi Bo Bulk input and output
#
# Bus number, Device address, and Endpoint are decimal
# numbers, but they may have leading zeros, for the sake
# of human readers.
#
# Note that older kernels seem to omit the bus number field.
# We can parse either format.
pipe = tokens[3].split(':')
self.trans.dev = int(pipe[-2])
self.trans.endpt = int(pipe[-1])
if pipe[0][1] == 'i' and self.trans.endpt != 0:
# Input endpoint
self.trans.endpt |= 0x80
if len(pipe) >= 4:
self.trans.dev += int(pipe[-3]) * 1000
# - URB Status word. This is either a letter, or several
# numbers separated by colons: URB status, interval,
# start frame, and error count. Unlike the "address"
# word, all fields save the status are
# optional. Interval is printed only for interrupt and
# isochronous URBs. Start frame is printed only for
# isochronous URBs. Error count is printed only for
# isochronous callback events.
#
# The status field is a decimal number, sometimes
# negative, which represents a "status" field of the
# URB. This field makes no sense for submissions, but is
# present anyway to help scripts with parsing. When an
# error occurs, the field contains the error code.
#
# In case of a submission of a Control packet, this
# field contains a Setup Tag instead of an group of
# numbers. It is easy to tell whether the Setup Tag is
# present because it is never a number. Thus if scripts
# find a set of numbers in this word, they proceed to
# read Data Length (except for isochronous URBs). If
# they find something else, like a letter, they read the
# setup packet before reading the Data Length or
# isochronous descriptors.
#
# - Setup packet, if present, consists of 5 words: one of
# each for bmRequestType, bRequest, wValue, wIndex,
# wLength, as specified by the USB Specification 2.0.
# These words are safe to decode if Setup Tag was
# 's'. Otherwise, the setup packet was present, but not
# captured, and the fields contain filler.
#
# - Number of isochronous frame descriptors and
# descriptors themselves. If an Isochronous transfer
# event has a set of descriptors, a total number of them
# in an URB is printed first, then a word per descriptor,
# up to a total of 5. The word consists of 3
# colon-separated decimal numbers for status, offset, and
# length respectively. For submissions, initial length is
# reported. For callbacks, actual length is reported.
if tokens[4] in ('s'):
# This is a setup packet
# Example data stage: 23 01 0010 0002 0040
self.trans.status = 0
data = ''.join(tokens[5:7])
# use VMX's byte ordering for wValue, wIndex and wLength
data = ''.join((data, tokens[7][2:4], tokens[7][0:2]))
data = ''.join((data, tokens[8][2:4], tokens[8][0:2]))
data = ''.join((data, tokens[9][2:4], tokens[9][0:2]))
self.trans.appendHexData(data)
# save the setup data to prepend it to the setup packet data stage
self.setupData = data
else:
status_word = tokens[4].split(':')
self.trans.status = int(status_word[0])
# check if this is a Callback (the 'Up' part of a transaction)
# on a CONTROL endpoint and prepend its Submission's (ie, its
# 'Down' part) setup packet, just as is done in the VMX logs.
if self.setupData and self.trans.endpt == 0 and \
self.trans.dir == 'Up':
self.trans.appendHexData(self.setupData)
self.setupData = None
# - Data Length. For submissions, this is the requested
# length. For callbacks, this is the actual length.
if len(tokens) >= 7:
if not pipe[0][0] == 'Z':
self.trans.datalen = int(tokens[5])
# The isochronous stuff is rather messy ... and probably
# fails with the legacy format. It also assumes input
# direction.
elif self.trans.dir == 'Down' and len(tokens) >= 8:
# skip two tokens:
# - number of isochronous frame descriptors
# - one descriptor
self.trans.datalen = int(tokens[7])
iso = 2
elif self.trans.dir == 'Up':
# The number of isochronous frame descriptors doesn't
# need to equal the number of following descriptors so
# we search for '=' and use the preceding token
try:
equal_sign = tokens.index('=')
if equal_sign > 6:
self.trans.datalen = int(tokens[equal_sign - 1])
iso = equal_sign - 6
except:
pass
# - Data tag. The usbmon may not always capture data, even
# if length is nonzero. The data words are present only
# if this tag is '='.
# - Data words follow, in big endian hexadecimal
# format. Notice that they are not machine words, but
# really just a byte stream split into words to make it
# easier to read. Thus, the last word may contain from
# one to four bytes. The length of collected data is
# limited and can be less than the data length report in
# Data Length word.
if tokens[6 + iso] in ('='):
self.trans.appendHexData(''.join(tokens[7 + iso:]))
self.eventQueue.put(self.trans)
self.trans = Types.Transaction()
# End of copied usbmon description text
# End of log file parsing
except:
print "Error on line %d:" % self.lineNumber
traceback.print_exc()
class Follower(threading.Thread):
    """A thread that continuously scans a file, parsing each line.

    Progress is reported as a 0..1 fraction through progressQueue,
    throttled to one update per progressInterval seconds.
    """
    pollInterval = 0.1        # Seconds to sleep once end-of-file is reached
    running = True
    progressInterval = 0.2    # Minimum seconds between progress updates
    progressExpiration = 0

    def __init__(self, filename, parser, progressQueue=None, tailMode=False):
        self.filename = filename
        self.parser = parser
        self.progressQueue = progressQueue
        if os.path.splitext(filename)[1] == ".gz":
            # gzip stores the uncompressed size (ISIZE, modulo 2**32) in
            # the final 4 bytes of the file.  Read it as unsigned: the
            # previous signed "<l" went negative for logs over 2GB.
            f = open(filename, "rb")
            f.seek(-4, 2)
            self.fileSize = struct.unpack("<I", f.read(4))[0]
            f.seek(0)
            self.file = gzip.GzipFile(fileobj=f)
        else:
            self.file = open(filename)
            self.fileSize = os.fstat(self.file.fileno()).st_size
        if tailMode:
            # Start at the end, like tail -f.
            self.file.seek(0, 2)
        threading.Thread.__init__(self)
        atexit.register(self.stop)

    def run(self):
        try:
            while self.running:
                if self.parser.lineOriented:
                    line = self.file.readline()
                else:
                    line = self.file.read(16384)
                if line:
                    self.parser.parse(line)
                    # Compute our progress only every progressInterval seconds
                    now = time.clock()
                    if now >= self.progressExpiration:
                        # Force float division: under Python 2,
                        # tell()/fileSize is integer division and reported
                        # 0.0 until EOF.  Also guard a zero-length file.
                        fraction = self.file.tell() / float(self.fileSize or 1)
                        self.setProgress(min(1.0, fraction))
                        self.progressExpiration = now + self.progressInterval
                else:
                    self.setProgress(1.0)
                    time.sleep(self.pollInterval)
        except KeyboardInterrupt:
            gtk.main_quit()

    def setProgress(self, progress):
        """Push a ("Loading <name>", fraction) tuple to the progress queue."""
        if self.progressQueue:
            self.progressQueue.put(("Loading %s" % os.path.basename(self.filename),
                                    progress))

    def stop(self):
        # Keep the queue empty so it doesn't deadlock on put()
        if not self.running:
            return
        self.running = False
        try:
            while 1:
                self.parser.eventQueue.get(False)
        except Queue.Empty:
            pass
        self.join()
class QueueSink:
    """Polls a Queue for new items, via the Glib main loop.
    When they're available, calls a callback with them.
    """
    interval = 200      # Backoff timer (ms) used when the queue runs dry
    timeSlice = 0.25    # Max seconds to spend per poll() invocation
    maxsize = 512
    batch = range(10)   # Items drained between clock checks

    def __init__(self, callback):
        self.eventQueue = Queue.Queue(self.maxsize)
        self.callback = callback
        self.poll()

    def poll(self):
        try:
            stop_at = time.clock() + self.timeSlice
            while time.clock() < stop_at:
                # Drain a small batch per clock check so we don't call
                # time.clock() once for every queue item.
                for _unused in self.batch:
                    try:
                        item = self.eventQueue.get(False)
                    except Queue.Empty:
                        # Nothing queued: fall back to the slower timer.
                        gobject.timeout_add(self.interval, self.poll)
                        return False
                    self.callback(item)
        except KeyboardInterrupt:
            gtk.main_quit()
        # Still busy: come back as soon as GTK's event queue is idle.
        gobject.idle_add(self.poll)
        return False
def chooseParser(filename):
    """Return an appropriate log parser class for the provided filename.
    This implementation does not try to inspect the file's content,
    it just looks at the filename's extension.
    """
    root, ext = os.path.splitext(filename)
    # Peel off any .gz wrapper(s) and decide from the inner extension.
    while ext == ".gz":
        root, ext = os.path.splitext(root)
    if ext == ".xml":
        return EllisysXmlParser
    elif ext == ".tslog":
        return TimestampLogParser
    elif ext == ".mon":
        return UsbmonLogParser
    else:
        return VmxLogParser
| 22,766 | 0 | 648 |
97daf54e1087de9fda53865105a6ca1aaa27c279 | 4,135 | py | Python | find_flags.py | detrout/encode4-curation | c4d7904a8013a276c2771afaeb28db94b73ff020 | [
"BSD-3-Clause"
] | null | null | null | find_flags.py | detrout/encode4-curation | c4d7904a8013a276c2771afaeb28db94b73ff020 | [
"BSD-3-Clause"
] | null | null | null | find_flags.py | detrout/encode4-curation | c4d7904a8013a276c2771afaeb28db94b73ff020 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
from collections import Counter
from htsworkflow.submission.encoded import ENCODED
import psycopg2
import pandas
if __name__ == '__main__':
main()
| 30.62963 | 98 | 0.630955 | #!/usr/bin/python3
from collections import Counter
from htsworkflow.submission.encoded import ENCODED
import psycopg2
import pandas
def main(cmdline=None):
    """Build biosample-experiment-report.tsv for muscle-related biosamples.

    Queries the local htsworkflow mirror for released experiments on the
    listed biosample terms, then asks the ENCODE portal for audit-flag
    counts per experiment and writes the merged table as TSV.
    """
    server = ENCODED('www.encodeproject.org')
    server.load_netrc()
    with psycopg2.connect(database='htsworkflow', host='felcat.caltech.edu') as conn:
        term_ids = (
            'EFO:0001098',  # C2C12
            'EFO:0005714',  # LHCN-M2
            'CL:0000187',   # muscle cell, myocyte
            'CL:0000188',   # skeletal muscle cell
            'CL:0000515',   # skeletal muscle myoblast
            'CL:0000594',   # skeletal muscle satellite cell
            'CL:0002372'    # myotube
        )
        report = pandas.DataFrame(
            list(find_biosample_experiments_by_term_id(conn, term_ids)))
        flag_rows = []
        for experiment in report['experiment']:
            row = count_experiment_flags(server, experiment)
            row['experiment'] = experiment
            flag_rows.append(row)
        flag_frame = pandas.DataFrame(
            flag_rows,
            columns=['experiment', 'Red', 'Orange', 'Yellow', 'DCC Todo'])
        merged = report.merge(flag_frame, on='experiment')
        merged.to_csv(
            'biosample-experiment-report.tsv',
            sep='\t',
            index=False
        )
def find_biosample_experiments_by_term_id(connection, term_ids):
    """Yield one dict per released experiment whose biosample matches term_ids.

    Joins library -> biosample -> replicate -> experiment -> award in the
    htsworkflow item store and yields rows keyed by column name.
    """
    query = """
    with
    experiment as (
        select uri as Experiment,
               payload->>'accession' as Experiment_Accession,
               payload->>'description' as Experiment_Description,
               payload->>'status' as Experiment_Status,
               payload->>'date_released' as Experiment_Released,
               payload->>'assay_title' as Experiment_Type
        from item
        where object_type = 'Experiment'
    ),
    replicate as (
        select uri as Replicate,
               payload->>'experiment' as Experiment,
               payload->>'library' as Library
        from item
        where object_type = 'Replicate'
    ),
    library as (
        select uri as Library,
               payload->>'accession' as Library_Accession,
               payload->>'date_created' as Library_Created,
               payload->>'biosample' as Biosample
        from item
        where object_type = 'Library'
    ),
    biosample as (
        select uri as Biosample,
               payload->>'organism' as Organism,
               payload->>'summary' as Summary,
               payload->>'award' as award,
               payload->>'biosample_term_id' as biosample_term_id,
               payload->>'biosample_term_name' as biosample_term_name
        from item
        where object_type = 'Biosample'
    ),
    award as (
        select uri as Award,
               payload->>'rfa' as rfa
        from item
        where object_type = 'Award'
    )
    select
        biosample.Biosample,
        award.rfa,
        experiment.Experiment_Type,
        experiment.Experiment,
        Organism,
        biosample.biosample_term_name,
        biosample.Summary
    from library
    left join biosample on library.Biosample = biosample.Biosample
    left join replicate on replicate.Library = library.Library
    left join experiment on replicate.Experiment = experiment.Experiment
    left join award on biosample.award = award.Award
    where experiment.Experiment_Status = 'released' and
          biosample.biosample_term_id in %(term_ids)s
    order by biosample.Biosample, award.rfa, Organism, experiment.Experiment_Type
    ;
    """
    cursor = connection.cursor()
    cursor.execute(query, {'term_ids': term_ids})
    # Column order matches the SELECT list above.
    columns = ('biosample', 'rfa', 'experiment_type', 'experiment',
               'organism', 'term_name', 'biosample_summary')
    for record in cursor:
        yield dict(zip(columns, record))
def count_experiment_flags(server, experiment):
    """Return per-severity audit flag counts for one experiment.

    Each audit level counts *distinct* categories, and missing levels are
    rendered as a single space so the spreadsheet cell stays blank.
    """
    obj = server.get_json(experiment)
    tallies = Counter()
    audit = obj.get('audit', {})
    for level in audit:
        distinct_categories = {message.get('category') for message in audit[level]}
        tallies[level] += len(distinct_categories)
    return {
        'Red': str(tallies.get('ERROR', ' ')),
        'Orange': str(tallies.get('NOT_COMPLIANT', ' ')),
        'Yellow': str(tallies.get('WARNING', ' ')),
        'DCC Todo': str(tallies.get('DCC Todo', ' ')),
    }
# Allow running the report generator directly as a script.
if __name__ == '__main__':
    main()
| 3,890 | 0 | 69 |
54ef86a8942cfef3d5df4d97ddfaa58e1b8bb0b0 | 1,157 | py | Python | src/pumpwood_communication/hash.py | Murabei-OpenSource-Codes/pumpwood-communication | 1d56dd6d1e3f3ced7c33da4af93b512fde9e4ed7 | [
"BSD-3-Clause"
] | null | null | null | src/pumpwood_communication/hash.py | Murabei-OpenSource-Codes/pumpwood-communication | 1d56dd6d1e3f3ced7c33da4af93b512fde9e4ed7 | [
"BSD-3-Clause"
] | null | null | null | src/pumpwood_communication/hash.py | Murabei-OpenSource-Codes/pumpwood-communication | 1d56dd6d1e3f3ced7c33da4af93b512fde9e4ed7 | [
"BSD-3-Clause"
] | null | null | null | """Create hash from a dictionary."""
import os
import hashlib
from .serializers import pumpJsonDump
from typing import List
def create_hash_from_dict(index_dict: dict, salt: str = "",
get_env: bool = True, keys: List[str] = None):
"""Create a hash for the index."""
# If get_env set as True and salt not set try to get from env variable
if salt == "" and get_env:
salt = os.getenv("HASH_SALT", "")
temp_dict = index_dict
# Retrict keys to be used in hashing
if keys is not None:
temp_dict = dict([(k, index_dict[k]) for k in keys])
string_dict = pumpJsonDump(temp_dict)
hash_object = hashlib.sha1(salt.encode() + str(string_dict).encode())
pbHash = hash_object.hexdigest()
return pbHash
def create_hash_from_str(index: str, salt: str = "", get_env: bool = True):
"""Create a hash for the index."""
# If get_env set as True and salt not set try to get from env variable
if salt == "" and get_env:
salt = os.getenv("HASH_SALT", "")
hash_object = hashlib.sha1(salt.encode() + index.encode())
pbHash = hash_object.hexdigest()
return pbHash
| 33.057143 | 75 | 0.650821 | """Create hash from a dictionary."""
import os
import hashlib
from .serializers import pumpJsonDump
from typing import List
def create_hash_from_dict(index_dict: dict, salt: str = "",
get_env: bool = True, keys: List[str] = None):
"""Create a hash for the index."""
# If get_env set as True and salt not set try to get from env variable
if salt == "" and get_env:
salt = os.getenv("HASH_SALT", "")
temp_dict = index_dict
# Retrict keys to be used in hashing
if keys is not None:
temp_dict = dict([(k, index_dict[k]) for k in keys])
string_dict = pumpJsonDump(temp_dict)
hash_object = hashlib.sha1(salt.encode() + str(string_dict).encode())
pbHash = hash_object.hexdigest()
return pbHash
def create_hash_from_str(index: str, salt: str = "", get_env: bool = True):
"""Create a hash for the index."""
# If get_env set as True and salt not set try to get from env variable
if salt == "" and get_env:
salt = os.getenv("HASH_SALT", "")
hash_object = hashlib.sha1(salt.encode() + index.encode())
pbHash = hash_object.hexdigest()
return pbHash
| 0 | 0 | 0 |
01c71add863481b471830c8175b2fad831a6d622 | 200 | py | Python | py_nest_thermostat/logger.py | bastienboutonnet/py-nest-thermostat | a015ffe618e90a2f890a8a5e66d3881e3f2378e1 | [
"MIT"
] | 3 | 2021-12-01T18:53:36.000Z | 2021-12-24T16:47:49.000Z | py_nest_thermostat/logger.py | bastienboutonnet/py-nest-thermostat | a015ffe618e90a2f890a8a5e66d3881e3f2378e1 | [
"MIT"
] | 8 | 2021-12-01T20:16:18.000Z | 2022-02-08T21:01:43.000Z | py_nest_thermostat/logger.py | bastienboutonnet/py-nest-thermostat | a015ffe618e90a2f890a8a5e66d3881e3f2378e1 | [
"MIT"
] | 1 | 2021-12-24T16:47:52.000Z | 2021-12-24T16:47:52.000Z | import logging
from rich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(level="INFO", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
log = logging.getLogger("rich")
| 25 | 90 | 0.74 | import logging
from rich.logging import RichHandler
# Let RichHandler do the rendering; keep the record body as the bare message.
FORMAT = "%(message)s"
# Configure the root logger once at import: INFO level, Rich console output,
# timestamps rendered as "[HH:MM:SS]" (%X is the locale time format).
logging.basicConfig(level="INFO", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
# Module-level logger instance (named "rich") shared by importers of this module.
log = logging.getLogger("rich")
| 0 | 0 | 0 |
e64c39a5bdfbbcb485c2cc33376d2ba99ecf112b | 252 | py | Python | Step1-PythonBasic/Practices/yuxq/10-11/ex10.py | Jumpers/MysoftAutoTest | 50efc385a96532fc0777061d6c5e7201a4991f04 | [
"Apache-2.0"
] | null | null | null | Step1-PythonBasic/Practices/yuxq/10-11/ex10.py | Jumpers/MysoftAutoTest | 50efc385a96532fc0777061d6c5e7201a4991f04 | [
"Apache-2.0"
] | null | null | null | Step1-PythonBasic/Practices/yuxq/10-11/ex10.py | Jumpers/MysoftAutoTest | 50efc385a96532fc0777061d6c5e7201a4991f04 | [
"Apache-2.0"
] | null | null | null | tabby_cat="\tI'm tabbed in."
persian_cat="I'm split\non a line."
backslash_cat="I'm\\a\\cat."
fat_cat="""
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat | 19.384615 | 36 | 0.68254 | tabby_cat="\tI'm tabbed in."
# \n inside the literal becomes a real line break when printed.
persian_cat="I'm split\non a line."
# \\ escapes a single literal backslash.
backslash_cat="I'm\\a\\cat."
# Triple-quoted string spanning several lines; \t renders as a tab stop.
fat_cat="""
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
# Python 2 print statements: echo each escape-sequence demo.
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat
5bcd8bea62a574ea82bafbdff02cab2eaaea0311 | 669 | py | Python | HierarichaicalClustering.py | MahmoudAbusaqer/Clustering.py | 9ef6208d051eeef0c0f098a4fd4e548156e16ec5 | [
"Apache-2.0"
] | null | null | null | HierarichaicalClustering.py | MahmoudAbusaqer/Clustering.py | 9ef6208d051eeef0c0f098a4fd4e548156e16ec5 | [
"Apache-2.0"
] | null | null | null | HierarichaicalClustering.py | MahmoudAbusaqer/Clustering.py | 9ef6208d051eeef0c0f098a4fd4e548156e16ec5 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
iris = pd.read_csv('IRIS.csv')
# print(iris.head(5))
# split data attributes and label attribute
attributes = iris.drop(['species'], axis=1)
labels = iris['species']
# import and build hieratchy cluster model
from scipy.cluster.hierarchy import linkage, dendrogram
hc = linkage(attributes, 'single')
# print(hc)
# plot the dendogram
samplelist = range(1, 151)# make a list for the data samples
# import pylot libray
from matplotlib import pyplot as plt
plt.figure(figsize=(30, 15))
dendrogram(hc,
orientation='top',
labels=samplelist,
distance_sort='descending',
show_leaf_counts='true')
plt.show() | 23.892857 | 60 | 0.702541 | import pandas as pd
# Load the iris dataset from the working directory.
iris = pd.read_csv('IRIS.csv')
# print(iris.head(5))
# Split data attributes and label attribute.
attributes = iris.drop(['species'], axis=1)
labels = iris['species']  # NOTE(review): extracted but not used below
# Import and build the hierarchical cluster model (single linkage).
from scipy.cluster.hierarchy import linkage, dendrogram
hc = linkage(attributes, 'single')
# print(hc)
# Plot the dendrogram.
samplelist = range(1, 151)  # leaf labels 1..150; assumes the standard 150-row iris dataset -- confirm
# Import the pyplot library.
from matplotlib import pyplot as plt
plt.figure(figsize=(30, 15))
dendrogram(hc,
           orientation='top',
           labels=samplelist,
           distance_sort='descending',
           show_leaf_counts='true')  # NOTE(review): 'true' is a truthy str, not bool True -- works, but probably meant True
plt.show()
526d64c86ec27dfd527c1b31c0f459bc525cb0c1 | 2,879 | py | Python | cloudflare_exporter/exporter.py | cpaillet/cloudflare-exporter | 194a0ce0f316aadc2802fbf180d06f5aab7849be | [
"Apache-2.0"
] | null | null | null | cloudflare_exporter/exporter.py | cpaillet/cloudflare-exporter | 194a0ce0f316aadc2802fbf180d06f5aab7849be | [
"Apache-2.0"
] | 7 | 2019-11-28T11:43:56.000Z | 2020-06-09T08:21:19.000Z | cloudflare_exporter/exporter.py | cpaillet/cloudflare-exporter | 194a0ce0f316aadc2802fbf180d06f5aab7849be | [
"Apache-2.0"
] | 3 | 2019-11-28T08:36:23.000Z | 2022-02-21T11:34:41.000Z | import argparse
import logging
import sys
from aiohttp import web
from prometheus_client.core import REGISTRY
from cloudflare_exporter.collector import CloudflareCollector
from cloudflare_exporter.config import (DEFAULT_HOST,
DEFAULT_LOGS_FETCH,
DEFAULT_LOGS_COUNT,
DEFAULT_LOGS_RANGE,
DEFAULT_LOGS_SAMPLE,
DEFAULT_PORT, LOG_FORMAT)
from cloudflare_exporter.handlers import handle_health, handle_metrics
if __name__ == '__main__':
main()
| 42.338235 | 82 | 0.572768 | import argparse
import logging
import sys
from aiohttp import web
from prometheus_client.core import REGISTRY
from cloudflare_exporter.collector import CloudflareCollector
from cloudflare_exporter.config import (DEFAULT_HOST,
DEFAULT_LOGS_FETCH,
DEFAULT_LOGS_COUNT,
DEFAULT_LOGS_RANGE,
DEFAULT_LOGS_SAMPLE,
DEFAULT_PORT, LOG_FORMAT)
from cloudflare_exporter.handlers import handle_health, handle_metrics
def parse_args(args):
    """Parse exporter command-line arguments.

    Args:
        args: list of argument strings (typically sys.argv[1:]).

    Returns:
        argparse.Namespace with token, host, port, logs_fetch, logs_count,
        logs_sample and logs_range attributes.
    """
    def int_positive(string):
        # argparse `type` hook: accept only strictly positive integers.
        ivalue = int(string)
        if ivalue <= 0:
            raise argparse.ArgumentTypeError(f'{string} is not positive')
        return ivalue

    def str_to_bool(string):
        # argparse `type` hook.  The previous `type=bool` treated ANY
        # non-empty string -- including "False" -- as True; parse the
        # text explicitly instead.
        normalized = string.strip().lower()
        if normalized in ('true', '1', 'yes', 'on'):
            return True
        if normalized in ('false', '0', 'no', 'off'):
            return False
        raise argparse.ArgumentTypeError(f'{string} is not a boolean')

    parser = argparse.ArgumentParser(description='Cloudflare prometheus exporter')
    parser.add_argument('-t', '--token', type=str, required=True,
                        help='Cloudflare API Token')
    parser.add_argument('--host', type=str,
                        help='TCP/IP host for HTTP server',
                        default=DEFAULT_HOST)
    parser.add_argument('--port', type=int_positive,
                        help="Port used to expose metrics for Prometheus",
                        default=DEFAULT_PORT)
    parser.add_argument('--logs_fetch', type=str_to_bool,
                        help="Activate metric from logs",
                        default=DEFAULT_LOGS_FETCH)
    parser.add_argument('--logs_count', type=int_positive,
                        help="Cloudflare logs: count param",
                        default=DEFAULT_LOGS_COUNT)
    # NOTE(review): help text says "[0-1]" but the type only accepts
    # positive integers; possibly this should be a float in (0, 1] --
    # confirm against the Cloudflare logs API before changing.
    parser.add_argument('--logs_sample', type=int_positive,
                        help="Cloudflare logs: sample param [0-1]",
                        default=DEFAULT_LOGS_SAMPLE)
    parser.add_argument('--logs_range', type=int_positive,
                        help="Cloudflare logs: range in seconds",
                        default=DEFAULT_LOGS_RANGE)
    return parser.parse_args(args)
def main():
    """Wire up the collector and serve /metrics and /health over HTTP."""
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    options = parse_args(sys.argv[1:])
    collector = CloudflareCollector(cloudflare_token=options.token,
                                    logs_fetch=options.logs_fetch,
                                    logs_count=options.logs_count,
                                    logs_sample=options.logs_sample,
                                    logs_range=options.logs_range)
    REGISTRY.register(collector)
    app = web.Application()
    for route, handler in (('/metrics', handle_metrics),
                           ('/health', handle_health)):
        app.router.add_get(route, handler)
    print(f'======== Running on http://{options.host}:{options.port}/metrics ========')
    # access_log=None silences per-request logging; print=False suppresses
    # aiohttp's own startup banner (we printed our own above).
    web.run_app(app, host=options.host, port=options.port, access_log=None,
                print=False)
# Allow running the exporter directly as a script.
if __name__ == '__main__':
    main()
| 2,185 | 0 | 46 |
a0ec64ee234260e40e88f7c9bed369711771446d | 89 | py | Python | metaapi/apps.py | mark-barrett/RESTBroker | d9b0a3574d2970443fdf40c70ab9ceb8d72614f4 | [
"MIT"
] | null | null | null | metaapi/apps.py | mark-barrett/RESTBroker | d9b0a3574d2970443fdf40c70ab9ceb8d72614f4 | [
"MIT"
] | null | null | null | metaapi/apps.py | mark-barrett/RESTBroker | d9b0a3574d2970443fdf40c70ab9ceb8d72614f4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.833333 | 33 | 0.752809 | from django.apps import AppConfig
class MetaapiConfig(AppConfig):
    """Django application configuration for the ``metaapi`` app."""
    # Dotted module path Django uses to locate this application.
    name = 'metaapi'
| 0 | 31 | 23 |
9d683d70e4138283d60fbf925f723b9011581cd7 | 41 | py | Python | zoil/__init__.py | davidzajac1/zoil | 9cd00be3c238344f5a90cbaee24ee99dc2f919fc | [
"MIT"
] | null | null | null | zoil/__init__.py | davidzajac1/zoil | 9cd00be3c238344f5a90cbaee24ee99dc2f919fc | [
"MIT"
] | null | null | null | zoil/__init__.py | davidzajac1/zoil | 9cd00be3c238344f5a90cbaee24ee99dc2f919fc | [
"MIT"
] | null | null | null | from zoil.well import get_production_data | 41 | 41 | 0.902439 | from zoil.well import get_production_data | 0 | 0 | 0 |
9e513ab7def44aae526688fda8460e192ab48963 | 5,601 | py | Python | src/extension/src/RuntimeContextHandler.py | Azure/LinuxPatchExtension | 6af622afb4298805bdf47328d6bc66a785f7166b | [
"Apache-2.0"
] | 4 | 2020-06-01T14:36:30.000Z | 2021-08-24T16:55:50.000Z | src/extension/src/RuntimeContextHandler.py | Azure/LinuxPatchExtension | 6af622afb4298805bdf47328d6bc66a785f7166b | [
"Apache-2.0"
] | 34 | 2020-09-11T17:20:42.000Z | 2022-03-28T14:08:44.000Z | src/extension/src/RuntimeContextHandler.py | Azure/LinuxPatchExtension | 6af622afb4298805bdf47328d6bc66a785f7166b | [
"Apache-2.0"
] | 1 | 2020-12-28T10:13:20.000Z | 2020-12-28T10:13:20.000Z | # Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
import datetime
import time
from extension.src.Constants import Constants
| 70.898734 | 349 | 0.74933 | # Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
import datetime
import time
from extension.src.Constants import Constants
class RuntimeContextHandler(object):
    def __init__(self, logger):
        # Extension logger used for progress/diagnostic messages.
        self.logger = logger
        # Field-name constants for reading the CoreState status file.
        self.core_state_fields = Constants.CoreStateFields
def terminate_processes_from_previous_operation(self, process_handler, core_state_content):
""" Terminates all running processes from the previous request """
self.logger.log("Verifying if previous patch operation is still in progress")
if core_state_content is None or core_state_content.__getattribute__(self.core_state_fields.completed).lower() == 'true':
self.logger.log("Previous request is complete")
return
# verify if processes from prev request are running
running_process_ids = process_handler.identify_running_processes(core_state_content.__getattribute__(self.core_state_fields.process_ids))
if len(running_process_ids) != 0:
for pid in running_process_ids:
process_handler.kill_process(pid)
def process_previous_patch_operation(self, core_state_handler, process_handler, prev_patch_max_end_time, core_state_content):
""" Waits for the previous request action to complete for a specific time, terminates previous process if it goes over that time """
self.logger.log("Verifying if previous patch operation is still in progress")
core_state_content = core_state_handler.read_file() if core_state_content is None else core_state_content
if core_state_content is None or core_state_content.__getattribute__(self.core_state_fields.completed).lower() == 'true':
self.logger.log("Previous request is complete")
return
# verify if processes from prev request are running
running_process_ids = process_handler.identify_running_processes(core_state_content.__getattribute__(self.core_state_fields.process_ids))
if len(running_process_ids) != 0:
is_patch_complete = self.check_if_patch_completes_in_time(prev_patch_max_end_time, core_state_content.__getattribute__(self.core_state_fields.last_heartbeat), core_state_handler)
if is_patch_complete:
self.logger.log("Previous request is complete")
return
for pid in running_process_ids:
self.logger.log("Previous request did not complete in time. Terminating all of it's running processes.")
process_handler.kill_process(pid)
def check_if_patch_completes_in_time(self, time_for_prev_patch_to_complete, core_state_last_heartbeat, core_state_handler):
    """Poll until the previous request reports completion or the deadline passes.

    Returns True as soon as CoreState reports the previous request finished,
    and False once the deadline expires without that happening. Raises when
    the deadline is not a datetime.
    """
    if type(time_for_prev_patch_to_complete) is not datetime.datetime:
        raise Exception("System Error: Unable to identify the time to wait for previous request to complete")

    def _to_seconds(delta):
        # Manual equivalent of timedelta.total_seconds(), kept because the
        # original targets Python 2.6 where total_seconds() does not exist.
        return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10 ** 6

    poll_cap_in_seconds = 60
    time_left = time_for_prev_patch_to_complete - datetime.datetime.utcnow()
    seconds_left = _to_seconds(time_left)
    core_state_content = None
    while seconds_left > 0:
        nap_seconds = min(poll_cap_in_seconds, seconds_left)
        if core_state_content is not None:
            core_state_last_heartbeat = core_state_content.__getattribute__(self.core_state_fields.last_heartbeat)
        self.logger.log("Previous patch operation is still in progress with last status update at {0}. Waiting for a maximum of {1} seconds for it to complete with intermittent status change checks. Next check will be performed after {2} seconds.".format(str(core_state_last_heartbeat), str(time_left), str(nap_seconds)))
        time.sleep(nap_seconds)
        time_left = time_for_prev_patch_to_complete - datetime.datetime.utcnow()
        seconds_left = _to_seconds(time_left)
        # Re-read CoreState.json to see whether the previous request finished.
        core_state_content = core_state_handler.read_file()
        if core_state_content.__getattribute__(self.core_state_fields.completed).lower() == 'true':
            return True
    return False
| 94 | 4,798 | 23 |
ba0d7ccfeb185965828c45fa889e8a87f6968642 | 5,444 | py | Python | samples/contrib/pytorch-samples/cifar10/cifar10_datamodule.py | RonsenbergVI/pipelines | a85dc4f5f1f65f14bd807dec9ab25d8dafb34379 | [
"Apache-2.0"
] | 2,860 | 2018-05-24T04:55:01.000Z | 2022-03-31T13:49:56.000Z | samples/contrib/pytorch-samples/cifar10/cifar10_datamodule.py | RonsenbergVI/pipelines | a85dc4f5f1f65f14bd807dec9ab25d8dafb34379 | [
"Apache-2.0"
] | 7,331 | 2018-05-16T09:03:26.000Z | 2022-03-31T23:22:04.000Z | samples/contrib/pytorch-samples/cifar10/cifar10_datamodule.py | RonsenbergVI/pipelines | a85dc4f5f1f65f14bd807dec9ab25d8dafb34379 | [
"Apache-2.0"
] | 1,359 | 2018-05-15T11:05:41.000Z | 2022-03-31T09:42:09.000Z | # !/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cifar10 data module."""
import os
import pytorch_lightning as pl
import webdataset as wds
from torch.utils.data import DataLoader
from torchvision import transforms
class CIFAR10DataModule(pl.LightningDataModule): # pylint: disable=too-many-instance-attributes
    """Lightning data module serving CIFAR-10 train/val/test WebDataset shards."""

    def __init__(self, **kwargs):
        """Initialization of inherited lightning data module.

        kwargs are stored in ``self.args`` and read lazily (train_glob,
        train/val batch sizes and worker counts).
        """
        super(CIFAR10DataModule, self).__init__() # pylint: disable=super-with-arguments
        self.train_dataset = None
        self.valid_dataset = None
        self.test_dataset = None
        self.train_data_loader = None
        self.val_data_loader = None
        self.test_data_loader = None
        # Per-channel normalization to roughly [-1, 1].
        self.normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                              std=[0.5, 0.5, 0.5])
        self.valid_transform = transforms.Compose([
            transforms.ToTensor(),
            self.normalize,
        ])
        # Training adds random crop/flip augmentation on top of normalization.
        self.train_transform = transforms.Compose([
            transforms.RandomResizedCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            self.normalize,
        ])
        self.args = kwargs

    def prepare_data(self):
        """Implementation of abstract class (no download step needed here)."""

    @staticmethod
    def get_num_files(input_path):
        """Gets num files.

        Counts directory entries minus one (presumably one non-shard entry
        lives in the processing output folder -- TODO confirm).

        Args:
            input_path : path to input
        """
        return len(os.listdir(input_path)) - 1

    def setup(self, stage=None):
        """Downloads the data, parse it and split the data into train, test,
        validation data.

        Args:
            stage: Stage - training or testing
        """
        data_path = self.args.get("train_glob", "/pvc/output/processing")
        train_base_url = data_path + "/train"
        val_base_url = data_path + "/val"
        test_base_url = data_path + "/test"
        train_count = self.get_num_files(train_base_url)
        val_count = self.get_num_files(val_base_url)
        test_count = self.get_num_files(test_base_url)
        # Brace notation expands to shard files <split>-0.tar .. <split>-N.tar.
        train_url = "{}/{}-{}".format(train_base_url, "train",
                                      "{0.." + str(train_count) + "}.tar")
        valid_url = "{}/{}-{}".format(val_base_url, "val",
                                      "{0.." + str(val_count) + "}.tar")
        test_url = "{}/{}-{}".format(test_base_url, "test",
                                     "{0.." + str(test_count) + "}.tar")
        self.train_dataset = (wds.WebDataset(
            train_url,
            handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
                image="ppm;jpg;jpeg;png",
                info="cls").map_dict(image=self.train_transform).to_tuple(
                    "image", "info").batched(40))
        # NOTE(review): val/test rename only matches "ppm" keys while train
        # also accepts jpg/jpeg/png -- confirm the shard key extensions.
        self.valid_dataset = (wds.WebDataset(
            valid_url,
            handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
                image="ppm",
                info="cls").map_dict(image=self.valid_transform).to_tuple(
                    "image", "info").batched(20))
        self.test_dataset = (wds.WebDataset(
            test_url,
            handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
                image="ppm",
                info="cls").map_dict(image=self.valid_transform).to_tuple(
                    "image", "info").batched(20))

    def create_data_loader(self, dataset, batch_size, num_workers): # pylint: disable=no-self-use
        """Creates data loader for the given dataset."""
        return DataLoader(dataset,
                          batch_size=batch_size,
                          num_workers=num_workers)

    def train_dataloader(self):
        """Train Data loader.

        Returns:
            output - Train data loader for the given input
        """
        self.train_data_loader = self.create_data_loader(
            self.train_dataset,
            self.args.get("train_batch_size", None),
            self.args.get("train_num_workers", 4),
        )
        return self.train_data_loader

    def val_dataloader(self):
        """Validation Data Loader.

        Returns:
            output - Validation data loader for the given input
        """
        self.val_data_loader = self.create_data_loader(
            self.valid_dataset,
            self.args.get("val_batch_size", None),
            self.args.get("val_num_workers", 4),
        )
        return self.val_data_loader

    def test_dataloader(self):
        """Test Data Loader.

        Returns:
            output - Test data loader for the given input
        """
        # NOTE(review): reuses the "val_batch_size"/"val_num_workers" args for
        # the test loader -- presumably intentional; confirm.
        self.test_data_loader = self.create_data_loader(
            self.test_dataset,
            self.args.get("val_batch_size", None),
            self.args.get("val_num_workers", 4),
        )
        return self.test_data_loader
| 36.536913 | 98 | 0.591661 | # !/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cifar10 data module."""
import os
import pytorch_lightning as pl
import webdataset as wds
from torch.utils.data import DataLoader
from torchvision import transforms
class CIFAR10DataModule(pl.LightningDataModule): # pylint: disable=too-many-instance-attributes
    """Lightning data module serving CIFAR-10 train/val/test WebDataset shards.

    NOTE(review): this is a verbatim duplicate of an identical class
    definition that appears earlier in this file -- confirm which copy is
    canonical.
    """

    def __init__(self, **kwargs):
        """Initialization of inherited lightning data module.

        kwargs are stored in ``self.args`` and read lazily (train_glob,
        train/val batch sizes and worker counts).
        """
        super(CIFAR10DataModule, self).__init__() # pylint: disable=super-with-arguments
        self.train_dataset = None
        self.valid_dataset = None
        self.test_dataset = None
        self.train_data_loader = None
        self.val_data_loader = None
        self.test_data_loader = None
        # Per-channel normalization to roughly [-1, 1].
        self.normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                              std=[0.5, 0.5, 0.5])
        self.valid_transform = transforms.Compose([
            transforms.ToTensor(),
            self.normalize,
        ])
        # Training adds random crop/flip augmentation on top of normalization.
        self.train_transform = transforms.Compose([
            transforms.RandomResizedCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            self.normalize,
        ])
        self.args = kwargs

    def prepare_data(self):
        """Implementation of abstract class (no download step needed here)."""

    @staticmethod
    def get_num_files(input_path):
        """Gets num files.

        Counts directory entries minus one (presumably one non-shard entry
        lives in the processing output folder -- TODO confirm).

        Args:
            input_path : path to input
        """
        return len(os.listdir(input_path)) - 1

    def setup(self, stage=None):
        """Downloads the data, parse it and split the data into train, test,
        validation data.

        Args:
            stage: Stage - training or testing
        """
        data_path = self.args.get("train_glob", "/pvc/output/processing")
        train_base_url = data_path + "/train"
        val_base_url = data_path + "/val"
        test_base_url = data_path + "/test"
        train_count = self.get_num_files(train_base_url)
        val_count = self.get_num_files(val_base_url)
        test_count = self.get_num_files(test_base_url)
        # Brace notation expands to shard files <split>-0.tar .. <split>-N.tar.
        train_url = "{}/{}-{}".format(train_base_url, "train",
                                      "{0.." + str(train_count) + "}.tar")
        valid_url = "{}/{}-{}".format(val_base_url, "val",
                                      "{0.." + str(val_count) + "}.tar")
        test_url = "{}/{}-{}".format(test_base_url, "test",
                                     "{0.." + str(test_count) + "}.tar")
        self.train_dataset = (wds.WebDataset(
            train_url,
            handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
                image="ppm;jpg;jpeg;png",
                info="cls").map_dict(image=self.train_transform).to_tuple(
                    "image", "info").batched(40))
        # NOTE(review): val/test rename only matches "ppm" keys while train
        # also accepts jpg/jpeg/png -- confirm the shard key extensions.
        self.valid_dataset = (wds.WebDataset(
            valid_url,
            handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
                image="ppm",
                info="cls").map_dict(image=self.valid_transform).to_tuple(
                    "image", "info").batched(20))
        self.test_dataset = (wds.WebDataset(
            test_url,
            handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
                image="ppm",
                info="cls").map_dict(image=self.valid_transform).to_tuple(
                    "image", "info").batched(20))

    def create_data_loader(self, dataset, batch_size, num_workers): # pylint: disable=no-self-use
        """Creates data loader for the given dataset."""
        return DataLoader(dataset,
                          batch_size=batch_size,
                          num_workers=num_workers)

    def train_dataloader(self):
        """Train Data loader.

        Returns:
            output - Train data loader for the given input
        """
        self.train_data_loader = self.create_data_loader(
            self.train_dataset,
            self.args.get("train_batch_size", None),
            self.args.get("train_num_workers", 4),
        )
        return self.train_data_loader

    def val_dataloader(self):
        """Validation Data Loader.

        Returns:
            output - Validation data loader for the given input
        """
        self.val_data_loader = self.create_data_loader(
            self.valid_dataset,
            self.args.get("val_batch_size", None),
            self.args.get("val_num_workers", 4),
        )
        return self.val_data_loader

    def test_dataloader(self):
        """Test Data Loader.

        Returns:
            output - Test data loader for the given input
        """
        # NOTE(review): reuses the "val_batch_size"/"val_num_workers" args for
        # the test loader -- presumably intentional; confirm.
        self.test_data_loader = self.create_data_loader(
            self.test_dataset,
            self.args.get("val_batch_size", None),
            self.args.get("val_num_workers", 4),
        )
        return self.test_data_loader
| 0 | 0 | 0 |
56162bfb78a501413a438283bb43a5d96f4c3d83 | 6,531 | py | Python | data/masked_generators/tsp_bgnn.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | data/masked_generators/tsp_bgnn.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | data/masked_generators/tsp_bgnn.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | import os
from pkgutil import get_data
import torch
import dgl
import numpy as np
import requests
import zipfile
from scipy.spatial.distance import pdist, squareform
from data.tsp import distance_matrix_tensor_representation
import tqdm
from toolbox import utils
| 40.314815 | 103 | 0.55872 | import os
from pkgutil import get_data
import torch
import dgl
import numpy as np
import requests
import zipfile
from scipy.spatial.distance import pdist, squareform
from data.tsp import distance_matrix_tensor_representation
import tqdm
from toolbox import utils
class TSP_BGNN_Generator(torch.utils.data.Dataset):
    """Torch dataset of TSP instances from the Benchmarking-GNNs (BGNN) release.

    Each sample pairs an input representation of a TSP instance with its
    optimal-tour adjacency target, either as DGL graphs or dense tensors.
    """

    def __init__(self, name, args, coeff=1e8):
        """Prepare an (initially empty) dataset rooted at <path_dataset>/tsp_bgnn.

        Keyword arguments:
        name -- split name used in the BGNN file name (e.g. 'train', 'test')
        args -- dict-like config; only 'path_dataset' is read here
        coeff -- stored coefficient; not used in the code visible here
        """
        self.name = name
        path_dataset = os.path.join(args['path_dataset'], 'tsp_bgnn')
        self.path_dataset = path_dataset
        self.data = []
        utils.check_dir(self.path_dataset)
        self.constant_n_vertices = False
        self.coeff = coeff
        self.positions = []
        self.filename = os.path.join(self.path_dataset, 'TSP/', f'tsp50-500_{self.name}.txt')
        self.num_neighbors = 25  # k for the k-nearest-neighbor edge pruning

    def download_files(self):
        """Download and unzip the BGNN TSP text files into the dataset dir."""
        basefilepath = os.path.join(self.path_dataset, 'TSP.zip')
        print('Downloading Benchmarking GNNs TSP data...')
        url = 'https://www.dropbox.com/s/1wf6zn5nq7qjg0e/TSP.zip?dl=1'
        r = requests.get(url)
        with open(basefilepath, 'wb') as f:
            f.write(r.content)
        with zipfile.ZipFile(basefilepath, 'r') as zip_ref:
            zip_ref.extractall(self.path_dataset)

    def load_dataset(self, use_dgl=False):
        """
        Look for required dataset in files and create it if
        it does not exist
        """
        filename = self.name + '.pkl'
        filename_dgl = self.name + '_dgl.pkl'
        path = os.path.join(self.path_dataset, filename)
        path_dgl = os.path.join(self.path_dataset, filename_dgl)
        data_exists = os.path.exists(path)
        data_dgl_exists = os.path.exists(path_dgl)
        if use_dgl and data_dgl_exists:
            print('Reading dataset at {}'.format(path_dgl))
            l_data, l_pos = torch.load(path_dgl)
        elif not use_dgl and data_exists:
            print('Reading dataset at {}'.format(path))
            l_data, l_pos = torch.load(path)
        elif use_dgl:
            print('Reading dataset from BGNN files.')
            l_data, l_pos = self.get_data_from_file(use_dgl=use_dgl)
            print('Saving dataset at {}'.format(path_dgl))
            torch.save((l_data, l_pos), path_dgl)
        else:
            print('Reading dataset from BGNN files.')
            l_data, l_pos = self.get_data_from_file(use_dgl=use_dgl)
            print('Saving dataset at {}'.format(path))
            # NOTE(review): this branch saves ``self.positions`` (still the
            # empty list from __init__) instead of ``l_pos``, unlike the dgl
            # branch above -- looks like a bug; confirm before relying on the
            # cached positions.
            torch.save((l_data, self.positions), path)
        self.data = list(l_data)
        self.positions = list(l_pos)

    def get_data_from_file(self, use_dgl=False):
        """Parse the BGNN text file into (input, target) pairs plus positions.

        Downloads the raw files first if they are missing. Returns
        (l_data, l_pos) where l_pos holds per-instance (xs, ys) coordinates.
        """
        if not os.path.isfile(self.filename):
            self.download_files()
        with open(self.filename, 'r') as f:
            file_data = f.readlines()
        l_data, l_pos = [], []
        print("Processing data...")
        for line in tqdm.tqdm(file_data):
            line = line.split(" ")  # Split into list
            # Tokens before the 'output' marker are x,y coordinate pairs.
            num_nodes = int(line.index('output') // 2)
            # Convert node coordinates to required format
            nodes_coord = []
            xs, ys = [], []
            for idx in range(0, 2 * num_nodes, 2):
                x, y = float(line[idx]), float(line[idx + 1])
                xs.append(x)
                ys.append(y)
                nodes_coord.append([float(line[idx]), float(line[idx + 1])])
            # Compute distance matrix
            W_val = squareform(pdist(nodes_coord, metric='euclidean'))
            # Determine k-nearest neighbors for each node
            knns = np.argpartition(W_val, kth=self.num_neighbors, axis=-1)[:, self.num_neighbors::-1]
            # Convert tour nodes to required format
            # Don't add final connection for tour/cycle
            tour_nodes = [int(node) - 1 for node in line[line.index('output') + 1:-1]][:-1]
            # Compute an edge adjacency matrix representation of tour
            edges_target = np.zeros((num_nodes, num_nodes))
            for idx in range(len(tour_nodes) - 1):
                i = tour_nodes[idx]
                j = tour_nodes[idx + 1]
                edges_target[i][j] = 1
                edges_target[j][i] = 1
            # Add final connection of tour in edge target (j is the last
            # endpoint from the loop above).
            edges_target[j][tour_nodes[0]] = 1
            edges_target[tour_nodes[0]][j] = 1
            if use_dgl:
                g = dgl.DGLGraph()
                g.add_nodes(num_nodes)
                g.ndata['feat'] = torch.Tensor(nodes_coord)
                edge_feats = []  # edge features i.e. euclidean distances between nodes
                edge_labels = []  # edges_targets as a list
                # Important!: order of edge_labels must be the same as the order of edges in DGLGraph g
                # We ensure this by adding them together
                for idx in range(num_nodes):
                    for n_idx in knns[idx]:
                        if n_idx != idx:  # No self-connection
                            g.add_edge(idx, n_idx)
                            edge_feats.append(W_val[idx][n_idx])
                            edge_labels.append(int(edges_target[idx][n_idx]))
                # dgl.transform.remove_self_loop(g)
                # Sanity check
                assert len(edge_feats) == g.number_of_edges() == len(edge_labels)
                # Add edge features
                g.edata['feat'] = torch.Tensor(edge_feats).unsqueeze(-1)
                num_nodes = g.num_nodes()
                target_dgl = dgl.graph(g.edges(), num_nodes=num_nodes)
                edge_labels = torch.tensor(edge_labels)
                target_dgl.edata['solution'] = edge_labels
                l_data.append((g, target_dgl))
            else:
                # Dense tensor representation: distance matrix input and a
                # 0/1 tour-adjacency matrix target.
                W = torch.tensor(W_val, dtype=torch.float)
                B = distance_matrix_tensor_representation(W)
                SOL = torch.zeros((num_nodes, num_nodes), dtype=int)
                prec = tour_nodes[-1]
                for i in range(num_nodes):
                    curr = tour_nodes[i]
                    SOL[curr, prec] = 1
                    SOL[prec, curr] = 1
                    prec = curr
                l_data.append((B, SOL))
            l_pos.append((xs, ys))
        return l_data, l_pos

    def __getitem__(self, i):
        """ Fetch sample at index i """
        return self.data[i]

    def __len__(self):
        """ Get dataset length """
        return len(self.data)
| 4,625 | 1,619 | 23 |
7cd64b0870d8323f3c093a616aa65567f0e3b4cb | 27,612 | py | Python | see/Segmentors.py | genster6/see-segment | 3564edf7d0e8b9add79ddfd5b63466fec7a4022e | [
"MIT"
] | null | null | null | see/Segmentors.py | genster6/see-segment | 3564edf7d0e8b9add79ddfd5b63466fec7a4022e | [
"MIT"
] | null | null | null | see/Segmentors.py | genster6/see-segment | 3564edf7d0e8b9add79ddfd5b63466fec7a4022e | [
"MIT"
] | null | null | null | """Segmentor library designed to learn how to segment images using GAs.
This libary actually does not incode the GA itself, instead it just defines
the search parameters the evaluation funtions and the fitness function (comming soon)."""
# TODO: Research project - clean up the parameters class to reduce the search space
# TODO: Change the seed from a number to a fraction 0-1 which is scaled to image rows and columns
# TODO: Enumerate the word-based measures.
from collections import OrderedDict
import sys
import logging
import numpy as np
import skimage
from skimage import segmentation
from skimage import color
from see.Segment_Similarity_Measure import FF_ML2DHD_V2
# List of all algorithms: registry mapping each two-letter algorithm code
# (e.g. 'CT', 'FB') to its segmentor class; populated after each class below.
algorithmspace = dict()
def runAlgo(img, ground_img, individual, return_mask=False):
    """Run and evaluate the performance of an individual.

    Keyword arguments:
    img -- training image
    ground_img -- the ground truth for the image mask
    individual -- the list representing an individual in our population
    return_mask -- Boolean value indicating whether to return resulting
        mask for the individual or not (default False)

    Output:
    fitness -- resulting fitness value for the individual
    mask -- resulting image mask associated with the individual (if return_mask=True)
    """
    logging.getLogger().info(f"Running Algorithm {individual[0]}")
    # img = copy.deepcopy(copyImg)
    seg = algoFromParams(individual)
    mask = seg.evaluate(img)
    logging.getLogger().info("Calculating Fitness")
    # NOTE(review): FitnessFunction is not defined in the visible part of this
    # module; presumably it wraps the imported FF_ML2DHD_V2 -- confirm it is
    # defined later in this file.
    fitness = FitnessFunction(mask, ground_img)
    if return_mask:
        return [fitness, mask]
    else:
        return fitness
def algoFromParams(individual):
    """Instantiate the segmentor encoded by an individual.

    The first element of ``individual`` is the two-letter algorithm code;
    the remaining elements are its parameters in the order defined by the
    ``parameters`` class.

    Keyword arguments:
    individual -- the list representing an individual in our population

    Output:
    a segmentor instance configured from the individual

    Raises ValueError when the algorithm code is not registered.
    """
    algo_code = individual[0]
    if algo_code not in algorithmspace:
        raise ValueError("Algorithm not avaliable")
    segmentor_class = algorithmspace[algo_code]
    return segmentor_class(individual)
def popCounts(pop):
    """Count the number of individuals using each algorithm in a population.

    Keyword arguments:
    pop -- iterable of individuals; each individual's first element is its
        two-letter algorithm code

    Output:
    counts -- dict mapping every known algorithm code to its frequency in
        ``pop`` (codes that never occur are kept with a count of 0)
    """
    # ``parameters.ranges`` stores search-space definitions as evaluable
    # strings by design, so eval() here only runs a trusted class constant.
    algorithms = eval(parameters.ranges["algorithm"])
    counts = {a: 0 for a in algorithms}
    # BUG FIX: removed a stray debug print(p[0]) that spammed stdout for
    # every individual counted.
    for p in pop:
        counts[p[0]] += 1
    return counts
class parameters(OrderedDict):
    """Ordered dictionary that represents the search space.

    Class attributes ``descriptions`` and ``ranges`` hold, for every
    parameter name, a human-readable description and a string that evaluates
    to the list of allowed values for that parameter.

    Functions:
    printparam -- returns description for each parameter
    tolist -- converts dictionary of params into list
    fromlist -- converts individual into dictionary of params
    """

    descriptions = dict()
    ranges = dict()
    pkeys = []

    ranges["algorithm"] = "['CT','FB','SC','WS','CV','MCV','AC']"
    descriptions["algorithm"] = "string code for the algorithm"

    descriptions["beta"] = "A parameter for randomWalker So, I should take this out"
    ranges["beta"] = "[i for i in range(0,10000)]"

    descriptions["tolerance"] = "A parameter for flood and flood_fill"
    ranges["tolerance"] = "[float(i)/1000 for i in range(0,1000,1)]"

    descriptions["scale"] = "A parameter for felzenszwalb"
    ranges["scale"] = "[i for i in range(0,10000)]"

    descriptions["sigma"] = "sigma value. A parameter for felzenswalb, inverse_guassian_gradient, slic, and quickshift"
    ranges["sigma"] = "[float(i)/100 for i in range(0,100)]"

    descriptions["min_size"] = "parameter for felzenszwalb"
    ranges["min_size"] = "[i for i in range(0,10000)]"

    descriptions["n_segments"] = "A parameter for slic"
    ranges["n_segments"] = "[i for i in range(2,10000)]"

    descriptions["iterations"] = "A parameter for both morphological algorithms"
    ranges["iterations"] = "[10, 10]"

    descriptions["ratio"] = "A parameter for ratio"
    ranges["ratio"] = "[float(i)/100 for i in range(0,100)]"

    descriptions["kernel_size"] = "A parameter for kernel_size"
    ranges["kernel_size"] = "[i for i in range(0,10000)]"

    descriptions["max_dist"] = "A parameter for quickshift"
    ranges["max_dist"] = "[i for i in range(0,10000)]"

    descriptions["Channel"] = "A parameter for Picking the Channel R,G,B,H,S,V"
    ranges["Channel"] = "[0,1,2,3,4,5]"

    descriptions["connectivity"] = "A parameter for flood and floodfill"
    ranges["connectivity"] = "[i for i in range(0, 9)]"

    descriptions["compactness"] = "A parameter for slic and watershed"
    ranges["compactness"] = "[0.0001,0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]"

    descriptions["mu"] = "A parameter for chan_vese"
    ranges["mu"] = "[float(i)/100 for i in range(0,100)]"

    descriptions["lambda"] = "A parameter for chan_vese and morphological_chan_vese"
    ranges["lambda"] = "[(1,1), (1,2), (2,1)]"

    descriptions["dt"] = "#An algorithm for chan_vese May want to make seperate level sets for different functions e.g. Morph_chan_vese vs morph_geo_active_contour"
    ranges["dt"] = "[float(i)/10 for i in range(0,100)]"

    descriptions["init_level_set_chan"] = "A parameter for chan_vese and morphological_chan_vese"
    ranges["init_level_set_chan"] = "['checkerboard', 'disk', 'small disk']"

    descriptions["init_level_set_morph"] = "A parameter for morphological_chan_vese"
    ranges["init_level_set_morph"] = "['checkerboard', 'circle']"

    descriptions["smoothing"] = "A parameter used in morphological_geodesic_active_contour"
    ranges["smoothing"] = "[i for i in range(1, 10)]"

    descriptions["alpha"] = "A parameter for inverse_guassian_gradient"
    ranges["alpha"] = "[i for i in range(0,10000)]"

    descriptions["balloon"] = "A parameter for morphological_geodesic_active_contour"
    ranges["balloon"] = "[i for i in range(-50,50)]"

    descriptions["seed_pointX"] = "A parameter for flood and flood_fill"
    ranges["seed_pointX"] = "[0.0]"

    descriptions["seed_pointY"] = "??"
    ranges["seed_pointY"] = "[0.0]"

    descriptions["seed_pointZ"] = "??"
    ranges["seed_pointZ"] = "[0.0]"

    # Try to set defaults only once.
    # Current method may cause all kinds of weird problems.
    # @staticmethod
    # def __Set_Defaults__()

    def __init__(self):
        """Set default values for each param in the dictionary."""
        self["algorithm"] = "None"
        self["beta"] = 0.0
        self["tolerance"] = 0.0
        self["scale"] = 0.0
        self["sigma"] = 0.0
        self["min_size"] = 0.0
        self["n_segments"] = 0.0
        self["iterations"] = 10
        self["ratio"] = 0.0
        self["kernel_size"] = 0.0
        self["max_dist"] = 0.0
        self["Channel"] = 0.0
        self["connectivity"] = 0.0
        self["compactness"] = 0.0
        self["mu"] = 0.0
        self["lambda"] = (1, 1)
        self["dt"] = 0.0
        self["init_level_set_chan"] = "disk"
        self["init_level_set_morph"] = "checkerboard"
        self["smoothing"] = 0.0
        self["alpha"] = 0.0
        self["balloon"] = 0.0
        self["seed_pointX"] = 0.0
        self["seed_pointY"] = 0.0
        self["seed_pointZ"] = 0.0
        # Instance-level key order (shadows the empty class-level pkeys).
        self.pkeys = list(self.keys())

    def printparam(self, key):
        """Return description of parameter from param list."""
        return f"{key}={self[key]}\n\t{self.descriptions[key]}\n\t{self.ranges[key]}\n"

    def __str__(self):
        """Return descriptions of all parameters in param list."""
        out = ""
        for index, k in enumerate(self.pkeys):
            out += f"{index} " + self.printparam(k)
        return out

    def tolist(self):
        """Convert dictionary of params into a list of parameter values.

        BUG FIX: the original read ``self.params[key]``, but a ``parameters``
        instance *is* the mapping and has no ``params`` attribute, so this
        method always raised AttributeError. It now indexes ``self`` directly.
        """
        plist = []
        for key in self.pkeys:
            plist.append(self[key])
        return plist

    def fromlist(self, individual):
        """Convert individual's list into dictionary of params."""
        logging.getLogger().info(f"Parsing Parameter List for {len(individual)} parameters")
        for index, key in enumerate(self.pkeys):
            self[key] = individual[index]
class segmentor(object):
    """Common base for the segmentor wrapper classes defined below.

    Subclasses register themselves in ``algorithmspace`` and override
    ``evaluate`` with a real segmentation algorithm.

    Functions:
    evaluate -- Run segmentation algorithm to get inferred mask.
    """

    algorithm = ""

    def __init__(self, paramlist=None):
        """Build the parameter dictionary, optionally seeded from a list."""
        self.params = parameters()
        if paramlist:
            self.params.fromlist(paramlist)

    def evaluate(self, img):
        """Return a placeholder all-zero mask shaped like the image's rows."""
        return np.zeros(img.shape[0:1])

    def __str__(self):
        """Render the algorithm code followed by its tuned parameters."""
        header = f"{self.params['algorithm']} -- \n"
        body = "".join(f"\t{key} = {self.params[key]}\n" for key in self.paramindexes)
        return header + body
class ColorThreshold(segmentor):
    """Peform Color Thresholding segmentation algorithm. Segments parts of the image
    based on the numerical values for the respective channel.

    Parameters:
    Channel -- which channel to threshold (0-2 selects R,G,B directly;
        values past the image's own channels fall back to H,S,V)
    mu -- lower threshold as a fraction of the channel's maximum value
    sigma -- upper threshold as a fraction of the channel's maximum value
    """

    def __init__(self, paramlist=None):
        """Get parameters from parameter list that are used in segmentation algorithm.
        Assign default values to these parameters."""
        super(ColorThreshold, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "CT"
            self.params["Channel"] = 5
            self.params["mu"] = 0.4
            self.params["sigma"] = 0.6
        self.paramindexes = ["Channel", "sigma", "mu"]

    def evaluate(self, img):
        """Evaluate segmentation algorithm on training image.

        Keyword arguments:
        img -- Original training image.

        Output:
        output -- resulting binary segmentation mask from thresholding.
        """
        channel_num = self.params["Channel"]
        if len(img.shape) > 2:
            num_channels = img.shape[2]
            if channel_num < num_channels:
                # Direct color channel (e.g. R, G or B).
                channel = img[:, :, int(channel_num)]
            else:
                # Channel indices beyond the image's own map to HSV planes.
                # BUG FIX: removed a stray debug print() that spammed stdout
                # on every GA evaluation.
                hsv = skimage.color.rgb2hsv(img)
                channel = hsv[:, :, int(channel_num) - 3]
        else:
            channel = img
        # Thresholds are expressed as fractions of the channel's maximum.
        pscale = np.max(channel)
        my_mx = self.params["sigma"] * pscale
        my_mn = self.params["mu"] * pscale
        if my_mn < my_mx:
            # Keep pixels inside the [my_mn, my_mx] band.
            output = np.ones(channel.shape)
            output[channel < my_mn] = 0
            output[channel > my_mx] = 0
        else:
            # Inverted thresholds: keep pixels outside the [my_mx, my_mn] band.
            output = np.zeros(channel.shape)
            output[channel > my_mn] = 1
            output[channel < my_mx] = 1
        return output
# Register the segmentor under its two-letter algorithm code.
algorithmspace['CT'] = ColorThreshold
# BUG FIX: removed `algorithmspace["AAA"] = TripleA` -- no class named
# TripleA is defined anywhere earlier in this module, so the assignment
# raised NameError the moment the module was imported.
class Felzenszwalb(segmentor):
    """Perform Felzenszwalb segmentation algorithm. ONLY WORKS FOR RGB. The felzenszwalb
    algorithm computes a graph based on the segmentation. Produces an oversegmentation
    of the multichannel image using a min-span tree. Returns an integer mask indicating
    the segment labels.

    Parameters:
    scale -- float, higher means larger clusters
    sigma -- float, std. dev of Gaussian kernel for preprocessing
    min_size -- int, minimum component size. For postprocessing
    multichannel -- bool, Whether the image is 2D or 3D
    """

    # NOTE(review): defining __doc__ as a method replaces the class docstring
    # attribute above with a callable (instance.__doc__() returns the help
    # text). Unusual, but left as written.
    def __doc__(self):
        """Return help string for function."""
        myhelp = "Wrapper function for the scikit-image Felzenszwalb segmentor:"
        myhelp += f" xx {skimage.segmentation.random_walker.__doc__}"
        return myhelp

    def __init__(self, paramlist=None):
        """Get parameters from parameter list that are used in segmentation algorithm.
        Assign default values to these parameters."""
        super(Felzenszwalb, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "FB"
            self.params["scale"] = 984
            self.params["sigma"] = 0.09
            self.params["min_size"] = 92
        self.paramindexes = ["scale", "sigma", "min_size"]

    def evaluate(self, img):
        """Evaluate segmentation algorithm on training image.

        Keyword arguments:
        img -- Original training image.

        Output:
        output -- resulting segmentation mask from algorithm.
        """
        # A third axis means a multichannel (e.g. RGB) image.
        multichannel = False
        if len(img.shape) > 2:
            multichannel = True
        output = skimage.segmentation.felzenszwalb(
            img,
            self.params["scale"],
            self.params["sigma"],
            self.params["min_size"],
            multichannel=multichannel,
        )
        return output
algorithmspace["FB"] = Felzenszwalb
class Slic(segmentor):
    """Wrapper for skimage SLIC superpixels: k-means clustering in Color
    (x, y, z) space. Produces a 2D or 3D array of segment labels.

    Parameters:
    image -- ndarray, input image
    n_segments -- int, approximate number of labels in segmented output image
    compactness -- float, Balances color proximity and space proximity.
        Higher values mean more weight to space proximity (superpixels
        become more square/cubic). Recommended log scale values (0.01,
        0.1, 1, 10, 100, etc)
    max_iter -- int, max number of iterations of k-means
    sigma -- float or (3,) shape array of floats, width of Gaussian
        smoothing kernel. For pre-processing for each dimension of the
        image. Zero means no smoothing.
    spacing -- (3,) shape float array. Voxel spacing along each image
        dimension. Default is uniform spacing
    multichannel -- bool, multichannel (True) vs grayscale (False)
    """

    def __init__(self, paramlist=None):
        """Fill in SLIC defaults when no parameter list is supplied."""
        super(Slic, self).__init__(paramlist)
        if not paramlist:
            self.params.update({
                "algorithm": "SC",
                "n_segments": 5,
                "compactness": 5,
                "iterations": 3,
                "sigma": 5,
            })
        self.paramindexes = ["n_segments", "compactness", "iterations", "sigma"]

    def evaluate(self, img):
        """Run SLIC on the training image and return the label mask."""
        multichannel = len(img.shape) > 2
        return skimage.segmentation.slic(
            img,
            n_segments=self.params["n_segments"],
            compactness=self.params["compactness"],
            max_iter=self.params["iterations"],
            sigma=self.params["sigma"],
            convert2lab=True,
            multichannel=multichannel,
        )
algorithmspace["SC"] = Slic
class QuickShift(segmentor):
    """Perform the Quick Shift segmentation algorithm. Segments images with quickshift
    clustering in Color (x,y) space. Returns ndarray segmentation mask of the labels.

    Parameters:
    image -- ndarray, input image
    ratio -- float, balances color-space proximity & image-space
        proximity. Higher vals give more weight to color-space
    kernel_size -- float, Width of Gaussian kernel used for smoothing.
        Higher means fewer clusters
    max_dist -- float, Cut-off point for data distances. Higher means fewer clusters
    sigma -- float, Width of Gaussian smoothing as preprocessing.
        Zero means no smoothing
    random_seed -- int, Random seed used for breaking ties.
    """

    def __init__(self, paramlist=None):
        """Get parameters from parameter list that are used in segmentation algorithm.
        Assign default values to these parameters."""
        super(QuickShift, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "QS"
            self.params["kernel_size"] = 5
            self.params["max_dist"] = 60
            self.params["sigma"] = 5
            self.params["Channel"] = 1
            self.params["ratio"] = 2
        self.paramindexes = ["kernel_size", "max_dist", "sigma", "Channel", "ratio"]

    def evaluate(self, img):
        """Evaluate segmentation algorithm on training image.

        Keyword arguments:
        img -- Original training image.

        Output:
        output -- resulting segmentation mask from algorithm.
        """
        # NOTE(review): gray2rgb is applied unconditionally, and the "Channel"
        # parameter is repurposed as quickshift's random_seed rather than
        # selecting a channel -- presumably intentional; confirm.
        output = skimage.segmentation.quickshift(
            color.gray2rgb(img),
            ratio=self.params["ratio"],
            kernel_size=self.params["kernel_size"],
            max_dist=self.params["max_dist"],
            sigma=self.params["sigma"],
            random_seed=self.params["Channel"],
        )
        return output
algorithmspace["QS"] = QuickShift
# DO: This algorithm seems to need a channel input. We should fix that.
class Watershed(segmentor):
    """Perform the Watershed segmentation algorithm.

    Uses user-markers, treats markers as basins and 'floods' them.
    Especially good if overlapping objects. Returns a labeled image ndarray.

    Parameters:
    image -- ndarray, input array
    compactness -- float, compactness of the basins. Higher values
        make more regularly-shaped basins.
    """

    # Not using connectivity, markers, or offset params as arrays would
    # expand the search space too much.
    # abbreviation for algorithm = WS

    def __init__(self, paramlist=None):
        """Get parameters from parameter list that are used in segmentation algorithm.
        Assign default values to these parameters."""
        super(Watershed, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "WS"
            self.params["compactness"] = 2.0
        self.paramindexes = ["compactness"]

    def evaluate(self, img):
        """Evaluate segmentation algorithm on training image.

        Keyword arguments:
        img -- Original training image.

        Output:
        output -- resulting segmentation mask from algorithm.
        """
        # Watershed operates on a single channel. Use the first channel of
        # color images; pass 2D grayscale input through directly (indexing
        # a 2D array with [:, :, 0] previously raised an IndexError).
        if len(img.shape) > 2:
            channel_img = img[:, :, 0]
        else:
            channel_img = img
        output = skimage.segmentation.watershed(
            channel_img, markers=None, compactness=self.params["compactness"]
        )
        return output


algorithmspace["WS"] = Watershed
class Chan_Vese(segmentor):
    """Perform the Chan-Vese segmentation algorithm. ONLY GRAYSCALE.

    Segments objects without clear boundaries. Returns the segmentation
    array produced by the algorithm.

    Parameters:
    image -- ndarray grayscale image to be segmented
    mu -- float, 'edge length' weight parameter. Higher mu vals make a
        'round edge'; closer to zero will detect smaller objects. Typical
        values are from 0 - 1.
    lambda1 -- float 'diff from average' weight param to determine if
        output region is True. If lower than lambda2, the region has a
        larger range of values than the other
    lambda2 -- float 'diff from average' weight param to determine if
        output region is False. If lower than lambda1, the region will
        have a larger range of values
    tol -- positive float, typically (0-1), very low level set variation
        tolerance between iterations.
    max_iter -- uint, max number of iterations before the algorithm stops
    dt -- float, multiplication factor applied at the calculations step
    init_level_set -- str, starting level set ('checkerboard', 'disk',
        or 'small disk')
    """

    # Abbreviation for Algorithm = CV

    def __init__(self, paramlist=None):
        """Get parameters from parameter list that are used in segmentation algorithm.
        Assign default values to these parameters."""
        super(Chan_Vese, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "CV"
            self.params["mu"] = 2.0
            self.params["lambda"] = (10, 20)
            self.params["iterations"] = 10
            self.params["dt"] = 0.10
            self.params["tolerance"] = 0.001
            self.params["init_level_set_chan"] = "small disk"
        self.paramindexes = ["mu", "lambda", "iterations", "dt", "init_level_set_chan"]

    def evaluate(self, img):
        """Evaluate segmentation algorithm on training image.

        Keyword arguments:
        img -- Original training image.

        Output:
        output -- resulting segmentation mask from algorithm.
        """
        # chan_vese only accepts grayscale input.
        if len(img.shape) == 3:
            img = skimage.color.rgb2gray(img)
        output = skimage.segmentation.chan_vese(
            img,
            mu=self.params["mu"],
            lambda1=self.params["lambda"][0],
            lambda2=self.params["lambda"][1],
            tol=self.params["tolerance"],
            max_iter=self.params["iterations"],
            dt=self.params["dt"],
            # Forward the searched level-set choice. Previously this
            # parameter was in paramindexes but never passed on, so
            # mutating it had no effect on the segmentation.
            init_level_set=self.params["init_level_set_chan"],
        )
        return output


algorithmspace["CV"] = Chan_Vese
class Morphological_Chan_Vese(segmentor):
    """Perform the Morphological Chan-Vese segmentation algorithm.

    ONLY WORKS ON GRAYSCALE. Active contours without edges; useful for
    segmenting images/volumes that lack clean borders, provided the inside
    of the object looks different from the outside (color, shade, darker).

    Parameters:
    image -- ndarray of grayscale image
    iterations -- uint, number of iterations to run
    init_level_set -- str, or array same shape as image. Accepted string
        values are:
        'checkerboard': binary level set from checkerboard_level_set
        'circle': binary level set of a circle from circle_level_set,
            given a radius and a center
    smoothing -- uint, times the smoothing operator is applied per
        iteration; usually around 1-4, larger values smooth more
    lambda1 -- weight param for the outer region. If larger than lambda2,
        the outer region gets a larger range of values than the inner one.
    lambda2 -- weight param for the inner region. If larger than lambda1,
        the inner region gets a larger range of values than the outer one.
    """

    # Abbreviation for algorithm = MCV

    def __init__(self, paramlist=None):
        """Initialize the wrapper, seeding default parameter values when
        no explicit parameter list is supplied."""
        super().__init__(paramlist)
        if not paramlist:
            defaults = {
                "algorithm": "MCV",
                "iterations": 10,
                "init_level_set_morph": "checkerboard",
                "smoothing": 10,
                "lambda": (10, 20),
            }
            for key, value in defaults.items():
                self.params[key] = value
        self.paramindexes = [
            "iterations",
            "init_level_set_morph",
            "smoothing",
            "lambda",
        ]

    def evaluate(self, img):
        """Evaluate segmentation algorithm on training image.

        Keyword arguments:
        img -- Original training image.

        Output:
        resulting segmentation mask from the algorithm.
        """
        # The algorithm only accepts grayscale input.
        gray = skimage.color.rgb2gray(img) if len(img.shape) == 3 else img
        lambda1, lambda2 = self.params["lambda"]
        return skimage.segmentation.morphological_chan_vese(
            gray,
            iterations=self.params["iterations"],
            init_level_set=self.params["init_level_set_morph"],
            smoothing=self.params["smoothing"],
            lambda1=lambda1,
            lambda2=lambda2,
        )


algorithmspace["MCV"] = Morphological_Chan_Vese
class MorphGeodesicActiveContour(segmentor):
    """Perform the Morphological Geodesic Active Contour segmentation algorithm.

    Uses an image from inverse_gaussian_gradient in order to segment
    objects with visible but noisy/broken borders.
    inverse_gaussian_gradient computes the magnitude of the gradients in
    an image and returns a preprocessed image suitable for this function.
    Returns ndarray of the segmented image.

    Parameters:
    gimage -- array, preprocessed image to be segmented.
    iterations -- uint, number of iterations to run.
    init_level_set -- str, or array same shape as gimage. If string,
        possible values are:
        'checkerboard': binary level set from checkerboard_level_set
        'circle': binary level set of a circle from circle_level_set,
            given a radius and a center
    smoothing -- uint, number of times the smoothing operator is applied
        per iteration. Usually 1-4; larger values give smoother segmentation.
    threshold -- areas of the image with a smaller value than the threshold
        are considered borders.
    balloon -- float, guides the contour in low-information parts of the image.
    """

    # Abbreviation for algorithm = AC

    def __init__(self, paramlist=None):
        """Get parameters from parameter list that are used in segmentation algorithm.
        Assign default values to these parameters."""
        super(MorphGeodesicActiveContour, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "AC"
            self.params["alpha"] = 0.2
            self.params["sigma"] = 0.3
            self.params["iterations"] = 10
            self.params["init_level_set_morph"] = "checkerboard"
            self.params["smoothing"] = 5
            self.params["balloon"] = 10
        self.paramindexes = [
            "alpha",
            "sigma",
            "iterations",
            "init_level_set_morph",
            "smoothing",
            "balloon",
        ]

    def evaluate(self, img):
        """Evaluate segmentation algorithm on training image.

        Keyword arguments:
        img -- Original training image.

        Output:
        output -- resulting segmentation mask from algorithm.
        """
        # rgb2gray rejects 2D arrays, so only convert genuine RGB input
        # (the sibling wrappers guard on ndim the same way).
        gray = color.rgb2gray(img) if len(img.shape) == 3 else img
        # Run inverse_gaussian_gradient to get the preprocessed image.
        gimage = skimage.segmentation.inverse_gaussian_gradient(
            gray, self.params["alpha"], self.params["sigma"]
        )
        output = skimage.segmentation.morphological_geodesic_active_contour(
            gimage,
            self.params["iterations"],
            self.params["init_level_set_morph"],
            smoothing=self.params["smoothing"],
            threshold="auto",
            balloon=self.params["balloon"],
        )
        return output


algorithmspace["AC"] = MorphGeodesicActiveContour
| 35.81323 | 164 | 0.633529 | """Segmentor library designed to learn how to segment images using GAs.
This libary actually does not incode the GA itself, instead it just defines
the search parameters the evaluation funtions and the fitness function (comming soon)."""
# DO: Research project-clean up the parameters class to reduce the search space
# DO: Change the seed from a number to a fraction 0-1 which is scaled to image rows and columns
# DO: Enumerate teh word based measures.
from collections import OrderedDict
import sys
import logging
import numpy as np
import skimage
from skimage import segmentation
from skimage import color
from see.Segment_Similarity_Measure import FF_ML2DHD_V2
def FitnessFunction(inferred, ground_truth):
return FF_ML2DHD_V2(inferred, ground_truth)
# List of all algorithms
algorithmspace = dict()
def runAlgo(img, ground_img, individual, return_mask=False):
"""Run and evaluate the performance of an individual.
Keyword arguments:
img -- training image
ground_img -- the ground truth for the image mask
individual -- the list representing an individual in our population
return_mask -- Boolean value indicating whether to return resulting
mask for the individual or not (default False)
Output:
fitness -- resulting fitness value for the individual
mask -- resulting image mask associated with the individual (if return_mask=True)
"""
logging.getLogger().info(f"Running Algorithm {individual[0]}")
# img = copy.deepcopy(copyImg)
seg = algoFromParams(individual)
mask = seg.evaluate(img)
logging.getLogger().info("Calculating Fitness")
fitness = FitnessFunction(mask, ground_img)
if return_mask:
return [fitness, mask]
else:
return fitness
def algoFromParams(individual):
"""Convert an individual's param list to an algorithm. Assumes order
defined in the parameters class.
Keyword arguments:
individual -- the list representing an individual in our population
Output:
algorithm(individual) -- algorithm associated with the individual
"""
if individual[0] in algorithmspace:
algorithm = algorithmspace[individual[0]]
return algorithm(individual)
else:
raise ValueError("Algorithm not avaliable")
def popCounts(pop):
"""Count the number of each algorihtm in a population"""
algorithms = eval(parameters.ranges["algorithm"])
counts = {a:0 for a in algorithms}
for p in pop:
print(p[0])
counts[p[0]] += 1
return counts
class parameters(OrderedDict):
"""Construct an ordered dictionary that represents the search space.
Functions:
printparam -- returns description for each parameter
tolist -- converts dictionary of params into list
fromlist -- converts individual into dictionary of params
"""
descriptions = dict()
ranges = dict()
pkeys = []
ranges["algorithm"] = "['CT','FB','SC','WS','CV','MCV','AC']"
descriptions["algorithm"] = "string code for the algorithm"
descriptions["beta"] = "A parameter for randomWalker So, I should take this out"
ranges["beta"] = "[i for i in range(0,10000)]"
descriptions["tolerance"] = "A parameter for flood and flood_fill"
ranges["tolerance"] = "[float(i)/1000 for i in range(0,1000,1)]"
descriptions["scale"] = "A parameter for felzenszwalb"
ranges["scale"] = "[i for i in range(0,10000)]"
descriptions["sigma"] = "sigma value. A parameter for felzenswalb, inverse_guassian_gradient, slic, and quickshift"
ranges["sigma"] = "[float(i)/100 for i in range(0,100)]"
descriptions["min_size"] = "parameter for felzenszwalb"
ranges["min_size"] = "[i for i in range(0,10000)]"
descriptions["n_segments"] = "A parameter for slic"
ranges["n_segments"] = "[i for i in range(2,10000)]"
descriptions["iterations"] = "A parameter for both morphological algorithms"
ranges["iterations"] = "[10, 10]"
descriptions["ratio"] = "A parameter for ratio"
ranges["ratio"] = "[float(i)/100 for i in range(0,100)]"
descriptions["kernel_size"] = "A parameter for kernel_size"
ranges["kernel_size"] = "[i for i in range(0,10000)]"
descriptions["max_dist"] = "A parameter for quickshift"
ranges["max_dist"] = "[i for i in range(0,10000)]"
descriptions["Channel"] = "A parameter for Picking the Channel R,G,B,H,S,V"
ranges["Channel"] = "[0,1,2,3,4,5]"
descriptions["connectivity"] = "A parameter for flood and floodfill"
ranges["connectivity"] = "[i for i in range(0, 9)]"
descriptions["compactness"] = "A parameter for slic and watershed"
ranges["compactness"] = "[0.0001,0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]"
descriptions["mu"] = "A parameter for chan_vese"
ranges["mu"] = "[float(i)/100 for i in range(0,100)]"
descriptions["lambda"] = "A parameter for chan_vese and morphological_chan_vese"
ranges["lambda"] = "[(1,1), (1,2), (2,1)]"
descriptions["dt"] = "#An algorithm for chan_vese May want to make seperate level sets for different functions e.g. Morph_chan_vese vs morph_geo_active_contour"
ranges["dt"] = "[float(i)/10 for i in range(0,100)]"
descriptions["init_level_set_chan"] = "A parameter for chan_vese and morphological_chan_vese"
ranges["init_level_set_chan"] = "['checkerboard', 'disk', 'small disk']"
descriptions["init_level_set_morph"] = "A parameter for morphological_chan_vese"
ranges["init_level_set_morph"] = "['checkerboard', 'circle']"
descriptions["smoothing"] = "A parameter used in morphological_geodesic_active_contour"
ranges["smoothing"] = "[i for i in range(1, 10)]"
descriptions["alpha"] = "A parameter for inverse_guassian_gradient"
ranges["alpha"] = "[i for i in range(0,10000)]"
descriptions["balloon"] = "A parameter for morphological_geodesic_active_contour"
ranges["balloon"] = "[i for i in range(-50,50)]"
descriptions["seed_pointX"] = "A parameter for flood and flood_fill"
ranges["seed_pointX"] = "[0.0]"
descriptions["seed_pointY"] = "??"
ranges["seed_pointY"] = "[0.0]"
descriptions["seed_pointZ"] = "??"
ranges["seed_pointZ"] = "[0.0]"
# Try to set defaults only once.
# Current method may cause all kinds of weird problems.
# @staticmethod
# def __Set_Defaults__()
def __init__(self):
"""Set default values for each param in the dictionary."""
self["algorithm"] = "None"
self["beta"] = 0.0
self["tolerance"] = 0.0
self["scale"] = 0.0
self["sigma"] = 0.0
self["min_size"] = 0.0
self["n_segments"] = 0.0
self["iterations"] = 10
self["ratio"] = 0.0
self["kernel_size"] = 0.0
self["max_dist"] = 0.0
self["Channel"] = 0.0
self["connectivity"] = 0.0
self["compactness"] = 0.0
self["mu"] = 0.0
self["lambda"] = (1, 1)
self["dt"] = 0.0
self["init_level_set_chan"] = "disk"
self["init_level_set_morph"] = "checkerboard"
self["smoothing"] = 0.0
self["alpha"] = 0.0
self["balloon"] = 0.0
self["seed_pointX"] = 0.0
self["seed_pointY"] = 0.0
self["seed_pointZ"] = 0.0
self.pkeys = list(self.keys())
def printparam(self, key):
"""Return description of parameter from param list."""
return f"{key}={self[key]}\n\t{self.descriptions[key]}\n\t{self.ranges[key]}\n"
def __str__(self):
"""Return descriptions of all parameters in param list."""
out = ""
for index, k in enumerate(self.pkeys):
out += f"{index} " + self.printparam(k)
return out
def tolist(self):
"""Convert dictionary of params into list of parameters."""
plist = []
for key in self.pkeys:
plist.append(self.params[key])
return plist
def fromlist(self, individual):
"""Convert individual's list into dictionary of params."""
logging.getLogger().info(f"Parsing Parameter List for {len(individual)} parameters")
for index, key in enumerate(self.pkeys):
self[key] = individual[index]
class segmentor(object):
"""Base class for segmentor classes defined below.
Functions:
evaluate -- Run segmentation algorithm to get inferred mask.
"""
algorithm = ""
def __init__(self, paramlist=None):
"""Generate algorithm params from parameter list."""
self.params = parameters()
if paramlist:
self.params.fromlist(paramlist)
def evaluate(self, img):
"""Run segmentation algorithm to get inferred mask."""
return np.zeros(img.shape[0:1])
def __str__(self):
"""Return params for algorithm."""
mystring = f"{self.params['algorithm']} -- \n"
for p in self.paramindexes:
mystring += f"\t{p} = {self.params[p]}\n"
return mystring
class ColorThreshold(segmentor):
"""Peform Color Thresholding segmentation algorithm. Segments parts of the image
based on the numerical values for the respective channel.
Parameters:
my_mx -- maximum thresholding value
my_mn -- minimum thresholding value
"""
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(ColorThreshold, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "CT"
self.params["Channel"] = 5
self.params["mu"] = 0.4
self.params["sigma"] = 0.6
self.paramindexes = ["Channel", "sigma", "mu"]
def evaluate(self, img): #XX
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
channel_num = self.params["Channel"]
if len(img.shape) > 2:
num_channels = img.shape[2]
if channel_num < num_channels:
channel = img[:, :, int(channel_num)]
else:
hsv = skimage.color.rgb2hsv(img)
print(f"working with hsv channel {channel_num-3}")
channel = hsv[:, :, int(channel_num)-3]
else:
channel = img
pscale = np.max(channel)
my_mx = self.params["sigma"] * pscale
my_mn = self.params["mu"] * pscale
output = None
if my_mn < my_mx:
output = np.ones(channel.shape)
output[channel < my_mn] = 0
output[channel > my_mx] = 0
else:
output = np.zeros(channel.shape)
output[channel > my_mn] = 1
output[channel < my_mx] = 1
return output
algorithmspace['CT'] = ColorThreshold
class TripleA (segmentor):
def __init__(self, paramlist=None):
super(TripleA, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "AAA"
self.params["mu"] = 0.4
self.params["sigma"] = 0.6
self.paramindexes = ["sigma", "mu"]
def evaluate(self, img): #XX
channel_num = 1 # Do: Need to make this a searchable parameter.
if len(img.shape) > 2:
if channel_num < img.shape[2]:
channel = img[:, :, 1]
else:
channel = img[:, :, 0]
else:
channel = img
pscale = np.max(channel)
my_mx = self.params["sigma"] * pscale
my_mn = self.params["mu"] * pscale
if my_mx < my_mn:
temp = my_mx
my_mx = my_mn
my_mn = temp
output = np.ones(channel.shape)
output[channel < my_mn] = 0
output[channel > my_mx] = 0
return output
algorithmspace["AAA"] = TripleA
class Felzenszwalb(segmentor):
"""Perform Felzenszwalb segmentation algorithm. ONLY WORKS FOR RGB. The felzenszwalb
algorithms computes a graph based on the segmentation. Produces an oversegmentation
of the multichannel using min-span tree. Returns an integer mask indicating the segment labels.
Parameters:
scale -- float, higher meanse larger clusters
sigma -- float, std. dev of Gaussian kernel for preprocessing
min_size -- int, minimum component size. For postprocessing
mulitchannel -- bool, Whether the image is 2D or 3D
"""
def __doc__(self):
"""Return help string for function."""
myhelp = "Wrapper function for the scikit-image Felzenszwalb segmentor:"
myhelp += f" xx {skimage.segmentation.random_walker.__doc__}"
return myhelp
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Felzenszwalb, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "FB"
self.params["scale"] = 984
self.params["sigma"] = 0.09
self.params["min_size"] = 92
self.paramindexes = ["scale", "sigma", "min_size"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
multichannel = False
if len(img.shape) > 2:
multichannel = True
output = skimage.segmentation.felzenszwalb(
img,
self.params["scale"],
self.params["sigma"],
self.params["min_size"],
multichannel=multichannel,
)
return output
algorithmspace["FB"] = Felzenszwalb
class Slic(segmentor):
"""Perform the Slic segmentation algorithm. Segments k-means clustering in Color space
(x, y, z). Returns a 2D or 3D array of labels.
Parameters:
image -- ndarray, input image
n_segments -- int, approximate number of labels in segmented output image
compactness -- float, Balances color proximity and space proximity.
Higher values mean more weight to space proximity (superpixels
become more square/cubic) Recommended log scale values (0.01,
0.1, 1, 10, 100, etc)
max_iter -- int, max number of iterations of k-means
sigma -- float or (3,) shape array of floats, width of Guassian
smoothing kernel. For pre-processing for each dimesion of the
image. Zero means no smoothing.
spacing -- (3,) shape float array. Voxel spacing along each image
dimension. Defalt is uniform spacing
multichannel -- bool, multichannel (True) vs grayscale (False)
"""
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Slic, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "SC"
self.params["n_segments"] = 5
self.params["compactness"] = 5
self.params["iterations"] = 3
self.params["sigma"] = 5
self.paramindexes = ["n_segments", "compactness", "iterations", "sigma"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
multichannel = False
if len(img.shape) > 2:
multichannel = True
output = skimage.segmentation.slic(
img,
n_segments=self.params["n_segments"],
compactness=self.params["compactness"],
max_iter=self.params["iterations"],
sigma=self.params["sigma"],
convert2lab=True,
multichannel=multichannel,
)
return output
algorithmspace["SC"] = Slic
class QuickShift(segmentor):
"""Perform the Quick Shift segmentation algorithm. Segments images with quickshift
clustering in Color (x,y) space. Returns ndarray segmentation mask of the labels.
Parameters:
image -- ndarray, input image
ratio -- float, balances color-space proximity & image-space
proximity. Higher vals give more weight to color-space
kernel_size: float, Width of Guassian kernel using smoothing.
Higher means fewer clusters
max_dist -- float, Cut-off point for data distances. Higher means fewer clusters
sigma -- float, Width of Guassian smoothing as preprocessing.
Zero means no smoothing
random_seed -- int, Random seed used for breacking ties.
"""
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(QuickShift, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "QS"
self.params["kernel_size"] = 5
self.params["max_dist"] = 60
self.params["sigma"] = 5
self.params["Channel"] = 1
self.params["ratio"] = 2
self.paramindexes = ["kernel_size", "max_dist", "sigma", "Channel", "ratio"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
output = skimage.segmentation.quickshift(
color.gray2rgb(img),
ratio=self.params["ratio"],
kernel_size=self.params["kernel_size"],
max_dist=self.params["max_dist"],
sigma=self.params["sigma"],
random_seed=self.params["Channel"],
)
return output
algorithmspace["QS"] = QuickShift
#DO: This algorithm seems to need a channel input. We should fix that.
class Watershed(segmentor):
"""Perform the Watershed segmentation algorithm. Uses user-markers.
treats markers as basins and 'floods' them. Especially good if overlapping objects.
Returns a labeled image ndarray.
Parameters:
image -- ndarray, input array
compactness -- float, compactness of the basins. Higher values
make more regularly-shaped basin.
"""
# Not using connectivity, markers, or offset params as arrays would
# expand the search space too much.
# abbreviation for algorithm = WS
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Watershed, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "WS"
self.params["compactness"] = 2.0
self.paramindexes = ["compactness"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
channel = 0
channel_img = img[:, :, channel]
output = skimage.segmentation.watershed(
channel_img, markers=None, compactness=self.params["compactness"]
)
return output
algorithmspace["WS"] = Watershed
class Chan_Vese(segmentor):
"""Peform Chan Vese segmentation algorithm. ONLY GRAYSCALE. Segments objects
without clear boundaries. Returns segmentation array of algorithm.
Parameters:
image -- ndarray grayscale image to be segmented
mu -- float, 'edge length' weight parameter. Higher mu vals make a
'round edge' closer to zero will detect smaller objects. Typical
values are from 0 - 1.
lambda1 -- float 'diff from average' weight param to determine if
output region is True. If lower than lambda1, the region has a
larger range of values than the other
lambda2 -- float 'diff from average' weight param to determine if
output region is False. If lower than lambda1, the region will
have a larger range of values
tol -- positive float, typically (0-1), very low level set variation
tolerance between iterations.
max_iter -- uint, max number of iterations before algorithms stops
dt -- float, Multiplication factor applied at the calculations step
"""
# Abbreviation for Algorithm = CV
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Chan_Vese, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "CV"
self.params["mu"] = 2.0
self.params["lambda"] = (10, 20)
self.params["iterations"] = 10
self.params["dt"] = 0.10
self.params["tolerance"] = 0.001
self.params["init_level_set_chan"] = "small disk"
self.paramindexes = ["mu", "lambda", "iterations", "dt", "init_level_set_chan"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
if len(img.shape) == 3:
img = skimage.color.rgb2gray(img)
output = skimage.segmentation.chan_vese(
img,
mu=self.params["mu"],
lambda1=self.params["lambda"][0],
lambda2=self.params["lambda"][1],
tol=self.params["tolerance"],
max_iter=self.params["iterations"],
dt=self.params["dt"],
)
return output
algorithmspace["CV"] = Chan_Vese
class Morphological_Chan_Vese(segmentor):
"""Peform Morphological Chan Vese segmentation algorithm.
ONLY WORKS ON GRAYSCALE. Active contours without edges. Can be used to
segment images/volumes without good borders. Required that the inside of
the object looks different than outside (color, shade, darker).
Parameters:
image -- ndarray of grayscale image
iterations -- uint, number of iterations to run
init_level_set -- str, or array same shape as image. Accepted string
values are:
'checkerboard': Uses checkerboard_level_set. Returns a binary level set of a checkerboard
'circle': Uses circle_level_set. Creates a binary level set of a circle, given radius and a
center
smoothing -- uint, number of times the smoothing operator is applied
per iteration. Usually around 1-4. Larger values make it smoother
lambda1 -- Weight param for outer region. If larger than lambda2,
outer region will give larger range of values than inner value.
lambda2 -- Weight param for inner region. If larger thant lambda1,
inner region will have a larger range of values than outer region.
"""
# Abbreviation for algorithm = MCV
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Morphological_Chan_Vese, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "MCV"
self.params["iterations"] = 10
self.params["init_level_set_morph"] = "checkerboard"
self.params["smoothing"] = 10
self.params["lambda"] = (10, 20)
self.paramindexes = [
"iterations",
"init_level_set_morph",
"smoothing",
"lambda",
]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
if len(img.shape) == 3:
img = skimage.color.rgb2gray(img)
output = skimage.segmentation.morphological_chan_vese(
img,
iterations=self.params["iterations"],
init_level_set=self.params["init_level_set_morph"],
smoothing=self.params["smoothing"],
lambda1=self.params["lambda"][0],
lambda2=self.params["lambda"][1],
)
return output
algorithmspace["MCV"] = Morphological_Chan_Vese
class MorphGeodesicActiveContour(segmentor):
"""Peform Morphological Geodesic Active Contour segmentation algorithm. Uses
an image from inverse_gaussian_gradient in order to segment object with visible,
but noisy/broken borders. inverse_gaussian_gradient computes the magnitude of
the gradients in an image. Returns a preprocessed image suitable for above function.
Returns ndarray of segmented image.
Parameters:
gimage -- array, preprocessed image to be segmented.
iterations -- uint, number of iterations to run.
init_level_set -- str, array same shape as gimage. If string, possible
values are:
'checkerboard': Uses checkerboard_level_set. Returns a binary level set of a checkerboard
'circle': Uses circle_level_set. Creates a binary level set of a circle, given radius and a
center
smoothing -- uint, number of times the smoothing operator is applied
per iteration. Usually 1-4, larger values have smoother segmentation.
threshold -- Areas of image with a smaller value than the threshold are borders.
balloon -- float, guides contour of low-information parts of image.
"""
# Abbrevieation for algorithm = AC
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(MorphGeodesicActiveContour, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "AC"
self.params["alpha"] = 0.2
self.params["sigma"] = 0.3
self.params["iterations"] = 10
self.params["init_level_set_morph"] = "checkerboard"
self.params["smoothing"] = 5
self.params["balloon"] = 10
self.paramindexes = [
"alpha",
"sigma",
"iterations",
"init_level_set_morph",
"smoothing",
"balloon",
]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
# We run the inverse_gaussian_gradient to get the image to use
gimage = skimage.segmentation.inverse_gaussian_gradient(
color.rgb2gray(img), self.params["alpha"], self.params["sigma"]
)
# zeros = 0
output = skimage.segmentation.morphological_geodesic_active_contour(
gimage,
self.params["iterations"],
self.params["init_level_set_morph"],
smoothing=self.params["smoothing"],
threshold="auto",
balloon=self.params["balloon"],
)
return output
algorithmspace["AC"] = MorphGeodesicActiveContour
| 974 | 5 | 99 |
7b19b8ec5c5d60a6a3c79eddaa378e1b3aa0b813 | 289 | py | Python | Data Structures and Algorithms/LeetCode Algo Solutions/EASY DIFFICULTY PROBLEMS/PlusOne.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
] | 1 | 2022-01-22T18:19:07.000Z | 2022-01-22T18:19:07.000Z | Data Structures and Algorithms/LeetCode Algo Solutions/EASY DIFFICULTY PROBLEMS/PlusOne.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
] | null | null | null | Data Structures and Algorithms/LeetCode Algo Solutions/EASY DIFFICULTY PROBLEMS/PlusOne.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
] | null | null | null | # PLUS ONE LEETCODE SOLUTION:
# creating a class.
# creating a function to solve the problem.
| 28.9 | 69 | 0.643599 | # PLUS ONE LEETCODE SOLUTION:
# creating a class.
class Solution(object):
    """LeetCode 'Plus One': increment a number given as a list of digits."""

    def plusOne(self, digits):
        # Collapse the digit list into an integer, add one, then expand the
        # incremented value back into a list of single digits.
        number = int("".join(str(d) for d in digits))
        return [int(ch) for ch in str(number + 1)]
7e9e2fdac9e8a144278059bcc1c6b14baacd058f | 760 | py | Python | drone_awe/params/Drone/Mavic2.py | rymanderson/Drone-Models | 396ed030f277a96365c7cbfaffb3d2006e5b12a8 | [
"MIT"
] | 2 | 2019-12-01T10:27:54.000Z | 2019-12-01T10:28:07.000Z | drone_awe/params/Drone/Mavic2.py | rymanderson/drone_awe | 396ed030f277a96365c7cbfaffb3d2006e5b12a8 | [
"MIT"
] | null | null | null | drone_awe/params/Drone/Mavic2.py | rymanderson/drone_awe | 396ed030f277a96365c7cbfaffb3d2006e5b12a8 | [
"MIT"
] | null | null | null | # Create structure with mavic 2 pro parameters
# Parameter set for the DJI Mavic 2 Pro quadcopter.
params = {'wingtype' : 'rotary',
          'TOW' : 0.907, #kg, takeoff weight
          'max_speed' : 20, #m/s
          'max_alt' : 6000, #m above sea level
          'max_t' : 31, #min endurance, no wind
          'max_t_hover' : 29, #min hover endurance, no wind
          'max_tilt' : 35, #deg
          'min_temp' : -10, #deg C
          'max_temp' : 40, #deg C
          'power_rating' : 60, # units not stated -- presumably W; confirm
          'batt_type' : 'LiPo',
          'batt_capacity' : 3850, #mAh
          'batt_voltage' : 15.4, #V
          'batt_cells' : 4,
          'batt_energy' : 59.29, #Wh (= 3.85 Ah * 15.4 V)
          'batt_mass' : 0.297 #kg
          }
#test change | 36.190476 | 56 | 0.392105 | # Create structure with mavic 2 pro parameters
params = {'wingtype' : 'rotary',
'TOW' : 0.907, #kg
'max_speed' : 20, #m/s
'max_alt' : 6000, #m above sea level
'max_t' : 31, #min, no wind
'max_t_hover' : 29, #min, no wind
'max_tilt' : 35, #deg
'min_temp' : -10, #deg C
'max_temp' : 40, #deg C
'power_rating' : 60,
'batt_type' : 'LiPo',
'batt_capacity' : 3850, #mAh
'batt_voltage' : 15.4, #V
'batt_cells' : 4,
'batt_energy' : 59.29,
'batt_mass' : 0.297 #kg
}
#test change | 0 | 0 | 0 |
1260be3f4dddd22f4fa22666c86e4b7ede953487 | 3,714 | py | Python | utils.py | Agchai52/ConditinalDeblurGAN-Pytorch | 4e6438c22223b6cbcb7be0a27f009bca8b91d61c | [
"MIT"
] | 4 | 2020-04-10T17:57:16.000Z | 2020-11-24T01:19:04.000Z | utils.py | Agchai52/ConditinalDeblurGAN-Pytorch | 4e6438c22223b6cbcb7be0a27f009bca8b91d61c | [
"MIT"
] | 3 | 2021-02-25T03:29:41.000Z | 2022-03-12T00:18:08.000Z | utils.py | Agchai52/ConditinalDeblurGAN-Pytorch | 4e6438c22223b6cbcb7be0a27f009bca8b91d61c | [
"MIT"
] | 1 | 2020-12-10T03:47:28.000Z | 2020-12-10T03:47:28.000Z | from __future__ import division
import os
import numpy as np
import math
import torch
import torchvision.transforms as transforms
from PIL import Image
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
#plot_losses()
| 31.74359 | 114 | 0.618471 | from __future__ import division
import os
import numpy as np
import math
import torch
import torchvision.transforms as transforms
from PIL import Image
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
def save_img(args, image_tensor, filename):
    """Save a CHW tensor with values in [-1, 1] as ``<filename>.png``.

    The tensor is clamped to [-1, 1], shifted to [0, 1] and resized to
    (args.H, args.W) before saving.
    """
    image_tensor = torch.clamp(image_tensor, min=-1.0, max=1.0)
    # Normalize((-1,-1,-1), (2,2,2)) maps each channel (x + 1) / 2,
    # i.e. [-1, 1] -> [0, 1], then convert to PIL and restore the
    # original output resolution.
    transform = transforms.Compose([transforms.Normalize((-1.0, -1.0, -1.0), (2.0, 2.0, 2.0)),
                                    transforms.ToPILImage(),
                                    transforms.Resize((args.H, args.W))
                                    ])
    image_pil = transform(image_tensor)
    image_pil.save(filename+'.png')
def psnr(img1, img2):
    """Peak signal-to-noise ratio in dB; assumes pixel values scaled to [0, 1]."""
    peak = 1.0
    err = np.mean(np.square(img1 - img2))
    if err == 0:
        # Identical inputs: report a large finite sentinel instead of +inf.
        return 100
    return 20 * math.log10(peak / math.sqrt(err))
def find_latest_model(net_path):
    """Return the checkpoint path with the highest epoch number in *net_path*.

    Args:
        net_path: checkpoint directory; its last character selects the
            filename pattern ('G' -> generator, 'D' -> discriminator).

    Returns:
        Full path of the newest ``*_model_epoch_<n>.tar`` checkpoint, or
        ``False`` when the directory contains no numbered ``.tar`` files.
    """
    epochs = []
    for fname in os.listdir(net_path):
        if not fname.endswith(".tar"):
            continue
        # BUGFIX: the old code did int(fname[14:-4]) and crashed with
        # ValueError on any .tar file that did not match the fixed
        # 14-character prefix. Pull the trailing epoch number instead.
        tail = fname[:-len(".tar")].rsplit("_", 1)[-1]
        if tail.isdigit():
            epochs.append(int(tail))
    if not epochs:
        return False
    iter_num = max(epochs)
    if net_path[-1] == 'G':
        return os.path.join(net_path, "G_model_epoch_{}.tar".format(iter_num))
    elif net_path[-1] == 'D':
        return os.path.join(net_path, "D_model_epoch_{}.tar".format(iter_num))
class LambdaLR():
    """Linear learning-rate decay: factor 1.0 until *decay_start_epoch*,
    then a straight line down to 0.0 at *n_epochs* (offset shifts the
    epoch counter, e.g. when resuming training)."""

    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        """Return the multiplicative LR factor for *epoch*."""
        past_decay = epoch + self.offset - self.decay_start_epoch
        if past_decay <= 0:
            # Still before the decay phase: keep the full learning rate.
            return 1.0
        return 1.0 - past_decay / (self.n_epochs - self.decay_start_epoch)
def plot_losses():
    """Render training-curve figures from the on-disk training logs.

    Reads ``loss_record.txt`` (per-iteration D/G losses, one row per
    logged iteration), ``psnr_record.txt`` (per-epoch validation PSNR)
    and ``ddg_record.txt`` (per-epoch D/G averages), and saves one .jpg
    figure per curve group into the current working directory.
    """
    loss_record = "loss_record.txt"
    psnr_record = "psnr_record.txt"
    ddg_record = "ddg_record.txt"
    losses_dg = np.loadtxt(loss_record)
    psnr_ave = np.loadtxt(psnr_record)
    ddg_ave = np.loadtxt(ddg_record)
    # Discriminator loss: column 0, subsampled every 100 iterations.
    plt.figure()
    plt.plot(losses_dg[0:-1:100, 0], 'r-', label='d_loss')
    plt.xlabel("iteration*100")
    plt.ylabel("Error")
    #plt.xlim(xmin=-5, xmax=300) # xmax=300
    #plt.ylim(ymin=0, ymax=60) # ymax=60
    plt.title("Discriminator Loss")
    plt.savefig("plot_d_loss.jpg")
    # Generator loss: column 1.
    plt.figure()
    plt.plot(losses_dg[0:-1:100, 1], 'g-', label='g_loss')
    plt.xlabel("iteration*100")
    plt.ylabel("Error")
    #plt.xlim(xmin=-5, xmax=300)
    #plt.ylim(ymin=0, ymax=60)
    plt.title("Generator Loss")
    plt.savefig("plot_g_loss.jpg")
    # Generator loss components (columns 2-5: gan, l2, gradient,
    # dark-channel) overlaid on one figure.
    plt.figure()
    plt.plot(losses_dg[0:-1:100, 3], 'b--', label='l2_loss')
    plt.plot(losses_dg[0:-1:100, 4], 'g:', label='grad_loss')
    plt.plot(losses_dg[0:-1:100, 5], 'r-', label='dc_loss')
    plt.plot(losses_dg[0:-1:100, 2], 'k-', label='gan_loss')
    plt.xlabel("iteration*100")
    plt.ylabel("Error")
    # plt.xlim(xmin=-5, xmax=480)
    # plt.ylim(ymin=0, ymax=16)
    plt.title("L2_Grad_DarkChan Loss")
    plt.savefig("plot_4g_losses.jpg")
    # plt.show()
    # Validation PSNR per epoch.
    plt.figure()
    plt.plot(psnr_ave, 'r-')
    plt.xlabel("epochs")
    plt.ylabel("Average PSNR")
    # plt.xlim(xmin=-5, xmax=300) # xmax=300
    # plt.ylim(ymin=0, ymax=30.) # ymax=60
    plt.title("Validation PSNR")
    plt.savefig("plot_psnr_loss.jpg")
    # Per-epoch averages of D(fake), D(real) and the GAN loss.
    plt.figure()
    plt.plot(ddg_ave[:, 0], 'b-', label='d_fake')
    plt.plot(ddg_ave[:, 1], 'r-', label='d_real')
    plt.plot(ddg_ave[:, 2], 'g-', label='gan')
    plt.xlabel("epochs")
    plt.ylabel("Average loss")
    plt.legend()
    # plt.xlim(xmin=-5, xmax=300) # xmax=300
    #plt.ylim(ymin=0, ymax=2.) # ymax=60
    plt.title("D1_D2_G PSNR")
    plt.savefig("plot_ddg_loss.jpg")
#plot_losses()
| 3,303 | -4 | 168 |
6fbef8e9e8e81593b183fa49ac416fbec33e65bb | 218 | py | Python | run_DP.py | wdomitrz/dp_dpll | 1434efabe95665e984a746f8ef8ad7489916078c | [
"MIT"
] | null | null | null | run_DP.py | wdomitrz/dp_dpll | 1434efabe95665e984a746f8ef8ad7489916078c | [
"MIT"
] | null | null | null | run_DP.py | wdomitrz/dp_dpll | 1434efabe95665e984a746f8ef8ad7489916078c | [
"MIT"
] | null | null | null | import sys
from base import DP, least_common
if __name__ == "__main__":
    # Usage: python run_DP.py <file>.cnf
    if len(sys.argv) != 2:
        print("Expected exactly one argument - the .cnf file name")
        # sys.exit instead of the site-provided exit() builtin, which is
        # only guaranteed in interactive sessions.
        sys.exit(1)
    # Run the Davis-Putnam procedure with the least-common-literal heuristic.
    DP(least_common)(sys.argv[1])
| 24.222222 | 67 | 0.642202 | import sys
from base import DP, least_common
if __name__ == "__main__":
    # Usage: python run_DP.py <file>.cnf
    if len(sys.argv) != 2:
        print("Expected exactly one argument - the .cnf file name")
        # sys.exit instead of the site-provided exit() builtin, which is
        # only guaranteed in interactive sessions.
        sys.exit(1)
    # Run the Davis-Putnam procedure with the least-common-literal heuristic.
    DP(least_common)(sys.argv[1])
| 0 | 0 | 0 |
275fc9763ae6aaf317319e3382901b40c840cb50 | 1,400 | py | Python | main.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
] | null | null | null | main.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
] | null | null | null | main.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
] | null | null | null | import numpy as np
import hmm
import utils
from cnn import CNNModel3 as CNNModel
from discriminator import Discriminator
from ea import EA
POOL_SIZE = 4
if __name__ == "__main__":
main()
| 22.580645 | 88 | 0.691429 | import numpy as np
import hmm
import utils
from cnn import CNNModel3 as CNNModel
from discriminator import Discriminator
from ea import EA
POOL_SIZE = 4
def init_real_hmm():
    """Build the ground-truth HMM: fixed size/alphabet, deterministic start
    state, random transition and emission matrices."""
    num_states = 5
    alphabet = "abc"
    # Start distribution: always begin in state 0.
    start_dist = [1.0, 0.0, 0.0, 0.0, 0.0]
    # The hmm module draws random transition and emission matrices.
    return hmm.random_hmm(num_states, alphabet, start_dist)
def init_discriminator(real_hmm):
    """Create the discriminator for *real_hmm* and run its initial training.

    Returns the trained Discriminator instance.
    """
    # Set training parameters
    epochs = 20
    epoch_size = 100
    batch_size = 100
    seq_len = 20
    # Create real HMM data generator
    # train_data_gen = utils.HMMDataGenerator(real_hmm, epoch_size, batch_size, seq_len)
    # model = CNNModel(train_data_gen.input_shape())
    discriminator = Discriminator(real_hmm, epoch_size, batch_size, seq_len)
    # model.fit_generator(generator=train_data_gen, epochs=epochs)
    discriminator.initial_train(epochs)
    return discriminator
def main():
    """Run the full experiment: build the target HMM, train the neural
    discriminator against it, then evolve candidate HMMs with the EA.

    Returns:
        The final EA population. The previous version computed it and
        silently discarded it; callers that ignore the return value are
        unaffected.
    """
    # Initialise "real" HMM randomly
    real_hmm = init_real_hmm()
    # Initialise and train the neural network discriminator
    discriminator = init_discriminator(real_hmm)
    # Initialise EA toolbox
    # ea = EA(discriminator, pool_size=POOL_SIZE)
    ea = EA(discriminator)
    # Run EA
    final_pop = ea.run()
    # Clean up EA
    ea.cleanup()
    return final_pop
# Script entry point: only run the experiment when executed directly.
if __name__ == "__main__":
    main()
| 1,133 | 0 | 69 |
07c7b2b3a4e1561b7b9b663075fdfbd8c0b45840 | 13,807 | py | Python | guillotina/schema/_bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | [
"BSD-2-Clause"
] | null | null | null | guillotina/schema/_bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | [
"BSD-2-Clause"
] | null | null | null | guillotina/schema/_bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | [
"BSD-2-Clause"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from guillotina.schema._bootstrapinterfaces import IContextAwareDefaultFactory
from guillotina.schema._bootstrapinterfaces import IFromUnicode
from guillotina.schema._schema import get_fields
from guillotina.schema.exceptions import ConstraintNotSatisfied
from guillotina.schema.exceptions import NotAContainer
from guillotina.schema.exceptions import NotAnIterator
from guillotina.schema.exceptions import RequiredMissing
from guillotina.schema.exceptions import StopValidation
from guillotina.schema.exceptions import TooBig
from guillotina.schema.exceptions import TooLong
from guillotina.schema.exceptions import TooShort
from guillotina.schema.exceptions import TooSmall
from guillotina.schema.exceptions import WrongType
from typing import Any
from zope.interface import Attribute
from zope.interface import implementer
from zope.interface import providedBy
__docformat__ = 'restructuredtext'
# XXX This class violates the Liskov Substituability Principle: it
# is derived from Container, but cannot be used everywhere an instance
# of Container could be, because it's '_validate' is more restrictive.
class Orderable(object):
"""Values of ordered fields can be sorted.
They can be restricted to a range of values.
Orderable is a mixin used in combination with Field.
"""
min = ValidatedProperty('min')
max = ValidatedProperty('max')
class MinMaxLen(object):
"""Expresses constraints on the length of a field.
MinMaxLen is a mixin used in combination with Field.
"""
min_length = 0
max_length = None
@implementer(IFromUnicode)
class Text(MinMaxLen, Field):
"""A field containing text used for human discourse."""
_type = str
def from_unicode(self, str):
"""
>>> t = Text(constraint=lambda v: 'x' in v)
>>> t.from_unicode(b"foo x spam")
Traceback (most recent call last):
...
WrongType: ('foo x spam', <type 'unicode'>, '')
>>> t.from_unicode("foo x spam")
u'foo x spam'
>>> t.from_unicode("foo spam")
Traceback (most recent call last):
...
ConstraintNotSatisfied: ('foo spam', '')
"""
self.validate(str)
return str
class TextLine(Text):
"""A text field with no newlines."""
class Password(TextLine):
"""A text field containing a text used as a password."""
UNCHANGED_PASSWORD = object()
def set(self, context, value):
"""Update the password.
We use a special marker value that a widget can use
to tell us that the password didn't change. This is
needed to support edit forms that don't display the
existing password and want to work together with
encryption.
"""
if value is self.UNCHANGED_PASSWORD:
return
super(Password, self).set(context, value)
class Bool(Field):
"""A field representing a Bool."""
_type = bool
def from_unicode(self, str):
"""
>>> b = Bool()
>>> IFromUnicode.providedBy(b)
True
>>> b.from_unicode('True')
True
>>> b.from_unicode('')
False
>>> b.from_unicode('true')
True
>>> b.from_unicode('false') or b.from_unicode('False')
False
"""
v = str == 'True' or str == 'true'
self.validate(v)
return v
@implementer(IFromUnicode)
class Int(Orderable, Field):
"""A field representing an Integer."""
_type = int
def from_unicode(self, str):
"""
>>> f = Int()
>>> f.from_unicode("125")
125
>>> f.from_unicode("125.6") #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: invalid literal for int(): 125.6
"""
v = int(str)
self.validate(v)
return v
| 31.74023 | 82 | 0.616064 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from guillotina.schema._bootstrapinterfaces import IContextAwareDefaultFactory
from guillotina.schema._bootstrapinterfaces import IFromUnicode
from guillotina.schema._schema import get_fields
from guillotina.schema.exceptions import ConstraintNotSatisfied
from guillotina.schema.exceptions import NotAContainer
from guillotina.schema.exceptions import NotAnIterator
from guillotina.schema.exceptions import RequiredMissing
from guillotina.schema.exceptions import StopValidation
from guillotina.schema.exceptions import TooBig
from guillotina.schema.exceptions import TooLong
from guillotina.schema.exceptions import TooShort
from guillotina.schema.exceptions import TooSmall
from guillotina.schema.exceptions import WrongType
from typing import Any
from zope.interface import Attribute
from zope.interface import implementer
from zope.interface import providedBy
__docformat__ = 'restructuredtext'
class ValidatedProperty(object):
    """Data descriptor that validates a value before storing it.

    *name* is the instance-dict key the value is stored under; *check*
    (optional) is a callable ``check(inst, value)`` used in place of the
    owning field's ``validate`` method.
    """

    def __init__(self, name, check=None):
        self._info = name, check

    def __set__(self, inst, value):
        name, check = self._info
        # The field's missing_value sentinel is always accepted unvalidated.
        if value != inst.missing_value:
            if check is not None:
                check(inst, value)
            else:
                inst.validate(value)
        inst.__dict__[name] = value

    def __get__(self, inst, owner):
        name, check = self._info
        # Raises KeyError if the property was never set on this instance.
        return inst.__dict__[name]
class DefaultProperty(ValidatedProperty):
    """ValidatedProperty whose read honors an optional ``defaultFactory``.

    When the owning field has a ``defaultFactory``, reading the property
    calls the factory (context-aware factories receive ``inst.context``)
    and validates the produced value; otherwise the stored default is
    returned unchanged.
    """

    def __get__(self, inst, owner):
        name, check = self._info
        default_factory = inst.__dict__.get('defaultFactory')
        # If there is no default factory, simply return the default.
        if default_factory is None:
            return inst.__dict__[name]
        # Get the default value by calling the factory. Some factories might
        # require a context to produce a value.
        if IContextAwareDefaultFactory.providedBy(default_factory):
            value = default_factory(inst.context)
        else:
            value = default_factory()
        # Check that the created value is valid.
        if check is not None:
            check(inst, value)
        elif value != inst.missing_value:
            inst.validate(value)
        return value
class Field(Attribute):
    """Base class for all schema fields.

    Carries the field's metadata (title, description, required, readonly,
    default, constraint, ...) and implements binding to a context object,
    validation, equality and attribute get/set on content objects.
    """

    # Type restrictions, if any
    _type: Any = None
    context = None

    # If a field has no assigned value, it will be set to missing_value.
    missing_value = None

    # This is the default value for the missing_value argument to the
    # Field constructor. A marker is helpful since we don't want to
    # overwrite missing_value if it is set differently on a Field
    # subclass and isn't specified via the constructor.
    __missing_value_marker = object()

    # Note that the "order" field has a dual existance:
    # 1. The class variable Field.order is used as a source for the
    #    monotonically increasing values used to provide...
    # 2. The instance variable self.order which provides a
    #    monotonically increasing value that tracks the creation order
    #    of Field (including Field subclass) instances.
    order = 0

    default = DefaultProperty('default')

    # These were declared as slots in zope.interface, we override them here to
    # get rid of the descriptors so they don't break .bind()
    __name__ = None
    interface = None
    _Element__tagged_values = None

    def __init__(self, title='', description='', __name__='',
                 required=True, readonly=False, constraint=None, default=None,
                 defaultFactory=None, missing_value=__missing_value_marker, **kw):
        """Pass in field values as keyword parameters.

        Generally, you want to pass either a title and description, or
        a doc string. If you pass no doc string, it will be computed
        from the title and description. If you pass a doc string that
        follows the Python coding style (title line separated from the
        body by a blank line), the title and description will be
        computed from the doc string. Unfortunately, the doc string
        must be passed as a positional argument.

        Here are some examples:

        >>> f = Field()
        >>> f.__doc__, f.title, f.description
        ('', u'', u'')

        >>> f = Field(title='sample')
        >>> f.__doc__, f.title, f.description
        (u'sample', u'sample', u'')

        >>> f = Field(title='sample', description='blah blah\\nblah')
        >>> f.__doc__, f.title, f.description
        (u'sample\\n\\nblah blah\\nblah', u'sample', u'blah blah\\nblah')
        """
        __doc__ = ''
        if title:
            if description:
                __doc__ = "%s\n\n%s" % (title, description)
            else:
                __doc__ = title
        elif description:
            __doc__ = description
        super(Field, self).__init__(__name__, __doc__)
        self.title = title
        self.description = description
        self.required = required
        self.readonly = readonly
        # A caller-supplied constraint shadows the permissive default below.
        if constraint is not None:
            self.constraint = constraint
        self.default = default
        self.defaultFactory = defaultFactory
        # Keep track of the order of field definitions
        Field.order += 1
        self.order = Field.order
        self.extra_values = kw
        if missing_value is not self.__missing_value_marker:
            self.missing_value = missing_value

    def constraint(self, value):  # type: ignore
        # Default constraint accepts every value; subclasses or the
        # constructor argument override it.
        return True

    def bind(self, object):
        # Shallow-clone this field and attach it to a context object.
        clone = self.__class__.__new__(self.__class__)
        clone.__dict__.update(self.__dict__)
        clone.context = object
        return clone

    def validate(self, value):
        if value == self.missing_value:
            if self.required:
                raise RequiredMissing(self.__name__)
        else:
            try:
                self._validate(value)
            except StopValidation:
                # StopValidation means the value is acceptable as-is.
                pass

    def __eq__(self, other):
        # should be the same type
        if type(self) != type(other):
            return False

        # should have the same properties
        names = {}  # used as set of property names, ignoring values
        for interface in providedBy(self):
            names.update(get_fields(interface))

        # order will be different always, don't compare it
        if 'order' in names:
            del names['order']
        for name in names:
            if getattr(self, name) != getattr(other, name):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _validate(self, value):
        # Type check first, then the (possibly user-supplied) constraint.
        if self._type is not None and not isinstance(value, self._type):
            raise WrongType(value, self._type, self.__name__)

        if not self.constraint(value):
            raise ConstraintNotSatisfied(value, self.__name__)

    def get(self, object):
        return getattr(object, self.__name__)

    def query(self, object, default=None):
        return getattr(object, self.__name__, default)

    def set(self, object, value):
        if self.readonly:
            raise TypeError("Can't set values on read-only fields "
                            "(name=%s, class=%s.%s)"
                            % (self.__name__,
                               object.__class__.__module__,
                               object.__class__.__name__))
        setattr(object, self.__name__, value)
class Container(Field):
    """Field whose value must support containment (``in``) checks."""

    def _validate(self, value):
        super(Container, self)._validate(value)
        # EAFP fallback: anything lacking __contains__ must at least be
        # iterable to count as a container.
        if not hasattr(value, '__contains__'):
            try:
                iter(value)
            except TypeError:
                raise NotAContainer(value)
# XXX This class violates the Liskov Substituability Principle: it
# is derived from Container, but cannot be used everywhere an instance
# of Container could be, because it's '_validate' is more restrictive.
class Iterable(Container):
    """Container field whose value must additionally be iterable."""

    def _validate(self, value):
        super(Iterable, self)._validate(value)

        # See if we can get an iterator for it
        try:
            iter(value)
        except TypeError:
            raise NotAnIterator(value)
class Orderable(object):
    """Values of ordered fields can be sorted.

    They can be restricted to a range of values by the optional *min*
    and *max* bounds (validated like any other field value).

    Orderable is a mixin used in combination with Field.
    """

    min = ValidatedProperty('min')
    max = ValidatedProperty('max')

    def __init__(self, min=None, max=None, default=None, **kw):
        # Set min and max to None so that we can validate if
        # one of the super methods invoke validation.
        self.min = None
        self.max = None

        super(Orderable, self).__init__(**kw)

        # Now really set min and max
        self.min = min
        self.max = max

        # We've taken over setting default so it can be limited by min
        # and max.
        self.default = default

    def _validate(self, value):
        super(Orderable, self)._validate(value)
        # Range check: out-of-bounds values raise TooSmall/TooBig.
        if self.min is not None and value < self.min:
            raise TooSmall(value, self.min)

        if self.max is not None and value > self.max:
            raise TooBig(value, self.max)
class MinMaxLen(object):
    """Expresses constraints on the length of a field.

    MinMaxLen is a mixin used in combination with Field; values must
    satisfy ``min_length <= len(value) <= max_length`` (a max_length of
    None means unbounded).
    """
    min_length = 0
    max_length = None

    def __init__(self, min_length=0, max_length=None, **kw):
        self.min_length = min_length
        self.max_length = max_length
        super(MinMaxLen, self).__init__(**kw)

    def _validate(self, value):
        super(MinMaxLen, self)._validate(value)
        if self.min_length is not None and len(value) < self.min_length:
            raise TooShort(value, self.min_length)

        if self.max_length is not None and len(value) > self.max_length:
            raise TooLong(value, self.max_length)
@implementer(IFromUnicode)
class Text(MinMaxLen, Field):
    """A field containing text used for human discourse."""

    _type = str

    # The former ``__init__`` only forwarded *args/**kw to super() and has
    # been removed; construction is inherited from MinMaxLen unchanged.

    def from_unicode(self, str):
        """Validate *str* against this field and return it unchanged.

        >>> t = Text(constraint=lambda v: 'x' in v)
        >>> t.from_unicode(b"foo x spam")
        Traceback (most recent call last):
        ...
        WrongType: ('foo x spam', <type 'unicode'>, '')
        >>> t.from_unicode("foo x spam")
        u'foo x spam'
        >>> t.from_unicode("foo spam")
        Traceback (most recent call last):
        ...
        ConstraintNotSatisfied: ('foo spam', '')
        """
        self.validate(str)
        return str
class TextLine(Text):
    """A text field with no newlines."""

    def constraint(self, value):
        # A single line: reject any line feed or carriage return.
        return not ('\n' in value or '\r' in value)
class Password(TextLine):
    """A text field containing a text used as a password."""

    # Sentinel a widget can pass to mean "leave the stored password as-is".
    UNCHANGED_PASSWORD = object()

    def set(self, context, value):
        """Update the password.

        We use a special marker value that a widget can use
        to tell us that the password didn't change. This is
        needed to support edit forms that don't display the
        existing password and want to work together with
        encryption.
        """
        if value is self.UNCHANGED_PASSWORD:
            return
        super(Password, self).set(context, value)

    def validate(self, value):
        # Determine whether a password is already stored on the bound
        # context; an unbound field (context=None) counts as "not set".
        try:
            existing = bool(self.get(self.context))
        except AttributeError:
            existing = False
        if value is self.UNCHANGED_PASSWORD and existing:
            # Allow the UNCHANGED_PASSWORD value, if a password is set already
            return
        return super(Password, self).validate(value)
class Bool(Field):
    """A field representing a Bool."""

    _type = bool

    def _validate(self, value):
        # Coerce plain ints to bool so 0/1 pass the isinstance check.
        value = bool(value) if isinstance(value, int) else value
        Field._validate(self, value)

    def set(self, object, value):
        # Store a genuine bool even when callers hand in 0/1.
        value = bool(value) if isinstance(value, int) else value
        Field.set(self, object, value)

    def from_unicode(self, str):
        """Parse a textual boolean: only 'True'/'true' map to True.

        >>> b = Bool()
        >>> IFromUnicode.providedBy(b)
        True
        >>> b.from_unicode('True')
        True
        >>> b.from_unicode('')
        False
        >>> b.from_unicode('true')
        True
        >>> b.from_unicode('false') or b.from_unicode('False')
        False
        """
        parsed = str in ('True', 'true')
        self.validate(parsed)
        return parsed
@implementer(IFromUnicode)
class Int(Orderable, Field):
    """A field representing an Integer."""

    _type = int

    # The former ``__init__`` only forwarded *args/**kw to super() and has
    # been removed; construction is inherited from Orderable unchanged.

    def from_unicode(self, str):
        """Parse *str* as an int, validate it, and return it.

        >>> f = Int()
        >>> f.from_unicode("125")
        125
        >>> f.from_unicode("125.6") #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: invalid literal for int(): 125.6
        """
        v = int(str)
        self.validate(v)
        return v
| 5,328 | 3,431 | 546 |
8ce18082211b909eac5b9538822052963b829027 | 9,972 | py | Python | linux/particle_filter_python_by_markus_waldmann/particle_filter.py | juebrauer/Solutions-Exercises-MultimodalSensorSystems | b1f4bae5ce21d992ff740804af07b227d34f828b | [
"Unlicense"
] | 2 | 2021-06-15T11:50:21.000Z | 2022-01-05T15:52:06.000Z | linux/particle_filter_python_by_markus_waldmann/particle_filter.py | juebrauer/Solutions-Exercises-MultimodalSensorSystems | b1f4bae5ce21d992ff740804af07b227d34f828b | [
"Unlicense"
] | null | null | null | linux/particle_filter_python_by_markus_waldmann/particle_filter.py | juebrauer/Solutions-Exercises-MultimodalSensorSystems | b1f4bae5ce21d992ff740804af07b227d34f828b | [
"Unlicense"
] | 1 | 2021-05-23T19:10:54.000Z | 2021-05-23T19:10:54.000Z | # A straightforward implementation
# of the particle filter idea
#
# A particle filter is a sample based approach
# for recursive Bayesian filtering
# The particles are a population based discrete
# representation of a probability density function.
#
# The filter recursively updates
#
# - the particle locations according to a
# probabilistic motion model
# (prediction update step)
#
# - recomputes importance weights for each particle
# (measurement update step)
#
# - resamples the particles according to the current
# pdf represented by the importance weights
#
# ---
# by Prof. Dr. Juergen Brauer, www.juergenbrauer.org
# ported from C++ to Python by Markus Waldmann.
from dataclasses import dataclass
import numpy as np
from params import *
#
# for each particle we store its location in state space
# and an importance weight
@dataclass
all_particles = list() # list of all particles
particle_with_highest_weight = Particle(np.zeros(1), 0)
#
# base class for motion & measurement update models
#
#
# update location or important weight of the specified particle
#
#
# represents a probability distribution using
# a set of discrete particles
#
#
# method to set pointer to user data
# needed to access in motion or perception model
#
#
# reset positions & weights of all particles
# to start conditions
#
#
# should be used by the user to specify in which range [min_value,max_value]
# the <param_nr>-th parameter of the state space lives
#
#
# for the specified particle we guess a random start location
#
#
# set initial location in state space for all particles
#
#
# returns a copy of an existing particle
# the particle to be copied is chosen according to
# a probability that is proportional to its importance weight
#
#
# one particle filter update step
#
| 36.394161 | 103 | 0.63919 | # A straightforward implementation
# of the particle filter idea
#
# A particle filter is a sample based approach
# for recursive Bayesian filtering
# The particles are a population based discrete
# representation of a probability density function.
#
# The filter recursively updates
#
# - the particle locations according to a
# probabilistic motion model
# (prediction update step)
#
# - recomputes importance weights for each particle
# (measurement update step)
#
# - resamples the particles according to the current
# pdf represented by the importance weights
#
# ---
# by Prof. Dr. Juergen Brauer, www.juergenbrauer.org
# ported from C++ to Python by Markus Waldmann.
from dataclasses import dataclass
import numpy as np
from params import *
#
# for each particle we store its location in state space
# and an importance weight
@dataclass
class Particle:
    # Location of the particle in state space (one entry per dimension).
    state: np.ndarray
    # Importance weight of this particle within the population.
    weight: float

    # operator =
    def __copy__(self):
        # NOTE(review): this is a shallow copy -- the returned Particle
        # shares the same state ndarray as the original, so mutating one
        # mutates the other. Confirm that is intended before relying on it.
        return Particle(self.state, self.weight) # copy of object
all_particles = list() # list of all particles
particle_with_highest_weight = Particle(np.zeros(1), 0)
#
# base class for motion & measurement update models
#
class Particle_filter_update_model:
    """Base class for motion (prediction) and measurement (perception)
    update models; subclasses override :meth:`update_particle`."""

    def particle_filter_update_model(self):
        # NOTE(review): leftover from the C++ original (mirrors a C++
        # constructor name); it is a no-op and appears to be dead code.
        pass

    #
    # update location or important weight of the specified particle
    #
    def update_particle(self, particle):
        # default: no motion at all / no importance weight change
        # implement your motion model + perception model in an own subclass
        pass
# represents a probability distribution using
# a set of discrete particles
#
class Particle_filter:
    def __init__(self, population_size, state_space_dimension):
        """Create *population_size* particles in a *state_space_dimension*-
        dimensional state space, all at the origin with uniform weight 1/N.

        NOTE(review): particles are appended to the module-level
        ``all_particles`` list rather than to a per-instance list, so
        constructing a second Particle_filter would mix both populations.
        """
        self.ptr_user_data = None
        # 1. save infos about nr of particles to generate
        # & dimension of the state space
        self.population_size = population_size
        self.state_space_dimension = state_space_dimension
        # 2. create the desired number of particles
        for i in range(population_size):
            # 2.1 create new particle object
            # 2.2 create vector for storing the particles location
            # in state space
            state = np.zeros(state_space_dimension)
            # 2.3 set initial weight of this particle
            weight = 1.0/population_size
            # NOTE(review): self.particle only ever holds the last particle
            # created; it looks like a loop-local that leaked onto the
            # instance.
            self.particle = Particle(state,weight)
            # 2.4 store pointer to this particle
            all_particles.append(self.particle)
        # 3. prepare arrays for minimum and maximum coordinates
        # of each state space dimension
        self.min_values = np.zeros(state_space_dimension)
        self.max_values = np.zeros(state_space_dimension)
        self.range_sizes = np.zeros(state_space_dimension)
        # 4. we have no motion and perception model yet
        self.your_prediction_model = None
        self.your_perception_model = None
        # 5. start locations of particles not yet set!
        self.start_locations_initalized = False
        # 6. no update steps done so far
        self.nr_update_steps_done = 0
        # 7. helper data structure for Monte Carlo step
        self.segmentation_of_unit_interval = np.zeros(population_size + 1)
# method to set pointer to user data
# needed to access in motion or perception model
#
    def set_userdata(self, ptr_user_data):
        """Attach arbitrary user data for motion/perception models to read."""
        self.ptr_user_data = ptr_user_data
#
# reset positions & weights of all particles
# to start conditions
#
def reset_positions_and_weights(self):
# 1. reset positions
self.set_random_start_states_for_all_particles()
# 2. reset weights
for p in all_particles:
p.weight = 1.0 / self.population_size
#
# should be used by the user to specify in which range [min_value,max_value]
# the <param_nr>-th parameter of the state space lives
#
    def set_param_ranges(self, param_nr, min_value, max_value):
        """Declare that state dimension *param_nr* lives in [min_value, max_value]."""
        self.min_values[param_nr] = min_value
        self.max_values[param_nr] = max_value
        # Cached span, used e.g. to scale the resampling jitter.
        self.range_sizes[param_nr] = max_value - min_value
#
# for the specified particle we guess a random start location
#
    def set_random_start_state_for_specified_particle(self, particle):
        """Place *particle* at a random location inside the declared ranges."""
        for i in range(self.state_space_dimension):
            # NOTE(review): randint draws whole numbers in [min, max) only --
            # continuous state spaces never receive fractional start values
            # and max_values[i] itself is never drawn. np.random.uniform may
            # have been intended (the resampling jitter uses uniform).
            particle.state[i] = np.random.randint(self.min_values[i], self.max_values[i])
#
# set initial location in state space for all particles
#
    def set_random_start_states_for_all_particles(self):
        """Randomize the start state of every particle and mark the
        population as initialized."""
        for particle in all_particles:
            self.set_random_start_state_for_specified_particle(particle)
        self.start_locations_initalized = True
#
# returns a copy of an existing particle
# the particle to be copied is chosen according to
# a probability that is proportional to its importance weight
#
def sample_one_particle_according_to_importance_weight_pdf(self):
# 1. guess a random number from [0,1]
rndVal = np.random.uniform(0, 1)
# 2. to which particle does the interval segment belong
# in which this random number lies?
idx = -1
for i, particle in enumerate(all_particles):
# 2.1 get next segment of partition of unit interval
a = self.segmentation_of_unit_interval[i]
b = self.segmentation_of_unit_interval[i + 1]
# 2.2 does the rndVal lie in the interval [a,b] of [0,1] that belongs to particle i?
if a <= rndVal <= b:
idx = i
break
if idx == -1:
idx = len(all_particles) - 1
# 3. particle with index <idx> has won! we will resample this particle for the next iteration!
winner_particle = all_particles[idx]
# 4. return a _similar_ 'copy' of that particle
# 4.1 generate new copy particle
# 4.2 copy location of that particle in state space
copy_state = winner_particle.state.copy()
# 4.3 copy shall be similar, not 100% identical
value_range = 0.01 * self.range_sizes
for state in copy_state:
state += np.random.uniform(-value_range, value_range)
# 4.4 weight is reset to 1/N
copy_weight = 1.0 / self.population_size
# 5. return particle copy
return Particle(copy_state, copy_weight)
#
# one particle filter update step
#
def update(self):
global all_particles
# 1. did the user specify a motion and a perception update model?
if self.your_prediction_model is None or self.your_perception_model is None:
return
# 2. set initial particle locations?
if not self.start_locations_initalized:
self.set_random_start_states_for_all_particles()
# 3. update each particle
# 3.1 get next particle
for particle in all_particles:
# 3.2 move that particle according to prediction
if DO_PREDICTION_STEP:
self.your_prediction_model.update_particle(particle)
# 3.3 move that particle according to measurement
if DO_MEASUREMENT_CORRECTION_STEP:
self.your_perception_model.update_particle(particle)
# 3.4 make sure, particles do not leave state space!
for i, state in enumerate(particle.state):
if state < self.min_values[i]: state = self.min_values[i]
if state > self.max_values[i]: state = self.max_values[i]
# 4. normalize importance weights
# 4.1 compute sum of all weights
sum_weights = 0
for particle in all_particles:
sum_weights += particle.weight
# 4.2 normalize each particle weight
for particle in all_particles:
particle.weight /= sum_weights
# 5. resample complete particle population based on
# current importance weights of current particles?
if RESAMPLING:
# 5.1 compute division of unit interval [0,1]
# such that each particle gets a piece of that interval
# where the length of the interval is proportional to its importance weight
next_border = 0.0
self.segmentation_of_unit_interval[0] = 0.0
# get next particle
for i, particle in enumerate(all_particles):
# compute next border
next_border += particle.weight
# compute next border in unit interval
self.segmentation_of_unit_interval[i + 1] = next_border
# 5.2 generate new particle population
new_population = list()
for particle in all_particles:
new_population.append(self.sample_one_particle_according_to_importance_weight_pdf())
# 5.2.1 Set X Percentage of the particles randomly for recover
if RESAMPLING_PERCENTAGE:
n_rnd_particles = (self.population_size // 100) * int(RESAMPLING_PERCENTAGE)
for particle in new_population[:n_rnd_particles]:
self.set_random_start_state_for_specified_particle(particle)
# 5.3 delete old population
all_particles.clear()
# 5.4 set new sample population as current population
all_particles = new_population.copy()
# 6. find particle with highest weight / highest prob
global particle_with_highest_weight
for particle in all_particles:
if particle_with_highest_weight.weight < particle.weight:
particle_with_highest_weight = particle
return all_particles
| 7,522 | 98 | 338 |
a3f3404d2f30af7f5f47ca1398c6a75278f5817b | 6,100 | py | Python | code/main.py | michael-1003/EfficientNet-1D | ec7ff79fbde647b97d5c60359eba3c68beae934b | [
"MIT"
] | 4 | 2021-06-23T02:58:42.000Z | 2022-01-19T11:09:38.000Z | code/main.py | michael-1003/EfficientNet-1D | ec7ff79fbde647b97d5c60359eba3c68beae934b | [
"MIT"
] | 1 | 2021-06-23T03:19:45.000Z | 2021-06-23T03:19:45.000Z | code/main.py | michael-1003/EfficientNet-1D | ec7ff79fbde647b97d5c60359eba3c68beae934b | [
"MIT"
] | 3 | 2021-05-22T09:24:50.000Z | 2021-06-23T03:13:08.000Z | import os
import time
import sys
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from configurator import read_config, select_data, select_model, select_loss, select_optimizer
from src.pipeline import NLUDataset
from src.utils import make_dir, save_train_log, save_ckpt, load_ckpt
from src.train import train_1epoch
from src.evaluate import evaluate
#%% Input args
# Run this file in background: nohup python main.py --config_fname=sample > log_sample.txt &
parser = argparse.ArgumentParser()
parser.add_argument('--config_fname', type=str, default='sample')
parser.add_argument('--overwrite', type=bool, default=True)
args = parser.parse_args()
#%%
CONFIG_FNAME = args.config_fname
try:
    experiment_configs = read_config(CONFIG_FNAME)
except ValueError:
    # BUGFIX: previously only printed and fell through, which later raised a
    # confusing NameError when experiment_configs was used; exit cleanly
    # instead since there is nothing to run without a config.
    print('There is no such configure file!')
    sys.exit(1)
RESULT_ROOT_DIR = make_dir('../results', CONFIG_FNAME, overwrite=args.overwrite)
#%%
#%% =============================================== main
if __name__ == "__main__":
    # Run every experiment case listed in the config file and collect the
    # final test accuracy of each case.
    result = []
    num_exps = len(experiment_configs)
    for i in range(num_exps):
        config = experiment_configs[i]
        print('########################################################')
        print('# Config: %s, Case: %d'\
            %(CONFIG_FNAME,config['case_num']))
        test_acc = main(config)
        print('# Config: %s, Case: %d, Acc: %.4f'\
            %(CONFIG_FNAME,experiment_configs[i]['case_num'],test_acc))
        print('########################################################')
        result.append([config['case_num'], test_acc])
    # Persist (case_num, test_accuracy) pairs as CSV alongside the per-case
    # result directories.
    test_results = np.array(result)
    np.savetxt('%s/test_results.txt'%RESULT_ROOT_DIR, test_results, delimiter=',')
| 39.354839 | 127 | 0.581475 | import os
import time
import sys
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from configurator import read_config, select_data, select_model, select_loss, select_optimizer
from src.pipeline import NLUDataset
from src.utils import make_dir, save_train_log, save_ckpt, load_ckpt
from src.train import train_1epoch
from src.evaluate import evaluate
#%% Input args
# Run this file in background: nohup python main.py --config_fname=sample > log_sample.txt &
parser = argparse.ArgumentParser()
parser.add_argument('--config_fname', type=str, default='sample')
parser.add_argument('--overwrite', type=bool, default=True)
args = parser.parse_args()
#%%
CONFIG_FNAME = args.config_fname
try:
    experiment_configs = read_config(CONFIG_FNAME)
except ValueError:
    # BUGFIX: previously only printed and fell through, which later raised a
    # confusing NameError when experiment_configs was used; exit cleanly
    # instead since there is nothing to run without a config.
    print('There is no such configure file!')
    sys.exit(1)
RESULT_ROOT_DIR = make_dir('../results', CONFIG_FNAME, overwrite=args.overwrite)
#%%
def main(config):
    """Train, validate, and test one model as described by *config*.

    Builds the dataset loaders, model, loss, optimizer, and LR scheduler from
    the config dict; trains for ``max_epoch`` epochs, checkpointing whenever
    validation accuracy improves; then reloads the best checkpoint and
    returns its test-set accuracy.
    """
    # ---- unpack experiment configuration --------------------------------
    CASE_NUM = config['case_num']
    DATASET = config['dataset']
    NORMALIZATION = config['normalization']
    BATCH_SIZE = config['batch_size']
    MAX_EPOCH = config['max_epoch']
    OPTIM_TYPE = config['optimizer']
    LR = config['learning_rate']
    LR_STEP = config['lr_step']
    LR_DECAY = config['lr_decay']
    L2_DECAY = config['l2_decay']
    # NOTE(review): TB_STATE and GAMMA are read here but never used below —
    # confirm whether tensorboard logging / gamma scaling was meant to be wired in.
    TB_STATE = config['use_tensorboard']
    MODEL_NAME = config['model_name']
    ALPHA = config['alpha']
    BETA = config['beta']
    GAMMA = config['gamma']
    PHI = config['phi']
    LOSS_FN = config['loss_fn']
    KERNEL_SIZE = config['kernel_size']
    # per-case output directory and checkpoint location
    result_dir = make_dir(RESULT_ROOT_DIR, str(CASE_NUM), overwrite=args.overwrite)
    ckpt_path = result_dir + '/' + 'checkpoint.pt'
    # =============================================== Select data and construct
    data_fname, data_dim = select_data(DATASET)
    data_path = '../data/' + data_fname
    data_train = NLUDataset(data_path, mode='train', normalization=NORMALIZATION, random_seed=42)
    dataloader_train = DataLoader(data_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
    data_valid = NLUDataset(data_path, mode='valid', normalization=NORMALIZATION, random_seed=42)
    dataloader_valid = DataLoader(data_valid, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
    data_test = NLUDataset(data_path, mode='test', normalization=NORMALIZATION, random_seed=42)
    dataloader_test = DataLoader(data_test, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
    num_train_samples = data_train.__len__()
    classes = data_train.labels
    num_classes = len(classes)
    # =============================================== Initialize model and optimizer
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    if device=='cuda': print('Using GPU, %s' % torch.cuda.get_device_name(0))
    net = select_model(MODEL_NAME, data_dim, KERNEL_SIZE, num_classes, ALPHA, BETA, PHI)
    net.to(device)
    loss_fn = select_loss(LOSS_FN)
    optimizer = select_optimizer(OPTIM_TYPE, net.parameters(), LR, L2_DECAY)
    scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=LR_STEP, gamma=LR_DECAY)
    # =============================================== Train
    it = 0
    train_losses, valid_losses, valid_accs = {}, {}, {}
    best_validation_acc = 0
    log_term = 5
    for epoch in range(MAX_EPOCH):
        #------------------------------------------------ One epoch start
        one_epoch_start = time.time()
        # NOTE(review): scheduler.get_lr() is deprecated in newer torch
        # versions in favor of get_last_lr() — confirm the torch version in use.
        print('Epoch {} / Learning Rate: {:.0e}'.format(epoch,scheduler.get_lr()[0]))
        #------------------------------------------------ Train
        train_losses, it, net, optimizer, scheduler \
            = train_1epoch(dataloader_train, device, train_losses, it, net, loss_fn, optimizer, scheduler, log_every=log_term)
        #------------------------------------------------ Validation
        valid_acc, valid_loss = evaluate(dataloader_valid, device, net, loss_fn)
        valid_losses[it] = valid_loss
        valid_accs[it] = valid_acc
        #------------------------------------------------ Save model
        # checkpoint only when validation accuracy improves
        saved = ''
        if valid_acc > best_validation_acc:
            best_validation_acc = valid_acc
            saved = save_ckpt(ckpt_path, net, best_validation_acc)
        print('Epoch {} / Valid loss: {:.4f}, Valid acc: {:.4f} {}'.format(epoch, valid_loss, valid_acc, saved))
        #------------------------------------------------ One epoch end
        curr_time = time.time()
        print("One epoch time = %.2f s" %(curr_time-one_epoch_start))
        print('#------------------------------------------------------#')
    save_train_log(result_dir, train_losses, valid_losses, valid_accs, best_validation_acc)
    # =============================================== Test
    # reload the best checkpoint before the final test evaluation
    net, best_validation_acc = load_ckpt(ckpt_path, net)
    test_acc, test_loss = evaluate(dataloader_test, device, net, loss_fn)
    return test_acc
#%% =============================================== main
if __name__ == "__main__":
    # Run every experiment case listed in the config file and collect the
    # final test accuracy of each case.
    result = []
    num_exps = len(experiment_configs)
    for i in range(num_exps):
        config = experiment_configs[i]
        print('########################################################')
        print('# Config: %s, Case: %d'\
            %(CONFIG_FNAME,config['case_num']))
        test_acc = main(config)
        print('# Config: %s, Case: %d, Acc: %.4f'\
            %(CONFIG_FNAME,experiment_configs[i]['case_num'],test_acc))
        print('########################################################')
        result.append([config['case_num'], test_acc])
    # Persist (case_num, test_accuracy) pairs as CSV alongside the per-case
    # result directories.
    test_results = np.array(result)
    np.savetxt('%s/test_results.txt'%RESULT_ROOT_DIR, test_results, delimiter=',')
| 4,170 | 0 | 23 |
65f118417289436d5364d0e0b735d5d5d1c86599 | 7,589 | py | Python | tests/test_bigquery.py | gzuidhof/docker-python | 45afc4d02a457735ee627223006a735a588d6447 | [
"Apache-2.0"
] | null | null | null | tests/test_bigquery.py | gzuidhof/docker-python | 45afc4d02a457735ee627223006a735a588d6447 | [
"Apache-2.0"
] | null | null | null | tests/test_bigquery.py | gzuidhof/docker-python | 45afc4d02a457735ee627223006a735a588d6447 | [
"Apache-2.0"
] | null | null | null | import unittest
import os
import json
from unittest.mock import patch
import threading
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from google.cloud.bigquery._http import Connection
from kaggle_gcp import KaggleKernelCredentials, PublicBigqueryClient, _DataProxyConnection
import kaggle_secrets
| 45.716867 | 124 | 0.675188 | import unittest
import os
import json
from unittest.mock import patch
import threading
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from google.cloud.bigquery._http import Connection
from kaggle_gcp import KaggleKernelCredentials, PublicBigqueryClient, _DataProxyConnection
import kaggle_secrets
class TestBigQuery(unittest.TestCase):
    """Integration-style tests for BigQuery client credential routing.

    Each test spins up a local fake BigQuery HTTP server (via
    ``_test_integration``) and verifies that the client sends the expected
    Bearer token, under various combinations of the
    KAGGLE_USER_SECRETS_TOKEN / KAGGLE_KERNEL_INTEGRATIONS environment
    variables.
    """
    # local fake-server endpoint that the patched Connection.API_BASE_URL
    # points at
    API_BASE_URL = "http://127.0.0.1:2121"
    def _test_integration(self, client):
        """Serve one fake dataset-list response and assert *client* hit it
        with the expected Bearer authorization header."""
        class HTTPHandler(BaseHTTPRequestHandler):
            # class-level flags so assertions can inspect them after the
            # server thread handled the request
            called = False
            bearer_header_found = False
            def do_HEAD(self):
                self.send_response(200)
            def do_GET(self):
                HTTPHandler.called = True
                HTTPHandler.bearer_header_found = any(
                    k for k in self.headers if k == "authorization" and self.headers[k] == "Bearer secret")
                self.send_response(200)
                self.send_header("Content-type", "application/json")
                self.end_headers()
                sample_dataset = {
                    "id": "bigqueryproject:datasetname",
                    "datasetReference": {
                        "datasetId": "datasetname",
                        "projectId": "bigqueryproject"
                    }
                }
                self.wfile.write(json.dumps({"kind": "bigquery#datasetList", "datasets": [sample_dataset]}).encode("utf-8"))
        server_address = urlparse(self.API_BASE_URL)
        with HTTPServer((server_address.hostname, server_address.port), HTTPHandler) as httpd:
            threading.Thread(target=httpd.serve_forever).start()
            for dataset in client.list_datasets():
                self.assertEqual(dataset.dataset_id, "datasetname")
            httpd.shutdown()
        self.assertTrue(
            HTTPHandler.called, msg="Fake server was not called from the BQ client, but should have been.")
        self.assertTrue(
            HTTPHandler.bearer_header_found, msg="authorization header was missing from the BQ request.")
    def _setup_mocks(self, api_url_mock):
        """Point the mocked Connection.API_BASE_URL at the local fake server."""
        api_url_mock.__str__.return_value = self.API_BASE_URL
    @patch.object(Connection, 'API_BASE_URL')
    @patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
    def test_project_with_connected_account(self, mock_access_token, ApiUrlMock):
        """Explicit project + KaggleKernelCredentials routes to user account."""
        self._setup_mocks(ApiUrlMock)
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        with env:
            client = bigquery.Client(
                project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
            self._test_integration(client)
    @patch.object(Connection, 'API_BASE_URL')
    @patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
    def test_project_with_empty_integrations(self, mock_access_token, ApiUrlMock):
        """Credential routing works even with an empty integrations list."""
        self._setup_mocks(ApiUrlMock)
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', '')
        with env:
            client = bigquery.Client(
                project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
            self._test_integration(client)
    @patch.object(Connection, 'API_BASE_URL')
    @patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
    def test_project_with_connected_account_unrelated_integrations(self, mock_access_token, ApiUrlMock):
        """Unrelated integrations (GCS) do not break BigQuery routing."""
        self._setup_mocks(ApiUrlMock)
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'GCS:ANOTHER_ONE')
        with env:
            client = bigquery.Client(
                project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
            self._test_integration(client)
    @patch.object(Connection, 'API_BASE_URL')
    @patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
    def test_project_with_connected_account_default_credentials(self, mock_access_token, ApiUrlMock):
        """With BIGQUERY integration set, default credentials are the user's."""
        self._setup_mocks(ApiUrlMock)
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
        with env:
            client = bigquery.Client(project='ANOTHER_PROJECT')
            self._test_integration(client)
    @patch.object(Connection, 'API_BASE_URL')
    @patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
    def test_project_with_env_var_project_default_credentials(self, mock_access_token, ApiUrlMock):
        """Project may come from GOOGLE_CLOUD_PROJECT instead of an argument."""
        self._setup_mocks(ApiUrlMock)
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
        env.set('GOOGLE_CLOUD_PROJECT', 'ANOTHER_PROJECT')
        with env:
            client = bigquery.Client()
            self._test_integration(client)
    @patch.object(Connection, 'API_BASE_URL')
    @patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
    def test_simultaneous_clients(self, mock_access_token, ApiUrlMock):
        """A proxy client and a user-credential client can coexist."""
        self._setup_mocks(ApiUrlMock)
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        with env:
            proxy_client = bigquery.Client()
            bq_client = bigquery.Client(
                project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
            self._test_integration(bq_client)
            # Verify that proxy client is still going to proxy to ensure global Connection
            # isn't being modified.
            self.assertNotEqual(type(proxy_client._connection), KaggleKernelCredentials)
            self.assertEqual(type(proxy_client._connection), _DataProxyConnection)
    def test_no_project_with_connected_account(self):
        """No project and no default project should fail credential lookup."""
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
        with env:
            with self.assertRaises(DefaultCredentialsError):
                # TODO(vimota): Handle this case, either default to Kaggle Proxy or use some default project
                # by the user or throw a custom exception.
                client = bigquery.Client()
                self._test_integration(client)
    def test_magics_with_connected_account_default_credentials(self):
        """BigQuery magics pick up KaggleKernelCredentials when integrated."""
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
        with env:
            import sitecustomize
            sitecustomize.init()
            from google.cloud.bigquery import magics
            self.assertEqual(type(magics.context._credentials), KaggleKernelCredentials)
            magics.context.credentials = None
    def test_magics_without_connected_account(self):
        """Without the BIGQUERY integration, magics get no credentials."""
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        with env:
            import sitecustomize
            sitecustomize.init()
            from google.cloud.bigquery import magics
            self.assertIsNone(magics.context._credentials)
| 5,778 | 1,303 | 23 |
145fae9f4ac4dc233781136f0554dcf3814c3f8b | 18,329 | py | Python | src/ostorlab/agent/message/proto/v2/parse/cve_pb2.py | bbhunter/ostorlab | 968fe4e5b927c0cd159594c13b73f95b71150154 | [
"Apache-2.0"
] | 113 | 2022-02-21T09:30:14.000Z | 2022-03-31T21:54:26.000Z | src/ostorlab/agent/message/proto/v2/parse/cve_pb2.py | bbhunter/ostorlab | 968fe4e5b927c0cd159594c13b73f95b71150154 | [
"Apache-2.0"
] | 2 | 2022-02-25T10:56:55.000Z | 2022-03-24T13:08:06.000Z | src/ostorlab/agent/message/proto/v2/parse/cve_pb2.py | bbhunter/ostorlab | 968fe4e5b927c0cd159594c13b73f95b71150154 | [
"Apache-2.0"
] | 20 | 2022-02-28T14:25:04.000Z | 2022-03-30T23:01:11.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cve.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cve.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b(
'\n\tcve.proto\"G\n\treference\x12\x0b\n\x03url\x18\x01 \x02(\t\x12\x0c\n\x04name\x18\x02 \x02(\t\x12\x11\n\trefsource\x18\x03 \x02(\t\x12\x0c\n\x04tags\x18\x04 \x03(\t\"W\n\rversion_match\x12\r\n\x05start\x18\x01 \x02(\t\x12\x15\n\rstart_include\x18\x02 \x02(\x08\x12\x0b\n\x03\x65nd\x18\x03 \x02(\t\x12\x13\n\x0b\x65nd_include\x18\x04 \x02(\x08\"\xdd\x01\n\tcpe_match\x12\x0c\n\x04part\x18\x01 \x02(\t\x12\x0e\n\x06vendor\x18\x02 \x01(\t\x12\x0f\n\x07product\x18\x03 \x01(\t\x12\x1f\n\x07version\x18\x04 \x01(\x0b\x32\x0e.version_match\x12\x0e\n\x06update\x18\x05 \x01(\t\x12\x0f\n\x07\x65\x64ition\x18\x06 \x01(\t\x12\x10\n\x08language\x18\x07 \x01(\t\x12\x12\n\nsw_edition\x18\x08 \x01(\t\x12\n\n\x02sw\x18\t \x01(\t\x12\n\n\x02hw\x18\n \x01(\t\x12\r\n\x05other\x18\x0b \x01(\t\x12\x12\n\nvulnerable\x18\x0c \x02(\x08\")\n\x06target\x12\x1f\n\x0b\x63pe_matches\x18\x01 \x03(\x0b\x32\n.cpe_match\"\xe1\x01\n\x03\x63ve\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12\x0e\n\x06\x63ve_id\x18\x02 \x02(\t\x12\x18\n\x07targets\x18\x03 \x03(\x0b\x32\x07.target\x12\x0b\n\x03\x63we\x18\x04 \x01(\x05\x12\x13\n\x0b\x64\x65scription\x18\x05 \x02(\t\x12\x1e\n\nreferences\x18\x06 \x03(\x0b\x32\n.reference\x12\x16\n\x0e\x63vss_v3_vector\x18\x07 \x01(\t\x12\x16\n\x0e\x63vss_v2_vector\x18\x08 \x01(\t\x12\x16\n\x0epublished_date\x18\t \x02(\t\x12\x15\n\rmodified_date\x18\x13 \x02(\t')
)
_REFERENCE = _descriptor.Descriptor(
name='reference',
full_name='reference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='reference.url', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='reference.name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='refsource', full_name='reference.refsource', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='reference.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=13,
serialized_end=84,
)
_VERSION_MATCH = _descriptor.Descriptor(
name='version_match',
full_name='version_match',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='version_match.start', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_include', full_name='version_match.start_include', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='version_match.end', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_include', full_name='version_match.end_include', index=3,
number=4, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=173,
)
_CPE_MATCH = _descriptor.Descriptor(
name='cpe_match',
full_name='cpe_match',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='part', full_name='cpe_match.part', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vendor', full_name='cpe_match.vendor', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product', full_name='cpe_match.product', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='cpe_match.version', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='cpe_match.update', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='edition', full_name='cpe_match.edition', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language', full_name='cpe_match.language', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sw_edition', full_name='cpe_match.sw_edition', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sw', full_name='cpe_match.sw', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hw', full_name='cpe_match.hw', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='other', full_name='cpe_match.other', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vulnerable', full_name='cpe_match.vulnerable', index=11,
number=12, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=397,
)
_TARGET = _descriptor.Descriptor(
name='target',
full_name='target',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cpe_matches', full_name='target.cpe_matches', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=399,
serialized_end=440,
)
_CVE = _descriptor.Descriptor(
name='cve',
full_name='cve',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scan_id', full_name='cve.scan_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cve_id', full_name='cve.cve_id', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targets', full_name='cve.targets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cwe', full_name='cve.cwe', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='cve.description', index=4,
number=5, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='references', full_name='cve.references', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cvss_v3_vector', full_name='cve.cvss_v3_vector', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cvss_v2_vector', full_name='cve.cvss_v2_vector', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='published_date', full_name='cve.published_date', index=8,
number=9, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='modified_date', full_name='cve.modified_date', index=9,
number=19, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=443,
serialized_end=668,
)
_CPE_MATCH.fields_by_name['version'].message_type = _VERSION_MATCH
_TARGET.fields_by_name['cpe_matches'].message_type = _CPE_MATCH
_CVE.fields_by_name['targets'].message_type = _TARGET
_CVE.fields_by_name['references'].message_type = _REFERENCE
DESCRIPTOR.message_types_by_name['reference'] = _REFERENCE
DESCRIPTOR.message_types_by_name['version_match'] = _VERSION_MATCH
DESCRIPTOR.message_types_by_name['cpe_match'] = _CPE_MATCH
DESCRIPTOR.message_types_by_name['target'] = _TARGET
DESCRIPTOR.message_types_by_name['cve'] = _CVE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
reference = _reflection.GeneratedProtocolMessageType('reference', (_message.Message,), dict(
DESCRIPTOR=_REFERENCE,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:reference)
))
_sym_db.RegisterMessage(reference)
version_match = _reflection.GeneratedProtocolMessageType('version_match', (_message.Message,), dict(
DESCRIPTOR=_VERSION_MATCH,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:version_match)
))
_sym_db.RegisterMessage(version_match)
cpe_match = _reflection.GeneratedProtocolMessageType('cpe_match', (_message.Message,), dict(
DESCRIPTOR=_CPE_MATCH,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:cpe_match)
))
_sym_db.RegisterMessage(cpe_match)
target = _reflection.GeneratedProtocolMessageType('target', (_message.Message,), dict(
DESCRIPTOR=_TARGET,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:target)
))
_sym_db.RegisterMessage(target)
cve = _reflection.GeneratedProtocolMessageType('cve', (_message.Message,), dict(
DESCRIPTOR=_CVE,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:cve)
))
_sym_db.RegisterMessage(cve)
# @@protoc_insertion_point(module_scope)
| 45.368812 | 1,380 | 0.665012 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cve.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cve.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b(
'\n\tcve.proto\"G\n\treference\x12\x0b\n\x03url\x18\x01 \x02(\t\x12\x0c\n\x04name\x18\x02 \x02(\t\x12\x11\n\trefsource\x18\x03 \x02(\t\x12\x0c\n\x04tags\x18\x04 \x03(\t\"W\n\rversion_match\x12\r\n\x05start\x18\x01 \x02(\t\x12\x15\n\rstart_include\x18\x02 \x02(\x08\x12\x0b\n\x03\x65nd\x18\x03 \x02(\t\x12\x13\n\x0b\x65nd_include\x18\x04 \x02(\x08\"\xdd\x01\n\tcpe_match\x12\x0c\n\x04part\x18\x01 \x02(\t\x12\x0e\n\x06vendor\x18\x02 \x01(\t\x12\x0f\n\x07product\x18\x03 \x01(\t\x12\x1f\n\x07version\x18\x04 \x01(\x0b\x32\x0e.version_match\x12\x0e\n\x06update\x18\x05 \x01(\t\x12\x0f\n\x07\x65\x64ition\x18\x06 \x01(\t\x12\x10\n\x08language\x18\x07 \x01(\t\x12\x12\n\nsw_edition\x18\x08 \x01(\t\x12\n\n\x02sw\x18\t \x01(\t\x12\n\n\x02hw\x18\n \x01(\t\x12\r\n\x05other\x18\x0b \x01(\t\x12\x12\n\nvulnerable\x18\x0c \x02(\x08\")\n\x06target\x12\x1f\n\x0b\x63pe_matches\x18\x01 \x03(\x0b\x32\n.cpe_match\"\xe1\x01\n\x03\x63ve\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12\x0e\n\x06\x63ve_id\x18\x02 \x02(\t\x12\x18\n\x07targets\x18\x03 \x03(\x0b\x32\x07.target\x12\x0b\n\x03\x63we\x18\x04 \x01(\x05\x12\x13\n\x0b\x64\x65scription\x18\x05 \x02(\t\x12\x1e\n\nreferences\x18\x06 \x03(\x0b\x32\n.reference\x12\x16\n\x0e\x63vss_v3_vector\x18\x07 \x01(\t\x12\x16\n\x0e\x63vss_v2_vector\x18\x08 \x01(\t\x12\x16\n\x0epublished_date\x18\t \x02(\t\x12\x15\n\rmodified_date\x18\x13 \x02(\t')
)
_REFERENCE = _descriptor.Descriptor(
name='reference',
full_name='reference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='reference.url', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='reference.name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='refsource', full_name='reference.refsource', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='reference.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=13,
serialized_end=84,
)
_VERSION_MATCH = _descriptor.Descriptor(
name='version_match',
full_name='version_match',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='version_match.start', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_include', full_name='version_match.start_include', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='version_match.end', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_include', full_name='version_match.end_include', index=3,
number=4, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=173,
)
_CPE_MATCH = _descriptor.Descriptor(
name='cpe_match',
full_name='cpe_match',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='part', full_name='cpe_match.part', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vendor', full_name='cpe_match.vendor', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product', full_name='cpe_match.product', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='cpe_match.version', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='cpe_match.update', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='edition', full_name='cpe_match.edition', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language', full_name='cpe_match.language', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sw_edition', full_name='cpe_match.sw_edition', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sw', full_name='cpe_match.sw', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hw', full_name='cpe_match.hw', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='other', full_name='cpe_match.other', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vulnerable', full_name='cpe_match.vulnerable', index=11,
number=12, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=397,
)
_TARGET = _descriptor.Descriptor(
name='target',
full_name='target',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cpe_matches', full_name='target.cpe_matches', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=399,
serialized_end=440,
)
_CVE = _descriptor.Descriptor(
name='cve',
full_name='cve',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scan_id', full_name='cve.scan_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cve_id', full_name='cve.cve_id', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targets', full_name='cve.targets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cwe', full_name='cve.cwe', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='cve.description', index=4,
number=5, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='references', full_name='cve.references', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cvss_v3_vector', full_name='cve.cvss_v3_vector', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cvss_v2_vector', full_name='cve.cvss_v2_vector', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='published_date', full_name='cve.published_date', index=8,
number=9, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='modified_date', full_name='cve.modified_date', index=9,
number=19, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=443,
serialized_end=668,
)
_CPE_MATCH.fields_by_name['version'].message_type = _VERSION_MATCH
_TARGET.fields_by_name['cpe_matches'].message_type = _CPE_MATCH
_CVE.fields_by_name['targets'].message_type = _TARGET
_CVE.fields_by_name['references'].message_type = _REFERENCE
DESCRIPTOR.message_types_by_name['reference'] = _REFERENCE
DESCRIPTOR.message_types_by_name['version_match'] = _VERSION_MATCH
DESCRIPTOR.message_types_by_name['cpe_match'] = _CPE_MATCH
DESCRIPTOR.message_types_by_name['target'] = _TARGET
DESCRIPTOR.message_types_by_name['cve'] = _CVE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
reference = _reflection.GeneratedProtocolMessageType('reference', (_message.Message,), dict(
DESCRIPTOR=_REFERENCE,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:reference)
))
_sym_db.RegisterMessage(reference)
version_match = _reflection.GeneratedProtocolMessageType('version_match', (_message.Message,), dict(
DESCRIPTOR=_VERSION_MATCH,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:version_match)
))
_sym_db.RegisterMessage(version_match)
cpe_match = _reflection.GeneratedProtocolMessageType('cpe_match', (_message.Message,), dict(
DESCRIPTOR=_CPE_MATCH,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:cpe_match)
))
_sym_db.RegisterMessage(cpe_match)
target = _reflection.GeneratedProtocolMessageType('target', (_message.Message,), dict(
DESCRIPTOR=_TARGET,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:target)
))
_sym_db.RegisterMessage(target)
cve = _reflection.GeneratedProtocolMessageType('cve', (_message.Message,), dict(
DESCRIPTOR=_CVE,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:cve)
))
_sym_db.RegisterMessage(cve)
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
d71816b5dfb607c0604ad673ce6bd7f777f04c05 | 600 | py | Python | plugins/PyBirchPlugin.py | BlackXanthus/PyBirch5 | 61a70fe48f859509bd737d2c204882e13c2411c6 | [
"BSD-3-Clause"
] | null | null | null | plugins/PyBirchPlugin.py | BlackXanthus/PyBirch5 | 61a70fe48f859509bd737d2c204882e13c2411c6 | [
"BSD-3-Clause"
] | null | null | null | plugins/PyBirchPlugin.py | BlackXanthus/PyBirch5 | 61a70fe48f859509bd737d2c204882e13c2411c6 | [
"BSD-3-Clause"
] | null | null | null | from TextString import TextString
##
# The abstract class that is the basic plugin
##
| 17.142857 | 45 | 0.748333 | from TextString import TextString
##
# The abstract class that is the basic plugin
##
class PyBirchPlugin:
_pybirchmagic="PyBirchPluginMagic"
def __init__(self):
self._plugin_name="Py Birch Base Plugin"
self._plugin_version="0.2"
def hello_world(self):
print("Abstract Class Loaded")
return None
def get_name(self):
return self._plugin_name
def get_version(self):
return self._plugin_version
def user_input(self,TextString):
pass
def server_input(self,TextString):
pass
##
# Do not override this method
##
def get_pybirchmagic(self):
return self._pybirchmagic
| 246 | 245 | 23 |
05896c42173dea02103091673feb63d6c6bfc251 | 1,914 | py | Python | pg_python2/pg_dates.py | ingloriousb/pg_python2 | a4b78150d24fc4ebff1c1671cfd66ab421ad1705 | [
"BSD-2-Clause"
] | null | null | null | pg_python2/pg_dates.py | ingloriousb/pg_python2 | a4b78150d24fc4ebff1c1671cfd66ab421ad1705 | [
"BSD-2-Clause"
] | null | null | null | pg_python2/pg_dates.py | ingloriousb/pg_python2 | a4b78150d24fc4ebff1c1671cfd66ab421ad1705 | [
"BSD-2-Clause"
] | null | null | null | import calendar
from umalqurra.hijri_date import HijriDate
import jdatetime
from datetime import datetime
import re
def middle_east_parsed_date(text_date, kwargs):
"""
:param text_date:
:param kwargs: format : %d-%m-%Y for 12-7-1397.
:return:
"""
dict_month_numeric = dict((v, k) for k, v in enumerate(calendar.month_name))
dict_month_abbr_numeric = dict((v, k) for k, v in enumerate(calendar.month_abbr))
day = -1
month = -1
year = -1
default_format = ["%d","%m","%Y"]
tsplit = split_non_alpha(text_date)
if "format" in kwargs:
format = kwargs["format"]
else:
format = default_format
if len(tsplit) != len(default_format):
#TODO: likely split characters next to each other 29101394
return None
for idx in range(0, len(tsplit)):
item = tsplit[idx]
if not isinstance(item, int) and not isinstance(item, float):
item = item.capitalize().strip()
if item in dict_month_numeric:
item = dict_month_numeric[item]
elif item in dict_month_abbr_numeric:
item = dict_month_abbr_numeric[item]
f_value = format[idx]
if f_value == "%d":
day = int(item)
elif f_value == "%m":
month = int(item)
elif f_value == "%Y":
year = int(item)
if month > 0 and day > 0 and year > 0:
if year < 1410:
jd = jdatetime.datetime(year, month, day)
return jd.togregorian()
if year < 1500:
um = HijriDate(year, month, day)
return datetime(um.year_gr, um.month_gr, um.day_gr)
return None | 30.380952 | 85 | 0.600836 | import calendar
from umalqurra.hijri_date import HijriDate
import jdatetime
from datetime import datetime
import re
def split_non_alpha(string_to_split):
ret_val = []
arr_spl = re.split('[^a-zA-Z0-9 ]', string_to_split)
for s in arr_spl:
ret_val.append(s.strip())
return ret_val
def middle_east_parsed_date(text_date, kwargs):
"""
:param text_date:
:param kwargs: format : %d-%m-%Y for 12-7-1397.
:return:
"""
dict_month_numeric = dict((v, k) for k, v in enumerate(calendar.month_name))
dict_month_abbr_numeric = dict((v, k) for k, v in enumerate(calendar.month_abbr))
day = -1
month = -1
year = -1
default_format = ["%d","%m","%Y"]
tsplit = split_non_alpha(text_date)
if "format" in kwargs:
format = kwargs["format"]
else:
format = default_format
if len(tsplit) != len(default_format):
#TODO: likely split characters next to each other 29101394
return None
for idx in range(0, len(tsplit)):
item = tsplit[idx]
if not isinstance(item, int) and not isinstance(item, float):
item = item.capitalize().strip()
if item in dict_month_numeric:
item = dict_month_numeric[item]
elif item in dict_month_abbr_numeric:
item = dict_month_abbr_numeric[item]
f_value = format[idx]
if f_value == "%d":
day = int(item)
elif f_value == "%m":
month = int(item)
elif f_value == "%Y":
year = int(item)
if month > 0 and day > 0 and year > 0:
if year < 1410:
jd = jdatetime.datetime(year, month, day)
return jd.togregorian()
if year < 1500:
um = HijriDate(year, month, day)
return datetime(um.year_gr, um.month_gr, um.day_gr)
return None
def gregorian_parsed_date(text_date):
return | 192 | 0 | 46 |
258f87f7934eab8366be49ae5985c5e51862d13e | 1,533 | py | Python | tuw_trinamic_iwos_revolute_controller/src/tuw_trinamic_iwos_revolute_controller/device/wheel.py | kaltenegger/tuw_trinamic | 86760553ea137d189612edf99b0ea0446d72f9a3 | [
"BSD-3-Clause"
] | null | null | null | tuw_trinamic_iwos_revolute_controller/src/tuw_trinamic_iwos_revolute_controller/device/wheel.py | kaltenegger/tuw_trinamic | 86760553ea137d189612edf99b0ea0446d72f9a3 | [
"BSD-3-Clause"
] | null | null | null | tuw_trinamic_iwos_revolute_controller/src/tuw_trinamic_iwos_revolute_controller/device/wheel.py | kaltenegger/tuw_trinamic | 86760553ea137d189612edf99b0ea0446d72f9a3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import math
from tuw_trinamic_iwos_revolute_controller.device.motor import Motor
from tuw_trinamic_iwos_revolute_controller.device.configuration_tool import ConfigurationTool
class Wheel:
"""
class representing a wheel controlled with Trinamic TMCM-1640
"""
def set_velocity(self, velocity):
"""
set the rounded target velocity (m/s) for the wheel
:param velocity: target velocity (m/s)
:return:
"""
# velocity needs to be multiplied by negative one to change direction (since wheels are mounted in reverse)
velocity_ms = velocity * -1
velocity_rps = velocity_ms / self._perimeter
velocity_rpm = velocity_rps * 60
self._motor.set_target_velocity_rpm(velocity=round(velocity_rpm))
| 34.840909 | 117 | 0.714286 | #!/usr/bin/env python3
import math
from tuw_trinamic_iwos_revolute_controller.device.motor import Motor
from tuw_trinamic_iwos_revolute_controller.device.configuration_tool import ConfigurationTool
class Wheel:
"""
class representing a wheel controlled with Trinamic TMCM-1640
"""
def __init__(self, usb_port, path_to_configuration):
self._path_to_configuration = path_to_configuration
self._usb_port = usb_port
self._configuration = None
self._perimeter = None
self._motor = None
self._load_configuration()
self._setup_perimeter()
self._setup_motor()
def set_velocity(self, velocity):
"""
set the rounded target velocity (m/s) for the wheel
:param velocity: target velocity (m/s)
:return:
"""
# velocity needs to be multiplied by negative one to change direction (since wheels are mounted in reverse)
velocity_ms = velocity * -1
velocity_rps = velocity_ms / self._perimeter
velocity_rpm = velocity_rps * 60
self._motor.set_target_velocity_rpm(velocity=round(velocity_rpm))
def _load_configuration(self):
self._configuration = ConfigurationTool.read_configuration(path_to_configuration=self._path_to_configuration)
def _setup_perimeter(self):
self._perimeter = self._configuration.diameter * math.pi
def _setup_motor(self):
self._motor = Motor(usb_port=self._usb_port, configuration=self._configuration.motor_configuration)
| 622 | 0 | 107 |
0a93400fd59ffbf7a2b05328787cae4a9c1759ae | 1,031 | py | Python | examples/plotting/radon.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
] | 30 | 2017-03-02T14:43:48.000Z | 2022-02-25T13:22:22.000Z | examples/plotting/radon.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
] | 14 | 2019-12-29T12:41:29.000Z | 2021-12-01T21:13:20.000Z | examples/plotting/radon.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
] | 18 | 2017-03-21T12:43:19.000Z | 2022-03-22T14:30:06.000Z | import os, numpy as np
from pymicro.file.file_utils import HST_read
from skimage.transform import radon
from matplotlib import pyplot as plt
'''
Example of use of the radon transform.
'''
data = HST_read('../data/mousse_250x250x250_uint8.raw', autoparse_filename=True, zrange=range(1))[:, :, 0]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5))
ax1.set_title('Original data')
ax1.imshow(data.T, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(data.shape), endpoint=False)
sinogram = radon(data, theta=theta, circle=False)
ax2.set_title('Radon transform (Sinogram)')
ax2.set_xlabel('Projection angle (deg)')
ax2.set_ylabel('Projection position (pixels)')
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.subplots_adjust(left=0.05, right=0.95)
image_name = os.path.splitext(__file__)[0] + '.png'
print('writting %s' % image_name)
plt.savefig(image_name, format='png')
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| 34.366667 | 106 | 0.731329 | import os, numpy as np
from pymicro.file.file_utils import HST_read
from skimage.transform import radon
from matplotlib import pyplot as plt
'''
Example of use of the radon transform.
'''
data = HST_read('../data/mousse_250x250x250_uint8.raw', autoparse_filename=True, zrange=range(1))[:, :, 0]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5))
ax1.set_title('Original data')
ax1.imshow(data.T, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(data.shape), endpoint=False)
sinogram = radon(data, theta=theta, circle=False)
ax2.set_title('Radon transform (Sinogram)')
ax2.set_xlabel('Projection angle (deg)')
ax2.set_ylabel('Projection position (pixels)')
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.subplots_adjust(left=0.05, right=0.95)
image_name = os.path.splitext(__file__)[0] + '.png'
print('writting %s' % image_name)
plt.savefig(image_name, format='png')
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| 0 | 0 | 0 |
eaf4e4052c4df78e2e8309ca66ba30a99e51f981 | 404 | py | Python | prog.py | schrum2/SonicDeepRLEvolution | f4476fa66d96c4f6cd9bf15947f0e9392c99eadb | [
"MIT"
] | null | null | null | prog.py | schrum2/SonicDeepRLEvolution | f4476fa66d96c4f6cd9bf15947f0e9392c99eadb | [
"MIT"
] | 4 | 2019-08-22T08:02:25.000Z | 2019-08-22T08:07:35.000Z | prog.py | schrum2/SonicDeepRLEvolution | f4476fa66d96c4f6cd9bf15947f0e9392c99eadb | [
"MIT"
] | null | null | null | import argparse
import math
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument('foo', type=perfect_square)
args = parser.parse_args()
print(args.foo) | 26.933333 | 52 | 0.690594 | import argparse
import math
def perfect_square(string):
value = int(string)
sqrt = math.sqrt(value)
if sqrt != int(sqrt):
msg = "%r is not a perfect square" % string
raise argparse.ArgumentTypeError(msg)
return math.sqrt(value)
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument('foo', type=perfect_square)
args = parser.parse_args()
print(args.foo) | 216 | 0 | 23 |
b7d5b5fb86ea4bb818e118dcb16c87a208f51c53 | 2,764 | py | Python | memote/suite/tests/test_sbml.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | memote/suite/tests/test_sbml.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | memote/suite/tests/test_sbml.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the level, version and FBC usage of the loaded SBML file."""
from __future__ import absolute_import
from memote.utils import annotate, wrapper
@annotate(title="SBML Level and Version", format_type="raw")
def test_sbml_level(sbml_version):
"""
Expect the SBML to be at least level 3 version 2.
This test reports if the model file is represented in the latest edition
(level) of the Systems Biology Markup Language (SBML) which is Level 3,
and at least version 1.
Implementation:
The level and version are parsed directly from the SBML document.
"""
version_tag = 'SBML Level {} Version {}'.format(
sbml_version[0], sbml_version[1])
ann = test_sbml_level.annotation
ann["data"] = version_tag
outcome = sbml_version[:2] >= (3, 1)
ann["metric"] = 1.0 - float(outcome)
ann["message"] = wrapper.fill(
"""The SBML file uses: {}""".format(ann["data"]))
assert sbml_version[:2] >= (3, 1), ann["message"]
@annotate(title="FBC enabled", format_type="raw")
def test_fbc_presence(sbml_version):
"""
Expect the FBC plugin to be present.
The Flux Balance Constraints (FBC) Package extends SBML with structured
and semantic descriptions for domain-specific model components such as
flux bounds, multiple linear objective functions, gene-protein-reaction
associations, metabolite chemical formulas, charge and related annotations
which are relevant for parameterized GEMs and FBA models. The SBML and
constraint-based modeling communities collaboratively develop this package
and update it based on user input.
Implementation:
Parse the state of the FBC plugin from the SBML document.
"""
fbc_present = sbml_version[2] is not None
ann = test_fbc_presence.annotation
ann["data"] = fbc_present
ann["metric"] = 1.0 - float(fbc_present)
if fbc_present:
ann["message"] = wrapper.fill("The FBC package *is* used.")
else:
ann["message"] = wrapper.fill("The FBC package is *not* used.")
assert fbc_present, ann["message"]
| 36.853333 | 78 | 0.713459 | # -*- coding: utf-8 -*-
# Copyright 2019 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the level, version and FBC usage of the loaded SBML file."""
from __future__ import absolute_import
from memote.utils import annotate, wrapper
@annotate(title="SBML Level and Version", format_type="raw")
def test_sbml_level(sbml_version):
"""
Expect the SBML to be at least level 3 version 2.
This test reports if the model file is represented in the latest edition
(level) of the Systems Biology Markup Language (SBML) which is Level 3,
and at least version 1.
Implementation:
The level and version are parsed directly from the SBML document.
"""
version_tag = 'SBML Level {} Version {}'.format(
sbml_version[0], sbml_version[1])
ann = test_sbml_level.annotation
ann["data"] = version_tag
outcome = sbml_version[:2] >= (3, 1)
ann["metric"] = 1.0 - float(outcome)
ann["message"] = wrapper.fill(
"""The SBML file uses: {}""".format(ann["data"]))
assert sbml_version[:2] >= (3, 1), ann["message"]
@annotate(title="FBC enabled", format_type="raw")
def test_fbc_presence(sbml_version):
"""
Expect the FBC plugin to be present.
The Flux Balance Constraints (FBC) Package extends SBML with structured
and semantic descriptions for domain-specific model components such as
flux bounds, multiple linear objective functions, gene-protein-reaction
associations, metabolite chemical formulas, charge and related annotations
which are relevant for parameterized GEMs and FBA models. The SBML and
constraint-based modeling communities collaboratively develop this package
and update it based on user input.
Implementation:
Parse the state of the FBC plugin from the SBML document.
"""
fbc_present = sbml_version[2] is not None
ann = test_fbc_presence.annotation
ann["data"] = fbc_present
ann["metric"] = 1.0 - float(fbc_present)
if fbc_present:
ann["message"] = wrapper.fill("The FBC package *is* used.")
else:
ann["message"] = wrapper.fill("The FBC package is *not* used.")
assert fbc_present, ann["message"]
| 0 | 0 | 0 |
c4b1802e56632a6b37d5aa10c0b46d1a3b8cbe9b | 463 | py | Python | TestLib/NormFilters.py | EmilPi/PuzzleLib | 31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9 | [
"Apache-2.0"
] | 52 | 2020-02-28T20:40:15.000Z | 2021-08-25T05:35:17.000Z | TestLib/NormFilters.py | EmilPi/PuzzleLib | 31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9 | [
"Apache-2.0"
] | 2 | 2021-02-14T15:57:03.000Z | 2021-10-05T12:21:34.000Z | TestLib/NormFilters.py | EmilPi/PuzzleLib | 31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9 | [
"Apache-2.0"
] | 8 | 2020-02-28T20:40:11.000Z | 2020-07-09T13:27:23.000Z | from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules import SubtractMean, LCN
from PuzzleLib.Visual import loadImage, showImage
if __name__ == "__main__":
main()
| 21.045455 | 73 | 0.740821 | from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules import SubtractMean, LCN
from PuzzleLib.Visual import loadImage, showImage
def main():
	"""Demo: apply mean-subtraction and LCN normalization to a test image and save the results."""
	mean_filter = SubtractMean(size=7)
	lcn_filter = LCN(N=7)
	image = gpuarray.to_gpu(loadImage("../TestData/Bench.png"))
	mean_filter(image)
	showImage(mean_filter.data.get(), "../TestData/ResultSubtractNorm.png")
	lcn_filter(image)
	showImage(lcn_filter.data.get(), "../TestData/ResultLCN.png")
if __name__ == "__main__":
main()
| 264 | 0 | 23 |
c3e18c48da47d09639838dff038ca42b114c2c5b | 5,783 | py | Python | utils/meters.py | LegionChang/CoTNet | b1bc456c0b13b282b807d1082a1598b71014b4fe | [
"Apache-2.0"
] | 360 | 2021-07-26T07:23:29.000Z | 2022-03-16T03:03:25.000Z | python_developer_tools/cv/bases/conv/CoTNet/CoTNet-master/utils/meters.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 22 | 2021-07-29T15:05:00.000Z | 2022-03-17T04:28:14.000Z | python_developer_tools/cv/bases/conv/CoTNet/CoTNet-master/utils/meters.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 47 | 2021-07-27T02:14:21.000Z | 2022-02-25T09:15:12.000Z | import decimal
import numpy as np
from collections import deque
import torch
from config import cfg
from utils.timer import Timer
from utils.logger import logger_info
import utils.distributed as dist
from utils.distributed import sum_tensor
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: score tensor of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: iterable of cutoffs k to evaluate.

    Returns:
        A list with one 0-dim float tensor per k, holding the *count* of
        samples whose target appears in the top-k predictions (not a
        ratio; callers divide by the sample count themselves).
    """
    maxk = max(topk)
    # Indices of the maxk highest-scoring classes per sample: (batch, maxk).
    _, pred = output.topk(maxk, 1, True, True)
    # Transpose to (maxk, batch) so correct[:k] selects the first k guesses.
    pred = pred.t()
    correct = pred.eq(target.reshape(1, -1).expand_as(pred))
    # Removed the unused 'batch_size' local and the no-op '* 1.0'.
    return [correct[:k].reshape(-1).float().sum(0) for k in topk]
class AverageMeter:
"""Computes and stores the average and current value"""
def time_string(seconds):
    """Format a duration in seconds as a fixed-width 'DD,HH:MM:SS' string."""
    total = int(seconds)
    secs = total % 60
    total //= 60
    mins = total % 60
    total //= 60
    hrs = total % 24
    days = total // 24
    return "{0:02},{1:02}:{2:02}:{3:02}".format(days, hrs, mins, secs)
def gpu_mem_usage():
    """Computes the GPU memory usage for the current device (MB)."""
    # Peak bytes allocated so far, converted to megabytes.
    return torch.cuda.max_memory_allocated() / (1024 * 1024)
def float_to_decimal(data, prec=4):
    """Convert floats to decimals which allows for fixed width json.

    Recurses into dicts; floats become fixed-precision Decimals; any
    other value is returned unchanged.
    """
    if isinstance(data, dict):
        return {key: float_to_decimal(value, prec) for key, value in data.items()}
    if isinstance(data, float):
        fmt = "{:." + str(prec) + "f}"
        return decimal.Decimal(fmt.format(data))
    return data
class ScalarMeter(object):
"""Measures a scalar value (adapted from Detectron)."""
class TrainMeter(object):
"""Measures training stats."""
| 32.488764 | 134 | 0.605395 | import decimal
import numpy as np
from collections import deque
import torch
from config import cfg
from utils.timer import Timer
from utils.logger import logger_info
import utils.distributed as dist
from utils.distributed import sum_tensor
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: score tensor of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: iterable of cutoffs k to evaluate.

    Returns:
        A list with one 0-dim float tensor per k, holding the *count* of
        samples whose target appears in the top-k predictions (not a
        ratio; callers divide by the sample count themselves).
    """
    maxk = max(topk)
    # Indices of the maxk highest-scoring classes per sample: (batch, maxk).
    _, pred = output.topk(maxk, 1, True, True)
    # Transpose to (maxk, batch) so correct[:k] selects the first k guesses.
    pred = pred.t()
    correct = pred.eq(target.reshape(1, -1).expand_as(pred))
    # Removed the unused 'batch_size' local and the no-op '* 1.0'.
    return [correct[:k].reshape(-1).float().sum(0) for k in topk]
class AverageMeter:
    """Tracks the most recent value plus a running (weighted) average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = self.avg = self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count
def time_string(seconds):
    """Format a duration in seconds as a fixed-width 'DD,HH:MM:SS' string."""
    total = int(seconds)
    secs = total % 60
    total //= 60
    mins = total % 60
    total //= 60
    hrs = total % 24
    days = total // 24
    return "{0:02},{1:02}:{2:02}:{3:02}".format(days, hrs, mins, secs)
def gpu_mem_usage():
    """Computes the GPU memory usage for the current device (MB)."""
    # Peak bytes allocated so far, converted to megabytes.
    return torch.cuda.max_memory_allocated() / (1024 * 1024)
def float_to_decimal(data, prec=4):
    """Convert floats to decimals which allows for fixed width json.

    Recurses into dicts; floats become fixed-precision Decimals; any
    other value is returned unchanged.
    """
    if isinstance(data, dict):
        return {key: float_to_decimal(value, prec) for key, value in data.items()}
    if isinstance(data, float):
        fmt = "{:." + str(prec) + "f}"
        return decimal.Decimal(fmt.format(data))
    return data
class ScalarMeter(object):
    """Measures a scalar value (adapted from Detectron).

    Keeps a sliding window of recent values plus a global running total.
    """
    def __init__(self, window_size):
        # deque(maxlen=...) silently drops the oldest value once full.
        self.deque = deque(maxlen=window_size)
        self.count = 0
        self.total = 0.0
    def reset(self):
        """Discard both the window and the global accumulators."""
        self.count = 0
        self.total = 0.0
        self.deque.clear()
    def add_value(self, value):
        """Record one observation."""
        self.deque.append(value)
        self.total += value
        self.count += 1
    def get_win_median(self):
        """Median over the current window."""
        return np.median(self.deque)
    def get_win_avg(self):
        """Mean over the current window."""
        return np.mean(self.deque)
    def get_global_avg(self):
        """Mean over every value ever added (not just the window)."""
        return self.total / self.count
class TrainMeter(object):
    """Measures training stats."""
    def __init__(self, start_epoch, num_epochs, epoch_iters):
        # Total iterations remaining from start_epoch, used for the ETA.
        self.epoch_iters = epoch_iters
        self.max_iter = (num_epochs - start_epoch) * epoch_iters
        self.iter_timer = Timer()
        # Windowed loss; the window matches the logging interval.
        self.loss = ScalarMeter(cfg.solver.log_interval)
        self.loss_total = 0.0
        self.lr = None
        self.num_samples = 0
        self.max_epoch = num_epochs
        self.start_epoch = start_epoch
    def reset(self, timer=False):
        # Clears the loss/lr/sample accumulators; the timer only on request.
        if timer:
            self.iter_timer.reset()
        self.loss.reset()
        self.loss_total = 0.0
        self.lr = None
        self.num_samples = 0
    def iter_tic(self):
        # Mark the start of one training iteration.
        self.iter_timer.tic()
    def iter_toc(self):
        # Mark the end of one training iteration.
        self.iter_timer.toc()
    def update_stats(self, loss, lr, mb_size):
        # Record one mini-batch: windowed loss, latest lr, weighted loss sum.
        self.loss.add_value(loss)
        self.lr = lr
        self.loss_total += loss * mb_size
        self.num_samples += mb_size
    def get_iter_stats(self, cur_epoch, cur_iter):
        # Snapshot of progress, timing, loss, lr and peak GPU memory (MB).
        cur_iter_total = (cur_epoch - self.start_epoch) * self.epoch_iters + cur_iter + 1
        eta_sec = self.iter_timer.average_time * (self.max_iter - cur_iter_total)
        mem_usage = gpu_mem_usage()
        stats = {
            "epoch": "{}/{}".format(cur_epoch + 1, self.max_epoch),
            "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
            "time_avg": self.iter_timer.average_time,
            "eta": time_string(eta_sec),
            "loss": self.loss.get_win_avg(),
            "lr": self.lr,
            "mem": int(np.ceil(mem_usage)),
        }
        return stats
    def log_iter_stats(self, cur_epoch, cur_iter):
        # Emit a log line every cfg.solver.log_interval iterations.
        # NOTE(review): "lr" is formatted with {:s}, so lr is presumably
        # stored as a string by the caller -- confirm upstream.
        if (cur_iter + 1) % cfg.solver.log_interval != 0:
            return
        stats = self.get_iter_stats(cur_epoch, cur_iter)
        info = "Epoch: {:s}, Iter: {:s}, loss: {:.4f}, lr: {:s}, time_avg: {:.4f}, eta: {:s}, mem: {:d}".format(\
            stats["epoch"], stats["iter"], stats["loss"], stats["lr"], stats["time_avg"], stats["eta"], stats["mem"])
        logger_info(info)
class TestMeter(object):
    """Accumulates top-1/top-5 hit counts during evaluation and logs accuracy."""
    def __init__(self):
        # Raw hit counts (as produced by accuracy()) and total samples seen.
        self.num_top1 = 0
        self.num_top5 = 0
        self.num_samples = 0
    def reset(self):
        # Start a fresh evaluation pass.
        self.num_top1 = 0
        self.num_top5 = 0
        self.num_samples = 0
    def update_stats(self, num_top1, num_top5, mb_size):
        # Add one mini-batch worth of hit counts.
        self.num_top1 += num_top1
        self.num_top5 += num_top5
        self.num_samples += mb_size
    def log_iter_stats(self, cur_epoch):
        # Aggregate counts (summed across ranks when distributed), log the
        # resulting accuracies, and also return them to the caller.
        if cfg.distributed:
            # sum_tensor all-reduces the three counters across processes;
            # staging on "cuda" is required by the distributed backend here.
            tensor_reduce = torch.tensor([self.num_top1 * 1.0, self.num_top5 * 1.0, self.num_samples * 1.0], device="cuda")
            tensor_reduce = sum_tensor(tensor_reduce)
            tensor_reduce = tensor_reduce.data.cpu().numpy()
            num_top1 = tensor_reduce[0]
            num_top5 = tensor_reduce[1]
            num_samples = tensor_reduce[2]
        else:
            num_top1 = self.num_top1
            num_top5 = self.num_top5
            num_samples = self.num_samples
        top1_acc = num_top1 * 1.0 / num_samples
        top5_acc = num_top5 * 1.0 / num_samples
        info = "Epoch: {:d}, top1_acc = {:.2%}, top5_acc = {:.2%} in {:d}".format(cur_epoch + 1, top1_acc, top5_acc, int(num_samples))
        logger_info(info)
        return top1_acc, top5_acc
| 3,542 | 3 | 565 |
a7b173e38daeb055e31229e89e694045cf16422c | 352 | py | Python | Hackerearth/Recursion/Easy/Python/stairs.py | pranaylobo/Team-Kalm-CP | daa967d84ccd162efc0b7f19448daa01f745e7e2 | [
"Apache-2.0"
] | null | null | null | Hackerearth/Recursion/Easy/Python/stairs.py | pranaylobo/Team-Kalm-CP | daa967d84ccd162efc0b7f19448daa01f745e7e2 | [
"Apache-2.0"
] | 1 | 2021-02-15T16:02:47.000Z | 2021-02-15T16:02:47.000Z | Hackerearth/Recursion/Easy/Python/stairs.py | pranaylobo/Team-Kalm-CP | daa967d84ccd162efc0b7f19448daa01f745e7e2 | [
"Apache-2.0"
] | 1 | 2021-02-16T04:53:47.000Z | 2021-02-16T04:53:47.000Z |
def solve(n):
    """Number of distinct ways to climb ``n`` stairs taking 1, 2 or 3 steps.

    Tribonacci-style recurrence f(i) = f(i-1) + f(i-2) + f(i-3) with
    f(0) = f(1) = 1 and f(2) = 2. The original triple-recursive version
    was exponential (its hard-coded cases for n = 3..7 merely matched the
    recurrence); this iterative form is O(n) time, O(1) space, and also
    terminates for negative n instead of recursing forever.
    """
    if n < 2:
        return 1
    if n == 2:
        return 2
    # (a, b, c) hold f(i-3), f(i-2), f(i-1); roll forward one step each turn.
    a, b, c = 1, 1, 2
    for _ in range(3, n + 1):
        a, b, c = b, c, a + b + c
    return c
print(solve(int(input()))) | 300 | 0 | 22 |
3d64bd367efb90a5dab78c19ff495414aaf0679b | 5,605 | py | Python | ResophNotes.py | rrottmann/ResophNotesPy | 0315ae85dad92075ac4f6fbdcf1f533b830f5903 | [
"BSD-2-Clause"
] | 1 | 2017-08-24T17:34:43.000Z | 2017-08-24T17:34:43.000Z | ResophNotes.py | rrottmann/ResophNotesPy | 0315ae85dad92075ac4f6fbdcf1f533b830f5903 | [
"BSD-2-Clause"
] | null | null | null | ResophNotes.py | rrottmann/ResophNotesPy | 0315ae85dad92075ac4f6fbdcf1f533b830f5903 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ResophNotes.py
Convert to JSON and query ResophNotes notes from shell.
Copyright 2017 by Reiner Rottmann <reiner@rottmann.it
Released under the BSD License.
"""
import os
import sys
import base64
import uuid
import json
import argparse
import subprocess
import collections
import xmltodict
def convert(path):
"""Convert the ResophNotes data file (resophnotesdata.xml) at given path. Returns db as dict."""
tags = {}
db = {}
fd = open(path, 'r')
content = fd.read()
fd.close()
data = xmltodict.parse(content)
tags['none'] = []
for tag in data['database']['tag']:
try:
tags[base64.b64decode(str(tag))] = []
except KeyError:
print tag
for obj in data['database']['object']:
uid = str(uuid.uuid4())
try:
if 'tags' in obj:
objtags = base64.b64decode(str(obj['tags'])).split(',')
else:
objtags = ['none']
for tag in objtags:
tags[tag].append(uid)
db[uid] = {}
for key in obj.keys():
if key in ['content', 'tags']:
value = base64.b64decode(str(obj[key]))
else:
value = str(obj[key])
db[uid][key] = value
except:
pass
return db
def save_json(path, db):
    """Save the db as JSON dump to given path."""
    # 'with' closes the handle even if serialization fails (the original
    # leaked the descriptor when json.dump raised).
    with open(path, 'w') as fd:
        json.dump(db, fd)
def open_json(path):
    """Open the db from JSON file previously saved at given path."""
    # The original never closed the handle; 'with' fixes the leak.
    with open(path, 'r') as fd:
        return json.load(fd)
def count_tags(db):
    """Count tag statistics for some awesomeness.

    Returns the 10 most common (tag, count) pairs across all notes.
    """
    all_tags = []
    for obj in db.values():
        if 'tags' in obj:
            # extend() appends in place; the original rebuilt the whole list
            # with 'all_tags = all_tags + ...' on every note (quadratic).
            all_tags.extend(obj['tags'].split(','))
    return collections.Counter(all_tags).most_common(10)
def cli(db, internal=False):
"""Query the database via CLI. May use internal viewer instead of less."""
count_tags(db)
print """
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|R|e|s|o|p|h|N|o|t|e|s|.|p|y|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
print 'Total number of notes:', len(db)
print 'Most common tags:', ', '.join(['@' + x[0] + ':' + str(x[1]) for x in count_tags(db)])
print ''
while True:
query = raw_input('Query (q to quit)? ').lower()
if query == 'q':
sys.exit(0)
results = []
for key in db.keys():
obj = db[key]
match_tag = True
for q in query.split():
if 'tags' in obj.keys():
if not q in str(obj['tags']).lower().split(','):
match_tag = False
break
else:
match_tag = False
break
if match_tag:
if not obj in results:
results.append(obj)
continue
if 'content' in obj.keys():
if obj['content'].encode('utf-8').lower().find(query) > 0:
if not obj in results:
results.append(obj)
continue
i = 0
for result in results[:36]:
if not 'tags' in result.keys():
result['tags'] = 'none'
print str(i), '|', result['modify'], '|', result['content'].splitlines()[0], '|', 'tags:', ' '.join(
['@' + x for x in result['tags'].split(',')])
i += 1
print 'Results:', len(results)
if len(results) > 0:
while True:
show = str(raw_input('Read which note (q to return)? '))
if show == 'q':
break
if show.isdigit():
show = int(show)
if show >= 0 and show <= len(results):
if internal:
sys.stdout.write(str(results[show]['content']))
else:
p = subprocess.Popen('/usr/bin/less', stdin=subprocess.PIPE, shell=True)
p.communicate(results[show]['content'].encode('utf-8').strip())
if __name__ == '__main__':
    # Entry point: convert the XML once, cache it as JSON, then optionally
    # drop into the interactive query CLI.
    parser = argparse.ArgumentParser(description='Convert to JSON and query ResophNotes notes from shell.')
    parser.add_argument('--data', help='Import from this ResophNotes data file. Default: resophnotesdata.xml',
                        default='resophnotesdata.xml')
    parser.add_argument('--json', help='JSON file with converted ResophNotes data. Default: resophnotesdata.json',
                        default='resophnotesdata.json')
    parser.add_argument('--cli', help='Open an interactive cli to query ResophNotes data.', action='store_true')
    parser.add_argument('--internal', help='Use internal viewer instead of less.', action='store_true')
    args = parser.parse_args()
    db = None
    # No JSON cache yet but the XML exists: convert and cache it.
    if not os.path.exists(args.json) and os.path.exists(args.data):
        db = convert(args.data)
        save_json(args.json, db)
    else:
        if os.path.exists(args.json):
            db = open_json(args.json)
    # NOTE(review): if db is only loaded by the branch below, the 'else'
    # (and therefore --cli) is skipped and the program falls through to
    # print_help(); the flow looks unintentionally convoluted -- verify.
    if db is None and os.path.exists(args.json):
        db = open_json(args.json)
    else:
        if db:
            if args.cli:
                cli(db, args.internal)
                sys.exit(0)
        else:
            print "Error: No ResophNotes available."
            sys.exit(1)
    parser.print_help()
    sys.exit(0)
| 33.363095 | 114 | 0.509723 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ResophNotes.py
Convert to JSON and query ResophNotes notes from shell.
Copyright 2017 by Reiner Rottmann <reiner@rottmann.it
Released under the BSD License.
"""
import os
import sys
import base64
import uuid
import json
import argparse
import subprocess
import collections
import xmltodict
def convert(path):
"""Convert the ResophNotes data file (resophnotesdata.xml) at given path. Returns db as dict."""
tags = {}
db = {}
fd = open(path, 'r')
content = fd.read()
fd.close()
data = xmltodict.parse(content)
tags['none'] = []
for tag in data['database']['tag']:
try:
tags[base64.b64decode(str(tag))] = []
except KeyError:
print tag
for obj in data['database']['object']:
uid = str(uuid.uuid4())
try:
if 'tags' in obj:
objtags = base64.b64decode(str(obj['tags'])).split(',')
else:
objtags = ['none']
for tag in objtags:
tags[tag].append(uid)
db[uid] = {}
for key in obj.keys():
if key in ['content', 'tags']:
value = base64.b64decode(str(obj[key]))
else:
value = str(obj[key])
db[uid][key] = value
except:
pass
return db
def save_json(path, db):
    """Save the db as JSON dump to given path."""
    # 'with' closes the handle even if serialization fails (the original
    # leaked the descriptor when json.dump raised).
    with open(path, 'w') as fd:
        json.dump(db, fd)
def open_json(path):
    """Open the db from JSON file previously saved at given path."""
    # The original never closed the handle; 'with' fixes the leak.
    with open(path, 'r') as fd:
        return json.load(fd)
def count_tags(db):
    """Count tag statistics for some awesomeness.

    Returns the 10 most common (tag, count) pairs across all notes.
    """
    all_tags = []
    for obj in db.values():
        if 'tags' in obj:
            # extend() appends in place; the original rebuilt the whole list
            # with 'all_tags = all_tags + ...' on every note (quadratic).
            all_tags.extend(obj['tags'].split(','))
    return collections.Counter(all_tags).most_common(10)
def cli(db, internal=False):
"""Query the database via CLI. May use internal viewer instead of less."""
count_tags(db)
print """
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|R|e|s|o|p|h|N|o|t|e|s|.|p|y|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
print 'Total number of notes:', len(db)
print 'Most common tags:', ', '.join(['@' + x[0] + ':' + str(x[1]) for x in count_tags(db)])
print ''
while True:
query = raw_input('Query (q to quit)? ').lower()
if query == 'q':
sys.exit(0)
results = []
for key in db.keys():
obj = db[key]
match_tag = True
for q in query.split():
if 'tags' in obj.keys():
if not q in str(obj['tags']).lower().split(','):
match_tag = False
break
else:
match_tag = False
break
if match_tag:
if not obj in results:
results.append(obj)
continue
if 'content' in obj.keys():
if obj['content'].encode('utf-8').lower().find(query) > 0:
if not obj in results:
results.append(obj)
continue
i = 0
for result in results[:36]:
if not 'tags' in result.keys():
result['tags'] = 'none'
print str(i), '|', result['modify'], '|', result['content'].splitlines()[0], '|', 'tags:', ' '.join(
['@' + x for x in result['tags'].split(',')])
i += 1
print 'Results:', len(results)
if len(results) > 0:
while True:
show = str(raw_input('Read which note (q to return)? '))
if show == 'q':
break
if show.isdigit():
show = int(show)
if show >= 0 and show <= len(results):
if internal:
sys.stdout.write(str(results[show]['content']))
else:
p = subprocess.Popen('/usr/bin/less', stdin=subprocess.PIPE, shell=True)
p.communicate(results[show]['content'].encode('utf-8').strip())
if __name__ == '__main__':
    # Entry point: convert the XML once, cache it as JSON, then optionally
    # drop into the interactive query CLI.
    parser = argparse.ArgumentParser(description='Convert to JSON and query ResophNotes notes from shell.')
    parser.add_argument('--data', help='Import from this ResophNotes data file. Default: resophnotesdata.xml',
                        default='resophnotesdata.xml')
    parser.add_argument('--json', help='JSON file with converted ResophNotes data. Default: resophnotesdata.json',
                        default='resophnotesdata.json')
    parser.add_argument('--cli', help='Open an interactive cli to query ResophNotes data.', action='store_true')
    parser.add_argument('--internal', help='Use internal viewer instead of less.', action='store_true')
    args = parser.parse_args()
    db = None
    # No JSON cache yet but the XML exists: convert and cache it.
    if not os.path.exists(args.json) and os.path.exists(args.data):
        db = convert(args.data)
        save_json(args.json, db)
    else:
        if os.path.exists(args.json):
            db = open_json(args.json)
    # NOTE(review): if db is only loaded by the branch below, the 'else'
    # (and therefore --cli) is skipped and the program falls through to
    # print_help(); the flow looks unintentionally convoluted -- verify.
    if db is None and os.path.exists(args.json):
        db = open_json(args.json)
    else:
        if db:
            if args.cli:
                cli(db, args.internal)
                sys.exit(0)
        else:
            print "Error: No ResophNotes available."
            sys.exit(1)
    parser.print_help()
    sys.exit(0)
| 0 | 0 | 0 |
2fde3e0585be2c7e44bbf4a50888acdf44b8a5c1 | 1,703 | py | Python | review2.py | cs-fullstack-2019-spring/python-review2-cw-gkg901 | 0a0d65b7548614e48048c7bf742cbe9c56108ffe | [
"Apache-2.0"
] | null | null | null | review2.py | cs-fullstack-2019-spring/python-review2-cw-gkg901 | 0a0d65b7548614e48048c7bf742cbe9c56108ffe | [
"Apache-2.0"
] | null | null | null | review2.py | cs-fullstack-2019-spring/python-review2-cw-gkg901 | 0a0d65b7548614e48048c7bf742cbe9c56108ffe | [
"Apache-2.0"
] | null | null | null |
# Create a task list. A user is presented with the text below.
# Let them select an option to list all of their tasks, add a task to their list, delete a task, or quit the program.
# Make each option a different function in your program.
# Do NOT use Google. Do NOT use other students. Try to do this on your own.
if __name__ == '__main__':
main()
| 21.2875 | 132 | 0.53259 | def main():
ex()
# Create a task list. A user is presented with the text below.
# Let them select an option to list all of their tasks, add a task to their list, delete a task, or quit the program.
# Make each option a different function in your program.
# Do NOT use Google. Do NOT use other students. Try to do this on your own.
def ex():
    """Run the interactive task-list loop, loading from and saving to TASK.txt."""
    # DELETE TASK FUNCTION
    def deleteTask(x):
        taskList.remove(x)
    taskList = ["Finish this project"]
    # IMPORTS SAVE
    # Fixes vs. original: tolerate a missing save file on the first run
    # (the unconditional open() crashed with FileNotFoundError), strip the
    # trailing newline so loaded tasks can actually be matched/deleted,
    # and skip entries already present (the default task was re-saved and
    # re-loaded as a duplicate on every run).
    try:
        with open("TASK.txt", "r") as save:
            for item in save:
                task = item.rstrip("\n")
                if task and task not in taskList:
                    taskList.append(task)
    except FileNotFoundError:
        pass  # first run: nothing saved yet
    # PROGRAM START!!!!!!!!!!!!!!!!!!!
    print("Congratulations! You're running Gerren's Task List program.")
    userInput = ""
    # The loop condition never triggers (userInput is always a str); the
    # actual exit is the 'break' in option 0 -- preserved from the original.
    while (userInput != 0):
        userInput = input(
            "What would you like to do next?\n 1. List all tasks.\n 2. Add a task to the list.\n 3. Delete a task.\n 0. to quit.\n")
        if userInput == "1":
            for items in taskList:
                print(items)
        elif userInput == "2":
            newTask = input("Enter your new task.\n")
            taskList.append(newTask)
        elif userInput == "3":
            print(taskList)
            ask = input("Delete which task?\n")
            if ask in taskList:
                deleteTask(ask)
            else:
                print("INVALID ENTRY")
        elif userInput == "0":
            # SAVES LIST TO TASK.txt
            # Overwrite ('w') instead of append ('a'): appending duplicated
            # every task on each run; also open the file once, not per task.
            with open("TASK.txt", "w") as f:
                for each in taskList:
                    f.write(each + "\n")
            break
        else:
            print("INVALID ENTRY")
            continue
if __name__ == '__main__':
main()
| 1,296 | 0 | 45 |
0a80ed22b1f8f2014a9901db8351528c6723316c | 579 | py | Python | Python/Strings/alphabet_rangoli.py | LikimiaD/HackerRank | ad34261ea4d7d721f92a3230a369eccaac5ef88b | [
"MIT"
] | null | null | null | Python/Strings/alphabet_rangoli.py | LikimiaD/HackerRank | ad34261ea4d7d721f92a3230a369eccaac5ef88b | [
"MIT"
] | null | null | null | Python/Strings/alphabet_rangoli.py | LikimiaD/HackerRank | ad34261ea4d7d721f92a3230a369eccaac5ef88b | [
"MIT"
] | null | null | null | import string
size = 10
# Widest (middle) row: letters from the size-th letter down to 'a' and back
# up, joined with dashes -- for size 10: "j-i-h-g-f-e-d-c-b-a-b-c-...-j".
mid_line = '-'.join([string.ascii_letters[size - x] for x in range(1, size)] + [string.ascii_letters[x] for x in range(size)])
lines = []
# Build the rows above the middle, from the shortest (outermost letter only).
for x in range(2,size+1):
    # Letters running from the outermost letter inward, excluding 'a'.
    main = ''.join(string.ascii_letters[size - x] for x in range(1, x))
    # Drop the innermost letter; the remainder is mirrored back outward.
    # (The genexp's 'x' shadows the loop variable only inside the genexp.)
    *main_list,_ = list(main)
    reverse = ''.join(x for x in reversed(main_list))
    line = '-'.join(main+reverse)
    # Dash-pad both sides so every row matches the middle row's width.
    num = (len(mid_line)-len(line)) // 2
    output_line = '-' * num + line + '-' * num
    lines.append(output_line)
[print(x) for x in lines]
print(mid_line)
[print(x) for x in reversed(lines)] | 36.1875 | 126 | 0.62867 | import string
def print_rangoli(size=10):
    """Print an alphabet rangoli of the given size (default 10, as before).

    The middle row runs from the size-th letter down to 'a' and back up
    (e.g. "c-b-a-b-c" for size 3); rows above/below drop one inner letter
    per step and are dash-padded to the middle row's width. Generalized
    from the original script's hard-coded size = 10.
    """
    mid_line = '-'.join([string.ascii_letters[size - i] for i in range(1, size)]
                        + [string.ascii_letters[i] for i in range(size)])
    rows = []
    for width in range(2, size + 1):
        # Letters from the outermost letter inward, excluding 'a'.
        left = ''.join(string.ascii_letters[size - i] for i in range(1, width))
        # Mirror everything except the innermost letter back outward.
        mirrored = left[-2::-1]
        row = '-'.join(left + mirrored)
        pad = (len(mid_line) - len(row)) // 2
        rows.append('-' * pad + row + '-' * pad)
    for row in rows:
        print(row)
    print(mid_line)
    for row in reversed(rows):
        print(row)


print_rangoli(10)
be18080a401b2813e33234cd1be2ebc3fe126b62 | 971 | py | Python | app/modules/users.py | Gerschtli/betting-game-backend | 4fb38e5316ea6509b4468f8ca11de2f899366abb | [
"MIT"
] | null | null | null | app/modules/users.py | Gerschtli/betting-game-backend | 4fb38e5316ea6509b4468f8ca11de2f899366abb | [
"MIT"
] | 1 | 2021-11-04T16:42:26.000Z | 2021-11-04T16:42:26.000Z | app/modules/users.py | Gerschtli/betting-game-backend | 4fb38e5316ea6509b4468f8ca11de2f899366abb | [
"MIT"
] | null | null | null | from flask import Blueprint
from flask_restful import Api
from .. import models, request
from ..resource import Resource
from ..response import Response, no_content
from ..validator import matcher, schemas, validate_input, validate_schema
module = Blueprint('users', __name__, url_prefix='/users')
api = Api(module)
@api.resource('')
| 26.243243 | 73 | 0.627188 | from flask import Blueprint
from flask_restful import Api
from .. import models, request
from ..resource import Resource
from ..response import Response, no_content
from ..validator import matcher, schemas, validate_input, validate_schema
module = Blueprint('users', __name__, url_prefix='/users')
api = Api(module)
@api.resource('')
class Users(Resource):
    """REST resource for user registration: POST /users creates a user."""
    @validate_schema(schemas.USER)
    @validate_input({
        'username': matcher.And(
            matcher.Required(),
            matcher.UniqueUsername(),
        ),
        'password': matcher.MinLength(6),
        'email': matcher.Required(),
    })
    @staticmethod
    def post() -> Response:
        # NOTE(review): @staticmethod is the innermost decorator, so
        # validate_input/validate_schema receive a staticmethod object; on
        # Python < 3.10 that object is not directly callable -- confirm the
        # decorators unwrap it (or that @staticmethod should be outermost).
        data = request.get_json()
        # 'is_admin' is read below but has no validate_input matcher;
        # presumably schemas.USER guarantees its presence -- TODO confirm.
        new_user = models.User(
            username=data['username'],
            password=models.User.generate_hash(data['password']),
            email=data['email'],
            is_admin=data['is_admin'],
        )
        new_user.save()
        # 204 No Content on success.
        return no_content()
| 309 | 302 | 22 |
e40ae9de2fac686e2a524206e3c3fcd494ed381b | 15,777 | py | Python | sdk/python/pulumi_azure_nextgen/devtestlab/v20150521preview/virtual_machine_resource.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/devtestlab/v20150521preview/virtual_machine_resource.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/devtestlab/v20150521preview/virtual_machine_resource.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['VirtualMachineResource']
| 44.442254 | 364 | 0.652722 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['VirtualMachineResource']
class VirtualMachineResource(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
artifact_deployment_status: Optional[pulumi.Input[pulumi.InputType['ArtifactDeploymentStatusPropertiesArgs']]] = None,
artifacts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ArtifactInstallPropertiesArgs']]]]] = None,
compute_id: Optional[pulumi.Input[str]] = None,
created_by_user: Optional[pulumi.Input[str]] = None,
created_by_user_id: Optional[pulumi.Input[str]] = None,
custom_image_id: Optional[pulumi.Input[str]] = None,
disallow_public_ip_address: Optional[pulumi.Input[bool]] = None,
fqdn: Optional[pulumi.Input[str]] = None,
gallery_image_reference: Optional[pulumi.Input[pulumi.InputType['GalleryImageReferenceArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
is_authentication_with_ssh_key: Optional[pulumi.Input[bool]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
lab_subnet_name: Optional[pulumi.Input[str]] = None,
lab_virtual_network_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input[str]] = None,
owner_object_id: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
ssh_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A virtual machine.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ArtifactDeploymentStatusPropertiesArgs']] artifact_deployment_status: The artifact deployment status for the virtual machine.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ArtifactInstallPropertiesArgs']]]] artifacts: The artifacts to be installed on the virtual machine.
:param pulumi.Input[str] compute_id: The resource identifier (Microsoft.Compute) of the virtual machine.
:param pulumi.Input[str] created_by_user: The email address of creator of the virtual machine.
:param pulumi.Input[str] created_by_user_id: The object identifier of the creator of the virtual machine.
:param pulumi.Input[str] custom_image_id: The custom image identifier of the virtual machine.
:param pulumi.Input[bool] disallow_public_ip_address: Indicates whether the virtual machine is to be created without a public IP address.
:param pulumi.Input[str] fqdn: The fully-qualified domain name of the virtual machine.
:param pulumi.Input[pulumi.InputType['GalleryImageReferenceArgs']] gallery_image_reference: The Microsoft Azure Marketplace image reference of the virtual machine.
:param pulumi.Input[str] id: The identifier of the resource.
:param pulumi.Input[bool] is_authentication_with_ssh_key: A value indicating whether this virtual machine uses an SSH key for authentication.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] lab_subnet_name: The lab subnet name of the virtual machine.
:param pulumi.Input[str] lab_virtual_network_id: The lab virtual network identifier of the virtual machine.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the resource.
:param pulumi.Input[str] notes: The notes of the virtual machine.
:param pulumi.Input[str] os_type: The OS type of the virtual machine.
:param pulumi.Input[str] owner_object_id: The object identifier of the owner of the virtual machine.
:param pulumi.Input[str] password: The password of the virtual machine administrator.
:param pulumi.Input[str] provisioning_state: The provisioning status of the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] size: The size of the virtual machine.
:param pulumi.Input[str] ssh_key: The SSH key of the virtual machine administrator.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] type: The type of the resource.
:param pulumi.Input[str] user_name: The user name of the virtual machine.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['artifact_deployment_status'] = artifact_deployment_status
__props__['artifacts'] = artifacts
__props__['compute_id'] = compute_id
__props__['created_by_user'] = created_by_user
__props__['created_by_user_id'] = created_by_user_id
__props__['custom_image_id'] = custom_image_id
__props__['disallow_public_ip_address'] = disallow_public_ip_address
__props__['fqdn'] = fqdn
__props__['gallery_image_reference'] = gallery_image_reference
__props__['id'] = id
__props__['is_authentication_with_ssh_key'] = is_authentication_with_ssh_key
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['lab_subnet_name'] = lab_subnet_name
__props__['lab_virtual_network_id'] = lab_virtual_network_id
__props__['location'] = location
__props__['name'] = name
__props__['notes'] = notes
__props__['os_type'] = os_type
__props__['owner_object_id'] = owner_object_id
__props__['password'] = password
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['size'] = size
__props__['ssh_key'] = ssh_key
__props__['tags'] = tags
__props__['type'] = type
__props__['user_name'] = user_name
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab:VirtualMachineResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/latest:VirtualMachineResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:VirtualMachineResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:VirtualMachineResource")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualMachineResource, __self__).__init__(
'azure-nextgen:devtestlab/v20150521preview:VirtualMachineResource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualMachineResource':
"""
Get an existing VirtualMachineResource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return VirtualMachineResource(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="artifactDeploymentStatus")
def artifact_deployment_status(self) -> pulumi.Output[Optional['outputs.ArtifactDeploymentStatusPropertiesResponse']]:
"""
The artifact deployment status for the virtual machine.
"""
return pulumi.get(self, "artifact_deployment_status")
@property
@pulumi.getter
def artifacts(self) -> pulumi.Output[Optional[Sequence['outputs.ArtifactInstallPropertiesResponse']]]:
"""
The artifacts to be installed on the virtual machine.
"""
return pulumi.get(self, "artifacts")
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource identifier (Microsoft.Compute) of the virtual machine.
"""
return pulumi.get(self, "compute_id")
@property
@pulumi.getter(name="createdByUser")
def created_by_user(self) -> pulumi.Output[Optional[str]]:
"""
The email address of creator of the virtual machine.
"""
return pulumi.get(self, "created_by_user")
@property
@pulumi.getter(name="createdByUserId")
def created_by_user_id(self) -> pulumi.Output[Optional[str]]:
"""
The object identifier of the creator of the virtual machine.
"""
return pulumi.get(self, "created_by_user_id")
@property
@pulumi.getter(name="customImageId")
def custom_image_id(self) -> pulumi.Output[Optional[str]]:
"""
The custom image identifier of the virtual machine.
"""
return pulumi.get(self, "custom_image_id")
@property
@pulumi.getter(name="disallowPublicIpAddress")
def disallow_public_ip_address(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether the virtual machine is to be created without a public IP address.
"""
return pulumi.get(self, "disallow_public_ip_address")
@property
@pulumi.getter
def fqdn(self) -> pulumi.Output[Optional[str]]:
"""
The fully-qualified domain name of the virtual machine.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="galleryImageReference")
def gallery_image_reference(self) -> pulumi.Output[Optional['outputs.GalleryImageReferenceResponse']]:
"""
The Microsoft Azure Marketplace image reference of the virtual machine.
"""
return pulumi.get(self, "gallery_image_reference")
@property
@pulumi.getter(name="isAuthenticationWithSshKey")
def is_authentication_with_ssh_key(self) -> pulumi.Output[Optional[bool]]:
"""
A value indicating whether this virtual machine uses an SSH key for authentication.
"""
return pulumi.get(self, "is_authentication_with_ssh_key")
@property
@pulumi.getter(name="labSubnetName")
def lab_subnet_name(self) -> pulumi.Output[Optional[str]]:
"""
The lab subnet name of the virtual machine.
"""
return pulumi.get(self, "lab_subnet_name")
@property
@pulumi.getter(name="labVirtualNetworkId")
def lab_virtual_network_id(self) -> pulumi.Output[Optional[str]]:
"""
The lab virtual network identifier of the virtual machine.
"""
return pulumi.get(self, "lab_virtual_network_id")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> pulumi.Output[Optional[str]]:
"""
The notes of the virtual machine.
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Output[Optional[str]]:
"""
The OS type of the virtual machine.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="ownerObjectId")
def owner_object_id(self) -> pulumi.Output[Optional[str]]:
"""
The object identifier of the owner of the virtual machine.
"""
return pulumi.get(self, "owner_object_id")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
The password of the virtual machine administrator.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def size(self) -> pulumi.Output[Optional[str]]:
"""
The size of the virtual machine.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter(name="sshKey")
def ssh_key(self) -> pulumi.Output[Optional[str]]:
"""
The SSH key of the virtual machine administrator.
"""
return pulumi.get(self, "ssh_key")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userName")
def user_name(self) -> pulumi.Output[Optional[str]]:
"""
The user name of the virtual machine.
"""
return pulumi.get(self, "user_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 175 | 15,178 | 23 |
85fcbea729438d4ba0adb8419eee9393e726705d | 2,289 | py | Python | watchdog/watchdog.py | rnme/centralization-sw | d0c0bafad37f15e23663a7c1fc599c61592ab389 | [
"MIT"
] | null | null | null | watchdog/watchdog.py | rnme/centralization-sw | d0c0bafad37f15e23663a7c1fc599c61592ab389 | [
"MIT"
] | null | null | null | watchdog/watchdog.py | rnme/centralization-sw | d0c0bafad37f15e23663a7c1fc599c61592ab389 | [
"MIT"
] | null | null | null | import os
import asyncio
from datetime import datetime, timedelta
from influxdb import InfluxDBClient
asyncio.run(watch())
| 32.7 | 85 | 0.492355 | import os
import asyncio
from datetime import datetime, timedelta
from influxdb import InfluxDBClient
class Measurement:
def __init__(self, point):
self.__ventilator = point[0][1]['ventilator']
self.__point = next(point[1])
def ventilator(self):
return self.__ventilator
def is_older_than(self, seconds):
return datetime.strptime(
self.__point['time'], "%Y-%m-%dT%H:%M:%S.%fZ"
) < (datetime.utcnow() - timedelta(seconds=60))
class Measurements:
def __init__(self, result_set):
self.__result_set = result_set
def measurements(self):
return list(
map(lambda point: Measurement(point), self.__result_set.items())
)
async def watch():
client = InfluxDBClient(
os.environ['WATCHDOG_TSDB_HOST'],
int(os.environ['WATCHDOG_TSDB_PORT']),
os.environ['WATCHDOG_TSDB_USER'],
os.environ['WATCHDOG_TSDB_PASSWORD'],
os.environ['WATCHDOG_TSDB_DB']
)
while True:
print('Obteniendo mediciones...')
latest = Measurements(
client.query(
'''
SELECT "status", "fr", "ie", "pause", "vc", "fio2", "peep"
FROM "ventilator_measurement"
GROUP BY "ventilator"
ORDER BY time DESC
LIMIT 1
'''
)
)
for measurement in latest.measurements():
if measurement.is_older_than(
int(os.environ['WATCHDOG_DELTA'])
):
print(f'El respirador {measurement.ventilator()} está desconectado.')
client.write_points(
[
{
'measurement': 'ventilator_measurement',
'tags': {
'ventilator': measurement.ventilator()
},
'time': datetime.utcnow().isoformat(),
'fields': { 'status': -1 }
}
]
)
await asyncio.sleep(int(os.environ['WATCHDOG_PERIOD']))
asyncio.run(watch())
| 1,968 | -5 | 202 |
24efa0cb0f823331453c19591ce02aecff56c737 | 182 | py | Python | Exercise_06_ListSorting.py | lukas9557/learning_python | 5a8d61658bc9121314fd6633e1f782cf3e9641b6 | [
"MIT"
] | 2 | 2020-11-29T22:38:18.000Z | 2020-12-01T19:09:53.000Z | Exercise_06_ListSorting.py | lukas9557/learning_python | 5a8d61658bc9121314fd6633e1f782cf3e9641b6 | [
"MIT"
] | null | null | null | Exercise_06_ListSorting.py | lukas9557/learning_python | 5a8d61658bc9121314fd6633e1f782cf3e9641b6 | [
"MIT"
] | null | null | null | #Sort list, and sort list in reverse order
a = list()
a = [5, 2, 9, 1, 7, 6, 3, 8, 4]
b = list()
c = list()
b = sorted(a)
c = sorted(a, reverse = 1)
print(b)
print(c)
| 14 | 43 | 0.516484 | #Sort list, and sort list in reverse order
a = list()
a = [5, 2, 9, 1, 7, 6, 3, 8, 4]
b = list()
c = list()
b = sorted(a)
c = sorted(a, reverse = 1)
print(b)
print(c)
| 0 | 0 | 0 |
8bdb1d4535d1958f3b43243d1265f21126e3415d | 542 | py | Python | Q8/savefeatmap.py | avicennax/CSE190_src | 71e7a899d43e3b61aa48cfd5284c3dda75d20127 | [
"MIT"
] | null | null | null | Q8/savefeatmap.py | avicennax/CSE190_src | 71e7a899d43e3b61aa48cfd5284c3dda75d20127 | [
"MIT"
] | null | null | null | Q8/savefeatmap.py | avicennax/CSE190_src | 71e7a899d43e3b61aa48cfd5284c3dda75d20127 | [
"MIT"
] | null | null | null | import matplotlib as mt
mt.use('Agg')
import caffe
import numpy as np
import sys
if __name__ == "__main__":
# sys.argv[1] = net prefix
# sys.argv[2] = model prefix
# sys.argv[3] = model _iter_
# sys.argv[4] = output directory
if len(sys.argv) != 5:
raise ValueError("4 args required")
net = caffe.Net(sys.argv[1]+".prototxt", sys.argv[2]+"_iter_"+sys.argv[3]+".caffemodel", caffe.TEST)
c1 = net.params['conv1'][0].data
c2 = net.params['conv2'][0].data
np.save(sys.argv[4]+'c1map.npy', c1)
np.save(sys.argv[4]+'c2map.npy', c2)
| 24.636364 | 101 | 0.660517 | import matplotlib as mt
mt.use('Agg')
import caffe
import numpy as np
import sys
if __name__ == "__main__":
# sys.argv[1] = net prefix
# sys.argv[2] = model prefix
# sys.argv[3] = model _iter_
# sys.argv[4] = output directory
if len(sys.argv) != 5:
raise ValueError("4 args required")
net = caffe.Net(sys.argv[1]+".prototxt", sys.argv[2]+"_iter_"+sys.argv[3]+".caffemodel", caffe.TEST)
c1 = net.params['conv1'][0].data
c2 = net.params['conv2'][0].data
np.save(sys.argv[4]+'c1map.npy', c1)
np.save(sys.argv[4]+'c2map.npy', c2)
| 0 | 0 | 0 |
7395f00fb45248c8e8b93ae3c0832e5913efdf9c | 4,804 | py | Python | src/commercetools/services/stores.py | labd/commercetools-python-sdk | d8ec285f08d56ede2e4cad45c74833f5b609ab5c | [
"MIT"
] | 15 | 2018-11-02T14:35:52.000Z | 2022-03-16T07:51:44.000Z | src/commercetools/services/stores.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 84 | 2018-11-02T12:50:32.000Z | 2022-03-22T01:25:54.000Z | src/commercetools/services/stores.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 13 | 2019-01-03T09:16:50.000Z | 2022-02-15T18:37:19.000Z | # DO NOT EDIT! This file is automatically generated
import typing
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.platform.models.store import (
Store,
StoreDraft,
StorePagedQueryResponse,
StoreUpdate,
StoreUpdateAction,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class StoreService(abstract.AbstractService):
"""Stores let you model the context your customers shop in."""
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> StorePagedQueryResponse:
"""Stores let you model the context your customers shop in."""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"with_total": with_total,
"where": where,
"predicate_var": predicate_var,
},
_StoreQuerySchema,
)
return self._client._get(
endpoint="stores", params=params, response_class=StorePagedQueryResponse
)
def create(self, draft: StoreDraft, *, expand: OptionalListStr = None) -> Store:
"""Stores let you model the context your customers shop in."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="stores", params=params, data_object=draft, response_class=Store
)
| 30.598726 | 85 | 0.601374 | # DO NOT EDIT! This file is automatically generated
import typing
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.platform.models.store import (
Store,
StoreDraft,
StorePagedQueryResponse,
StoreUpdate,
StoreUpdateAction,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class _StoreQuerySchema(
traits.ExpandableSchema,
traits.SortableSchema,
traits.PagingSchema,
traits.QuerySchema,
):
pass
class _StoreUpdateSchema(traits.ExpandableSchema, traits.VersionedSchema):
pass
class _StoreDeleteSchema(traits.VersionedSchema, traits.ExpandableSchema):
pass
class StoreService(abstract.AbstractService):
"""Stores let you model the context your customers shop in."""
def get_by_id(self, id: str, *, expand: OptionalListStr = None) -> Store:
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"stores/{id}", params=params, response_class=Store
)
def get_by_key(self, key: str, *, expand: OptionalListStr = None) -> Store:
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"stores/key={key}", params=params, response_class=Store
)
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> StorePagedQueryResponse:
"""Stores let you model the context your customers shop in."""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"with_total": with_total,
"where": where,
"predicate_var": predicate_var,
},
_StoreQuerySchema,
)
return self._client._get(
endpoint="stores", params=params, response_class=StorePagedQueryResponse
)
def create(self, draft: StoreDraft, *, expand: OptionalListStr = None) -> Store:
"""Stores let you model the context your customers shop in."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="stores", params=params, data_object=draft, response_class=Store
)
def update_by_id(
self,
id: str,
version: int,
actions: typing.List[StoreUpdateAction],
*,
expand: OptionalListStr = None,
force_update: bool = False,
) -> Store:
params = self._serialize_params({"expand": expand}, _StoreUpdateSchema)
update_action = StoreUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"stores/{id}",
params=params,
data_object=update_action,
response_class=Store,
force_update=force_update,
)
def update_by_key(
self,
key: str,
version: int,
actions: typing.List[StoreUpdateAction],
*,
expand: OptionalListStr = None,
force_update: bool = False,
) -> Store:
params = self._serialize_params({"expand": expand}, _StoreUpdateSchema)
update_action = StoreUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"stores/key={key}",
params=params,
data_object=update_action,
response_class=Store,
force_update=force_update,
)
def delete_by_id(
self,
id: str,
version: int,
*,
expand: OptionalListStr = None,
force_delete: bool = False,
) -> Store:
params = self._serialize_params(
{"version": version, "expand": expand}, _StoreDeleteSchema
)
return self._client._delete(
endpoint=f"stores/{id}",
params=params,
response_class=Store,
force_delete=force_delete,
)
def delete_by_key(
self,
key: str,
version: int,
*,
expand: OptionalListStr = None,
force_delete: bool = False,
) -> Store:
params = self._serialize_params(
{"version": version, "expand": expand}, _StoreDeleteSchema
)
return self._client._delete(
endpoint=f"stores/key={key}",
params=params,
response_class=Store,
force_delete=force_delete,
)
| 2,589 | 244 | 231 |
c96ada2e1d2d45f3f0fcc2ab9ef34a8bfde8c9c6 | 1,049 | py | Python | routers/person.py | carlosjosecjr/desafio-backend | 8c7d70cc22f27fa9d25f85613507e3199c4b1b30 | [
"Unlicense"
] | null | null | null | routers/person.py | carlosjosecjr/desafio-backend | 8c7d70cc22f27fa9d25f85613507e3199c4b1b30 | [
"Unlicense"
] | null | null | null | routers/person.py | carlosjosecjr/desafio-backend | 8c7d70cc22f27fa9d25f85613507e3199c4b1b30 | [
"Unlicense"
] | null | null | null | import bcrypt
from fastapi import APIRouter
from fastapi import HTTPException
from peewee import IntegrityError
from db import Person as PersonORM
from db import SQLITE_DB
from schema import Person as PersonSchema
person = APIRouter()
@person.post("/person")
| 32.78125 | 110 | 0.684461 | import bcrypt
from fastapi import APIRouter
from fastapi import HTTPException
from peewee import IntegrityError
from db import Person as PersonORM
from db import SQLITE_DB
from schema import Person as PersonSchema
person = APIRouter()
@person.post("/person")
def receive_person(person_schema: PersonSchema):
try:
hashed_password = bcrypt.hashpw(bytes(person_schema.password, 'UTF-8'), bcrypt.gensalt())
hashed_password = hashed_password.decode('UTF-8')
with SQLITE_DB.atomic():
PersonORM.create(
user_id=person_schema.user_id,
first_name=person_schema.first_name,
last_name=person_schema.last_name,
birthday=person_schema.birthday,
username=person_schema.username,
password=hashed_password,
)
except IntegrityError:
raise HTTPException(status_code=401, detail="registered user: \'{}\'.".format(person_schema.username))
else:
return dict(message="registered successfully")
| 764 | 0 | 22 |
fde4fe62827e0a533171b144622cf4cc6a26ac41 | 2,524 | py | Python | arvestust/serializers/tests/comment.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 1 | 2021-09-17T23:45:27.000Z | 2021-09-17T23:45:27.000Z | arvestust/serializers/tests/comment.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 3 | 2020-07-25T05:40:54.000Z | 2020-08-11T04:01:19.000Z | arvestust/serializers/tests/comment.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.contrib.auth import get_user_model
from ..comment import Comment
| 33.653333 | 96 | 0.644216 | from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.contrib.auth import get_user_model
from ..comment import Comment
class CommentTestCase(APITestCase):
# The client used to connect to the API
client = APIClient()
def setUp(self):
"""
Prepare database and client.
"""
# API endpoint
self.namespace = '/v1/comments'
@classmethod
def setUpTestData(cls):
# Create users
cls.alice = get_user_model().objects.create(username="alice", email="alice@example.org")
cls.bob = get_user_model().objects.create(username="bob", email="bob@example.org")
# Create comments
# cls.comment1 = Comment.objects.create(...)
# cls.comment2 = Comment.objects.create(...)
# cls.comment3 = Comment.objects.create(...)
#################################################################
# Require authentication
def test_must_authenticate_to_read_comments(self):
res = self.client.get(self.namespace)
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_must_authenticate_to_create_comments(self):
res = self.client.post(self.namespace)
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
#################################################################
# Allowed requests
def test_create_comment(self):
self.client.force_authenticate(user=self.alice)
res = self.client.post(self.namespace, data={})
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
def test_list_comment(self):
self.client.force_authenticate(user=self.alice)
res = self.client.get(self.namespace)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_retrieve_comment(self):
self.client.force_authenticate(user=self.alice)
url = self.namespace + '/1'
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_update_comment(self):
self.client.force_authenticate(user=self.alice)
url = self.namespace + '/1'
res = self.client.patch(url, data={})
self.assertEqual(res.status_code, status.HTTP_202_ACCEPTED)
def test_delete_comment(self):
self.client.force_authenticate(user=self.alice)
url = self.namespace + '/1'
res = self.client.delete(url)
self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
| 1,678 | 656 | 23 |
d9c57548efc336293152f2f9666dc6125db58f5b | 535 | py | Python | magic_square.py | shabidkhan/magic_square | 636e6b76d6cbd9b053ff00917c02a14e4675dd4d | [
"MIT"
] | null | null | null | magic_square.py | shabidkhan/magic_square | 636e6b76d6cbd9b053ff00917c02a14e4675dd4d | [
"MIT"
] | null | null | null | magic_square.py | shabidkhan/magic_square | 636e6b76d6cbd9b053ff00917c02a14e4675dd4d | [
"MIT"
] | null | null | null | marks = [
[1, 0, 4, 8],
[0, 2, 0, 6],
[2, 4, 5, 2],
[9, 5, 8, 3]
]
d1=0
d2=0
c=[]
r=[]
for i in range(len(marks)):
r.append(0)
for j in range(len(marks)):
c.append(0)
if(i==j):
d1=marks[i][j]+d1
if (i+j==len(marks)-1):
d2+=marks[i][j]
r[i]+=marks[i][j]
c[j]+=marks[i][j]
print('r',i,'=',r[i])
print('c',i,'=',c[i])
print('d1=',d1)
print('d2=',d2)
for i in range(len(c)-1):
if(r[i]!=r[i+1] or c[i]!=c[i+1] or r[i]!=c[i] or d1!=d2):
print('not magic square')
break
else:
print('magic square')
| 16.71875 | 58 | 0.485981 | marks = [
[1, 0, 4, 8],
[0, 2, 0, 6],
[2, 4, 5, 2],
[9, 5, 8, 3]
]
d1=0
d2=0
c=[]
r=[]
for i in range(len(marks)):
r.append(0)
for j in range(len(marks)):
c.append(0)
if(i==j):
d1=marks[i][j]+d1
if (i+j==len(marks)-1):
d2+=marks[i][j]
r[i]+=marks[i][j]
c[j]+=marks[i][j]
print('r',i,'=',r[i])
print('c',i,'=',c[i])
print('d1=',d1)
print('d2=',d2)
for i in range(len(c)-1):
if(r[i]!=r[i+1] or c[i]!=c[i+1] or r[i]!=c[i] or d1!=d2):
print('not magic square')
break
else:
print('magic square')
| 0 | 0 | 0 |
25e380d33ad943e3e9070630faf13a60d0bc09c1 | 984 | py | Python | library/source1/vvd/header.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | [
"MIT"
] | 199 | 2019-04-02T02:30:58.000Z | 2022-03-30T21:29:49.000Z | library/source1/vvd/header.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | [
"MIT"
] | 113 | 2019-03-03T19:36:25.000Z | 2022-03-31T19:44:05.000Z | library/source1/vvd/header.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | [
"MIT"
] | 38 | 2019-05-15T16:49:30.000Z | 2022-03-22T03:40:43.000Z | from typing import List
from ...utils.byte_io_mdl import ByteIO
| 32.8 | 78 | 0.632114 | from typing import List
from ...utils.byte_io_mdl import ByteIO
class Header:
def __init__(self):
self.id = ""
self.version = 0
self.checksum = 0
self.lod_count = 0
self.lod_vertex_count = [] # type: List[int]
self.fixup_count = 0
self.fixup_table_offset = 0
self.vertex_data_offset = 0
self.tangent_data_offset = 0
def read(self, reader: ByteIO):
self.id = reader.read_fourcc()
if self.id != 'IDSV':
raise NotImplementedError('Invalid VVD magic {}!'.format(self.id))
self.version = reader.read_uint32()
self.checksum = reader.read_uint32()
self.lod_count = reader.read_uint32()
self.lod_vertex_count = reader.read_fmt("8I")
self.fixup_count = reader.read_uint32()
self.fixup_table_offset = reader.read_uint32()
self.vertex_data_offset = reader.read_uint32()
self.tangent_data_offset = reader.read_uint32()
| 850 | -8 | 76 |
0d60c40e37650182d37feba8ea9716e0f0242573 | 51 | py | Python | git_p.py | qszhuan/gity | 1659417a75f78fd47a8672a0411e70bdeb057af3 | [
"MIT"
] | null | null | null | git_p.py | qszhuan/gity | 1659417a75f78fd47a8672a0411e70bdeb057af3 | [
"MIT"
] | 1 | 2019-07-24T15:23:02.000Z | 2019-07-24T15:23:02.000Z | git_p.py | qszhuan/gity | 1659417a75f78fd47a8672a0411e70bdeb057af3 | [
"MIT"
] | null | null | null | import cli
if __name__ == "__main__":
cli.p()
| 10.2 | 26 | 0.607843 | import cli
if __name__ == "__main__":
cli.p()
| 0 | 0 | 0 |
2bf1dc7b4095e6103c2ef4116bd3b338beb3b002 | 3,112 | py | Python | test/run_notebook.py | awslabs/sagemaker-privacy-for-nlp | 899f178748401eaf2713cec83d37306f8a1327a8 | [
"Apache-2.0",
"CC0-1.0"
] | 10 | 2020-07-08T18:59:05.000Z | 2022-02-03T21:49:33.000Z | test/run_notebook.py | awslabs/sagemaker-privacy-for-nlp | 899f178748401eaf2713cec83d37306f8a1327a8 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | test/run_notebook.py | awslabs/sagemaker-privacy-for-nlp | 899f178748401eaf2713cec83d37306f8a1327a8 | [
"Apache-2.0",
"CC0-1.0"
] | 5 | 2021-01-06T07:12:20.000Z | 2022-01-27T17:24:36.000Z | from pathlib import Path
import os
import time
import logging
import boto3
import papermill as pm
import watchtower
from package import config, utils
if __name__ == "__main__":
run_on_start = False if config.TEST_OUTPUTS_S3_BUCKET == "" else True
if not run_on_start:
exit()
cfn_client = boto3.client('cloudformation', region_name=config.AWS_REGION)
# Set up logging through watchtower
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
log_group = "/aws/sagemaker/NotebookInstances"
stream_name = "{}/run-notebook.log".format(utils.get_notebook_name())
logger.addHandler(
watchtower.CloudWatchLogHandler(log_group=log_group, stream_name=stream_name))
# Add papermill logging to CloudWatch as well
pm_logger = logging.getLogger('papermill')
pm_logger.addHandler(
watchtower.CloudWatchLogHandler(log_group=log_group, stream_name=stream_name))
# Wait for the stack to finish launching
logger.info("Waiting for stack to finish launching...")
waiter = cfn_client.get_waiter('stack_create_complete')
waiter.wait(StackName=config.STACK_NAME)
logger.info("Starting notebook execution through papermill")
# Run the notebook
bucket = config.TEST_OUTPUTS_S3_BUCKET
solution_notebooks = [
"1.Data_Privatization",
"2.Model_Training"
]
kernel_name = 'python3'
test_prefix = "/home/ec2-user/SageMaker/test/"
notebooks_directory = '/home/ec2-user/SageMaker/sagemaker/'
for notebook_name in solution_notebooks:
start_time = time.time()
stdout_path = os.path.join(test_prefix, "{}-output_stdout.txt".format(notebook_name))
stderr_path = os.path.join(test_prefix, "{}-output_stderr.txt".format(notebook_name))
with open(stdout_path, 'w') as stdoutfile, open(stderr_path, 'w') as stderrfile:
output_notebook_path = "{}-output.ipynb".format(os.path.join(test_prefix, notebook_name))
try:
nb = pm.execute_notebook(
"{}.ipynb".format(os.path.join(notebooks_directory, notebook_name)),
output_notebook_path,
cwd=notebooks_directory,
kernel_name=kernel_name,
stdout_file=stdoutfile, stderr_file=stderrfile, log_output=True
)
except pm.PapermillExecutionError as err:
logger.warn("Notebook {} encountered execution error: {}".format(notebook_name, err))
raise
finally:
end_time = time.time()
logger.info("Notebook {} execution time: {} sec.".format(notebook_name, end_time - start_time))
s3 = boto3.resource('s3')
# Upload notebook output file to S3
s3.meta.client.upload_file(output_notebook_path, bucket, Path(output_notebook_path).name)
s3.meta.client.upload_file(stdout_path, bucket, Path(stdout_path).name)
s3.meta.client.upload_file(stderr_path, bucket, Path(stderr_path).name)
| 39.392405 | 111 | 0.666774 | from pathlib import Path
import os
import time
import logging
import boto3
import papermill as pm
import watchtower
from package import config, utils
if __name__ == "__main__":
run_on_start = False if config.TEST_OUTPUTS_S3_BUCKET == "" else True
if not run_on_start:
exit()
cfn_client = boto3.client('cloudformation', region_name=config.AWS_REGION)
# Set up logging through watchtower
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
log_group = "/aws/sagemaker/NotebookInstances"
stream_name = "{}/run-notebook.log".format(utils.get_notebook_name())
logger.addHandler(
watchtower.CloudWatchLogHandler(log_group=log_group, stream_name=stream_name))
# Add papermill logging to CloudWatch as well
pm_logger = logging.getLogger('papermill')
pm_logger.addHandler(
watchtower.CloudWatchLogHandler(log_group=log_group, stream_name=stream_name))
# Wait for the stack to finish launching
logger.info("Waiting for stack to finish launching...")
waiter = cfn_client.get_waiter('stack_create_complete')
waiter.wait(StackName=config.STACK_NAME)
logger.info("Starting notebook execution through papermill")
# Run the notebook
bucket = config.TEST_OUTPUTS_S3_BUCKET
solution_notebooks = [
"1.Data_Privatization",
"2.Model_Training"
]
kernel_name = 'python3'
test_prefix = "/home/ec2-user/SageMaker/test/"
notebooks_directory = '/home/ec2-user/SageMaker/sagemaker/'
for notebook_name in solution_notebooks:
start_time = time.time()
stdout_path = os.path.join(test_prefix, "{}-output_stdout.txt".format(notebook_name))
stderr_path = os.path.join(test_prefix, "{}-output_stderr.txt".format(notebook_name))
with open(stdout_path, 'w') as stdoutfile, open(stderr_path, 'w') as stderrfile:
output_notebook_path = "{}-output.ipynb".format(os.path.join(test_prefix, notebook_name))
try:
nb = pm.execute_notebook(
"{}.ipynb".format(os.path.join(notebooks_directory, notebook_name)),
output_notebook_path,
cwd=notebooks_directory,
kernel_name=kernel_name,
stdout_file=stdoutfile, stderr_file=stderrfile, log_output=True
)
except pm.PapermillExecutionError as err:
logger.warn("Notebook {} encountered execution error: {}".format(notebook_name, err))
raise
finally:
end_time = time.time()
logger.info("Notebook {} execution time: {} sec.".format(notebook_name, end_time - start_time))
s3 = boto3.resource('s3')
# Upload notebook output file to S3
s3.meta.client.upload_file(output_notebook_path, bucket, Path(output_notebook_path).name)
s3.meta.client.upload_file(stdout_path, bucket, Path(stdout_path).name)
s3.meta.client.upload_file(stderr_path, bucket, Path(stderr_path).name)
| 0 | 0 | 0 |
69ab093be724156c5703a75aaa907329e62d25f1 | 1,419 | py | Python | mojeid_registration/tests/test_mojeid_registration.py | silverhound4d/cz_nic | 9ed7529a6e2a4881d6151072d78ca9c9e33ac86e | [
"Unlicense"
] | null | null | null | mojeid_registration/tests/test_mojeid_registration.py | silverhound4d/cz_nic | 9ed7529a6e2a4881d6151072d78ca9c9e33ac86e | [
"Unlicense"
] | null | null | null | mojeid_registration/tests/test_mojeid_registration.py | silverhound4d/cz_nic | 9ed7529a6e2a4881d6151072d78ca9c9e33ac86e | [
"Unlicense"
] | null | null | null | from mojeid_registration.page_objects.reg_page import RegistrationPage
from mojeid_registration.config import config as c
| 38.351351 | 85 | 0.718111 | from mojeid_registration.page_objects.reg_page import RegistrationPage
from mojeid_registration.config import config as c
class Test_003_Registration:
text_username_id = "id_username"
text_firstname_id = "id_first_name"
text_lastname_id = "id_last_name"
text_email_id = "id_email-default-email"
text_countrycode_id = "id_phone-default-number_0"
text_phonenumber_id = "id_phone-default-number_1"
text_addres_street_id = "id_address-default-street1"
text_city_id = "id_address-default-city"
text_zipcode_id = "id_address-default-postal_code"
select_country_id = "id_address-default-country"
text_captcha_id = "id_captcha_1"
checkbox_confirmation_id = "id_confirmation"
def test_captcha(self, setup):
reg = RegistrationPage(setup)
reg.driver.get("https://mojeid.regtest.nic.cz/registration/")
reg.set_username(c.username)
reg.set_first_name(c.firstname)
reg.set_last_name(c.lastname)
reg.set_email(c.email)
reg.set_country_code(c.countrycode)
reg.set_phone_number(c.phonenumber)
reg.set_street(c.addres_street)
reg.set_city(c.city)
reg.set_zip(c.zipcode)
reg.set_country(c.country)
reg.set_captcha(c.captcha)
reg.check_confirmation()
reg.submit_form()
assert "Kontrolní kód nesouhlasí, zkuste to znovu." in reg.driver.page_source
| 669 | 598 | 23 |
b335e0180c38c2d8bb0d5ab26b2d8af5c77aa46f | 9,509 | py | Python | implicitresnet/solvers/linear.py | vreshniak/ImplicitResNet | 62e3c2f047f2572a0d0a0ee7cd3c8dd6e340080e | [
"MIT"
] | 2 | 2021-01-01T00:42:17.000Z | 2021-01-01T17:32:01.000Z | implicitresnet/solvers/linear.py | vreshniak/ImplicitResNet | 62e3c2f047f2572a0d0a0ee7cd3c8dd6e340080e | [
"MIT"
] | null | null | null | implicitresnet/solvers/linear.py | vreshniak/ImplicitResNet | 62e3c2f047f2572a0d0a0ee7cd3c8dd6e340080e | [
"MIT"
] | null | null | null | # import warnings
import torch
# from .nonlinear import lbfgs
# import scipy.sparse.linalg as sla
# import numpy as np
def rotmat(a, b):
    """
    Adapted from http://www.netlib.org/templates/matlab/rotmat.m
    Compute the Givens rotation parameters (c, s) elementwise for (a, b).

    Returns tensors of the same shape as the inputs; wherever b == 0 the
    rotation is the identity (c = 1, s = 0).
    """
    # Compute both branches densely and select with torch.where; substitute
    # harmless denominators first so unselected lanes do not divide by zero.
    safe_a = torch.where(a == 0, torch.ones_like(a), a)
    safe_b = torch.where(b == 0, torch.ones_like(b), b)
    # Branch for |b| > |a|: parametrize by t = a/b (|t| < 1).
    t_over = a / safe_b
    s_over = 1.0 / torch.sqrt(1.0 + t_over**2)
    c_over = t_over * s_over
    # Branch for |b| <= |a|: parametrize by t = b/a (|t| <= 1).
    t_under = b / safe_a
    c_under = 1.0 / torch.sqrt(1.0 + t_under**2)
    s_under = t_under * c_under
    b_dominates = b.abs() > a.abs()
    c = torch.where(b_dominates, c_over, c_under)
    s = torch.where(b_dominates, s_over, s_under)
    # b == 0: identity rotation overrides everything above.
    c = torch.where(b == 0, torch.ones_like(c), c)
    s = torch.where(b == 0, torch.zeros_like(s), s)
    return c, s
def gmres( A, x, b, max_iters=None, min_iters=3, max_restarts=1, tol=None, M=None ):
    """
    Adapted from http://www.netlib.org/templates/matlab/gmres.m

    %  -- Iterative template routine --
    %     Univ. of Tennessee and Oak Ridge National Laboratory
    %     October 1, 1993
    %     Details of this algorithm are described in "Templates for the
    %     Solution of Linear Systems: Building Blocks for Iterative
    %     Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra,
    %     Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications,
    %     1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps).
    %
    % [x, error, iter, flag] = gmres( A, x, b, M, restrt, max_it, tol )
    %
    % gmres.m solves the linear system Ax=b
    % using the Generalized Minimal residual ( GMRESm ) method with restarts .
    %
    % input   A        REAL nonsymmetric positive definite matrix
    %         x        REAL initial guess vector
    %         b        REAL right hand side vector
    %         M        REAL preconditioner matrix
    %         max_iters    INTEGER number of iterations between restarts
    %         max_restarts INTEGER maximum number of iterations
    %         tol      REAL error tolerance
    %
    % output  x        REAL solution vector
    %         error    REAL error norm
    %         iter     INTEGER number of iterations performed
    %         flag     INTEGER: 0 = solution found to tolerance
    %                           1 = no convergence given max_it

    In this torch port A and M are callables mapping (batch, n) tensors to
    (batch, n) tensors; the whole batch is solved simultaneously and a single
    scalar `error` (worst sample) controls convergence.
    """
    # dummy preconditioner (might replace with something real later)
    if M is None: M = lambda x: x
    assert x.ndim==2, "x must have batch dimension, x.ndim = "+str(x.ndim)
    assert b.ndim==2, "b must have batch dimension, b.ndim = "+str(b.ndim)
    # dimensions, dtype and device of the problem
    batch_dim = x.size(0)
    n = x.size(1)
    dtype = x.dtype
    device = x.device
    if n==1:
        # 1x1 system: probe the scalar action of A with ones; eps guards /0
        x = b / (A(torch.ones_like(x))+1.e-12)
        r = M(b-A(x))
        return x, r.norm(dim=1).amax(), 1, 0
    if tol is None: tol = 1*torch.finfo(dtype).eps
    # absolute tolerance, scaled by the largest per-sample RHS norm
    tol = max(tol*b.norm(dim=1).amax(), tol)
    # set max_iters if not given, and perform sanity checks
    assert max_restarts>0, "max_restarts must be greater than 0, max_restarts = "+str(max_restarts)
    assert max_restarts<=n, "max_restarts should not exceed size of the problem n, max_restarts = "+str(max_restarts)
    if max_iters is None: max_iters = n//max_restarts
    if max_iters<n:
        max_restarts = n//max_iters + 1
    elif max_iters>=n:
        # a full-size Krylov space needs no restarts
        max_iters = n
        max_restarts = 1
    # initialization
    iters = 0
    flag = 0
    # norm of the RHS (zero norms replaced by 1 to avoid division by zero)
    bnrm2 = b.norm(dim=1)
    bnrm2[bnrm2==0.0] = 1.0
    # terminate if tolerance achieved
    # r = M(b-A(x))
    # error = r.norm(dim=1) / bnrm2
    # error = r.norm(dim=1)
    # if error.amax()<tol: return x, error.amax(), iters, flag
    # initialize workspace
    # orthogonal basis matrix of the Krylov subspace
    Q = torch.zeros((batch_dim,n,max_iters+1), dtype=dtype, device=device)
    # H is upper Hessenberg matrix, H is A on basis Q
    H = torch.zeros((batch_dim,max_iters+1,max_iters), dtype=dtype, device=device)
    # cosines and sines of the rotation matrix
    cs = torch.zeros((batch_dim,max_iters,), dtype=dtype, device=device)
    sn = torch.zeros((batch_dim,max_iters,), dtype=dtype, device=device)
    # first canonical basis vector, used to seed the rotated-residual vector s
    e1 = torch.zeros((batch_dim,n+1,), dtype=dtype, device=device)
    e1[:,0] = 1.0
    # perform outer iterations
    for _ in range(max_restarts):
        # preconditioned residual of the current iterate
        r = M(b-A(x))
        rnrm2 = r.norm(dim=1,keepdim=True)
        rnrm2[rnrm2==0.0] = 1.0
        # first basis vector
        Q[...,0] = r / rnrm2
        s = rnrm2 * e1
        # restart method and perform inner iterations
        for i in range(max_iters):
            iters += 1
            ################################################
            # find next basis vector with Arnoldi iteration
            # (i+1)-st Krylov vector
            w = M(A(Q[...,i]))
            # Gram-Schmidt othogonalization
            for k in range(i+1):
                H[:,k,i] = (w*Q[...,k]).sum(dim=1)
                w -= H[:,k,i].unsqueeze(1) * Q[...,k]
            w += 1.e-12 # to make possible 0/0=1 (Why can this happen?)
            H[:,i+1,i] = w.norm(dim=1)
            # (i+1)-st basis vector
            Q[:,:,i+1] = w / H[:,i+1,i].unsqueeze(1)
            ################################################
            # apply Givens rotation to eliminate the last element in H ith row
            # rotate kth column
            for k in range(i):
                temp       =  cs[:,k]*H[:,k,i] + sn[:,k]*H[:,k+1,i]
                H[:,k+1,i] = -sn[:,k]*H[:,k,i] + cs[:,k]*H[:,k+1,i]
                H[:,k,i]   = temp
            # form i-th rotation matrix
            cs[:,i], sn[:,i] = rotmat( H[:,i,i], H[:,i+1,i] )
            # eliminate H[i+1,i]
            H[:,i,i] = cs[:,i]*H[:,i,i] + sn[:,i]*H[:,i+1,i]
            H[:,i+1,i] = 0.0
            ################################################
            # update the residual vector
            s[:,i+1] = -sn[:,i]*s[:,i]
            s[:,i]   = cs[:,i]*s[:,i]
            # |s[:,i+1]| is the current residual norm of each sample
            error = s[:,i+1].abs().amax()
            # yy, _ = torch.triangular_solve(s[:,:i+1].unsqueeze(2), H[:,:i+1,:i+1], upper=True)
            # xx = torch.baddbmm(x.unsqueeze(2), Q[:,:,:i+1], yy).squeeze(2)
            # error = (s[:,i+1].abs()/bnrm2).amax()
            # print(i, "%.2e"%(error.item()), "%.2e"%((M(b-A(xx)).norm(dim=1)).amax().item()))
            if error<tol and (i+1)>min_iters: break
        # update approximation: solve the small triangular system for the
        # Krylov coefficients, then expand back to the full space.
        # NOTE(review): torch.triangular_solve is deprecated in favor of
        # torch.linalg.solve_triangular -- confirm the supported torch versions.
        y, _ = torch.triangular_solve(s[:,:i+1].unsqueeze(2), H[:,:i+1,:i+1], upper=True)
        # x <- x + Q[:,:,:i+1] @ y, batched over dim 0
        x = torch.baddbmm(x.unsqueeze(2), Q[:,:,:i+1], y).squeeze(2)
        # true residual of the updated iterate decides whether to restart
        r = M(b-A(x))
        error = r.norm(dim=1).amax()
        # s[:,i+1] = r.norm(dim=1)
        # error = (s[:,i+1].abs() / bnrm2).amax()
        if error<tol: break
    if error>tol: flag = 1
    return x, error, iters, flag
# class neumann_backprop(Function):
# @staticmethod
# def forward(ctx, y, y_fp):
# # ctx.obj = obj
# ctx.save_for_backward(y, y_fp)
# return y
# @staticmethod
# def backward(ctx, dy):
# y, y_fp, = ctx.saved_tensors
# # residual = lambda dx: (dx-A_dot(dx)-dy).flatten().norm() # \| (I-A) * dx - dy \|
# A_dot = lambda x: torch.autograd.grad(y_fp, y, grad_outputs=x, retain_graph=True, only_inputs=True)[0]
# residual = lambda Adx: (Adx-dy).reshape((dy.size()[0],-1)).norm(dim=1).max() #.flatten().norm() # \| (I-A) * dx - dy \|
# tol = atol = torch.tensor(_TOL)
# TOL = torch.max(tol*dy.norm(), atol)
# #######################################################################
# # Neumann series
# dx = dy
# Ady = A_dot(dy)
# Adx = Ady
# r1 = residual(dx-Adx)
# neu_iters = 1
# while r1>=TOL and neu_iters<_max_iters:
# r0 = r1
# dx = dx + Ady
# Ady = A_dot(Ady)
# Adx = Adx + Ady
# r1 = residual(dx-Adx)
# neu_iters += 1
# assert r1<r0, "Neumann series hasn't converged at iteration "+str(neu_iters)+" out of "+str(_max_iters)+" max iterations"
# if _collect_stat:
# global _backward_stat
# _backward_stat['steps'] = _backward_stat.get('steps',0) + 1
# _backward_stat['neu_residual'] = _backward_stat.get('neu_residual',0) + r1
# _backward_stat['neu_iters'] = _backward_stat.get('neu_iters',0) + neu_iters
# return None, dx
| 31.075163 | 142 | 0.600694 | # import warnings
import torch
# from .nonlinear import lbfgs
# import scipy.sparse.linalg as sla
# import numpy as np
def rotmat(a,b):
    """
    Adapted from http://www.netlib.org/templates/matlab/rotmat.m
    Compute the Givens rotation matrix parameters for a and b.

    a, b : same-shape tensors. Returns (c, s) of the same shape; wherever
    b == 0 the rotation is the identity (c = 1, s = 0). The caller (gmres)
    uses [[c, s], [-s, c]] to zero the subdiagonal entry of H.
    """
    c = torch.zeros_like(a)
    s = torch.zeros_like(a)
    temp = torch.zeros_like(a)
    # case |b| > |a|: parametrize by t = a/b so that |t| < 1
    mask = (b.abs()>a.abs())
    temp[mask] = a[mask] / b[mask]
    s[mask] = 1.0 / torch.sqrt(1.0+temp[mask]**2)
    c[mask] = temp[mask] * s[mask]
    # case |b| <= |a|: parametrize by t = b/a
    mask = (b.abs()<=a.abs())
    temp[mask] = b[mask] / a[mask]
    c[mask] = 1.0 / torch.sqrt(1.0+temp[mask]**2)
    s[mask] = temp[mask] * c[mask]
    # b == 0: identity rotation; applied last, this also repairs any 0/0
    # (NaN) produced by the branch above when a == b == 0.
    mask = (b==0)
    c[mask] = 1.0
    s[mask] = 0.0
    # reference scalar version:
    # if b==0.0:
    # 	c = 1.0
    # 	s = 0.0
    # elif b.abs()>a.abs():
    # 	temp = a / b
    # 	s    = 1.0 / torch.sqrt(1.0+temp**2)
    # 	c    = temp * s
    # else:
    # 	temp = b / a
    # 	c    = 1.0 / torch.sqrt(1.0+temp**2)
    # 	s    = temp * c
    return c, s
def gmres( A, x, b, max_iters=None, min_iters=3, max_restarts=1, tol=None, M=None ):
    """
    Adapted from http://www.netlib.org/templates/matlab/gmres.m

    %  -- Iterative template routine --
    %     Univ. of Tennessee and Oak Ridge National Laboratory
    %     October 1, 1993
    %     Details of this algorithm are described in "Templates for the
    %     Solution of Linear Systems: Building Blocks for Iterative
    %     Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra,
    %     Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications,
    %     1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps).
    %
    % [x, error, iter, flag] = gmres( A, x, b, M, restrt, max_it, tol )
    %
    % gmres.m solves the linear system Ax=b
    % using the Generalized Minimal residual ( GMRESm ) method with restarts .
    %
    % input   A        REAL nonsymmetric positive definite matrix
    %         x        REAL initial guess vector
    %         b        REAL right hand side vector
    %         M        REAL preconditioner matrix
    %         max_iters    INTEGER number of iterations between restarts
    %         max_restarts INTEGER maximum number of iterations
    %         tol      REAL error tolerance
    %
    % output  x        REAL solution vector
    %         error    REAL error norm
    %         iter     INTEGER number of iterations performed
    %         flag     INTEGER: 0 = solution found to tolerance
    %                           1 = no convergence given max_it

    In this torch port A and M are callables mapping (batch, n) tensors to
    (batch, n) tensors; the whole batch is solved simultaneously and a single
    scalar `error` (worst sample) controls convergence.
    """
    # dummy preconditioner (might replace with something real later)
    if M is None: M = lambda x: x
    assert x.ndim==2, "x must have batch dimension, x.ndim = "+str(x.ndim)
    assert b.ndim==2, "b must have batch dimension, b.ndim = "+str(b.ndim)
    # dimensions, dtype and device of the problem
    batch_dim = x.size(0)
    n = x.size(1)
    dtype = x.dtype
    device = x.device
    if n==1:
        # 1x1 system: probe the scalar action of A with ones; eps guards /0
        x = b / (A(torch.ones_like(x))+1.e-12)
        r = M(b-A(x))
        return x, r.norm(dim=1).amax(), 1, 0
    if tol is None: tol = 1*torch.finfo(dtype).eps
    # absolute tolerance, scaled by the largest per-sample RHS norm
    tol = max(tol*b.norm(dim=1).amax(), tol)
    # set max_iters if not given, and perform sanity checks
    assert max_restarts>0, "max_restarts must be greater than 0, max_restarts = "+str(max_restarts)
    assert max_restarts<=n, "max_restarts should not exceed size of the problem n, max_restarts = "+str(max_restarts)
    if max_iters is None: max_iters = n//max_restarts
    if max_iters<n:
        max_restarts = n//max_iters + 1
    elif max_iters>=n:
        # a full-size Krylov space needs no restarts
        max_iters = n
        max_restarts = 1
    # initialization
    iters = 0
    flag = 0
    # norm of the RHS (zero norms replaced by 1 to avoid division by zero)
    bnrm2 = b.norm(dim=1)
    bnrm2[bnrm2==0.0] = 1.0
    # terminate if tolerance achieved
    # r = M(b-A(x))
    # error = r.norm(dim=1) / bnrm2
    # error = r.norm(dim=1)
    # if error.amax()<tol: return x, error.amax(), iters, flag
    # initialize workspace
    # orthogonal basis matrix of the Krylov subspace
    Q = torch.zeros((batch_dim,n,max_iters+1), dtype=dtype, device=device)
    # H is upper Hessenberg matrix, H is A on basis Q
    H = torch.zeros((batch_dim,max_iters+1,max_iters), dtype=dtype, device=device)
    # cosines and sines of the rotation matrix
    cs = torch.zeros((batch_dim,max_iters,), dtype=dtype, device=device)
    sn = torch.zeros((batch_dim,max_iters,), dtype=dtype, device=device)
    # first canonical basis vector, used to seed the rotated-residual vector s
    e1 = torch.zeros((batch_dim,n+1,), dtype=dtype, device=device)
    e1[:,0] = 1.0
    # perform outer iterations
    for _ in range(max_restarts):
        # preconditioned residual of the current iterate
        r = M(b-A(x))
        rnrm2 = r.norm(dim=1,keepdim=True)
        rnrm2[rnrm2==0.0] = 1.0
        # first basis vector
        Q[...,0] = r / rnrm2
        s = rnrm2 * e1
        # restart method and perform inner iterations
        for i in range(max_iters):
            iters += 1
            ################################################
            # find next basis vector with Arnoldi iteration
            # (i+1)-st Krylov vector
            w = M(A(Q[...,i]))
            # Gram-Schmidt othogonalization
            for k in range(i+1):
                H[:,k,i] = (w*Q[...,k]).sum(dim=1)
                w -= H[:,k,i].unsqueeze(1) * Q[...,k]
            w += 1.e-12 # to make possible 0/0=1 (Why can this happen?)
            H[:,i+1,i] = w.norm(dim=1)
            # (i+1)-st basis vector
            Q[:,:,i+1] = w / H[:,i+1,i].unsqueeze(1)
            ################################################
            # apply Givens rotation to eliminate the last element in H ith row
            # rotate kth column
            for k in range(i):
                temp       =  cs[:,k]*H[:,k,i] + sn[:,k]*H[:,k+1,i]
                H[:,k+1,i] = -sn[:,k]*H[:,k,i] + cs[:,k]*H[:,k+1,i]
                H[:,k,i]   = temp
            # form i-th rotation matrix
            cs[:,i], sn[:,i] = rotmat( H[:,i,i], H[:,i+1,i] )
            # eliminate H[i+1,i]
            H[:,i,i] = cs[:,i]*H[:,i,i] + sn[:,i]*H[:,i+1,i]
            H[:,i+1,i] = 0.0
            ################################################
            # update the residual vector
            s[:,i+1] = -sn[:,i]*s[:,i]
            s[:,i]   = cs[:,i]*s[:,i]
            # |s[:,i+1]| is the current residual norm of each sample
            error = s[:,i+1].abs().amax()
            # yy, _ = torch.triangular_solve(s[:,:i+1].unsqueeze(2), H[:,:i+1,:i+1], upper=True)
            # xx = torch.baddbmm(x.unsqueeze(2), Q[:,:,:i+1], yy).squeeze(2)
            # error = (s[:,i+1].abs()/bnrm2).amax()
            # print(i, "%.2e"%(error.item()), "%.2e"%((M(b-A(xx)).norm(dim=1)).amax().item()))
            if error<tol and (i+1)>min_iters: break
        # update approximation: solve the small triangular system for the
        # Krylov coefficients, then expand back to the full space.
        # NOTE(review): torch.triangular_solve is deprecated in favor of
        # torch.linalg.solve_triangular -- confirm the supported torch versions.
        y, _ = torch.triangular_solve(s[:,:i+1].unsqueeze(2), H[:,:i+1,:i+1], upper=True)
        # x <- x + Q[:,:,:i+1] @ y, batched over dim 0
        x = torch.baddbmm(x.unsqueeze(2), Q[:,:,:i+1], y).squeeze(2)
        # true residual of the updated iterate decides whether to restart
        r = M(b-A(x))
        error = r.norm(dim=1).amax()
        # s[:,i+1] = r.norm(dim=1)
        # error = (s[:,i+1].abs() / bnrm2).amax()
        if error<tol: break
    if error>tol: flag = 1
    return x, error, iters, flag
def scipy_lgmres( A, x, b, max_iters=None, tol=None, M=None ):
    """
    Solve A(x) = b with SciPy's LGMRES, wrapping the torch operator A.

    A          callable mapping a (batch, n) tensor to a (batch, n) tensor
    x          initial guess, shape (batch, n)
    b          right-hand side, shape (batch, n)
    max_iters  LGMRES iteration cap (clamped to n)
    tol        relative tolerance; defaults to 10 * machine eps of x.dtype
    M          unused; kept for signature compatibility with gmres()

    Returns (x, error, iters, flag) like gmres(): `iters` counts applications
    of A inside the solver, `flag` is 1 when the final residual exceeds
    tol * max_i ||b_i||.
    """
    # The module-level `import numpy` / `import scipy.sparse.linalg` lines are
    # commented out at the top of this file, so bring the names in locally.
    import numpy as np
    import scipy.sparse.linalg as sla
    assert x.ndim==2, "x must have batch dimension, x.ndim = "+str(x.ndim)
    assert b.ndim==2, "b must have batch dimension, b.ndim = "+str(b.ndim)
    # dimensions and dtype of the problem
    n = x.size(1)
    ndof = x.nelement()
    dtype = x.dtype
    iters = [0]  # mutable cell so matvec() below can count operator calls
    flag = 0
    if max_iters is None: max_iters = n
    if max_iters>n:
        max_iters = n
    if tol is None: tol = 10*torch.finfo(dtype).eps
    # torch to numpy dtypes (np.float was removed in NumPy 1.24; torch.float
    # is an alias of torch.float32, so two entries suffice)
    numpy_dtype = {torch.float32: np.float32, torch.float64: np.float64}
    # 'Matrix-vector' product of the linear operator
    def matvec(v):
        iters[0] += 1
        v0 = torch.from_numpy(v).view_as(x).to(device=x.device, dtype=x.dtype)
        return A(v0).cpu().detach().numpy().ravel()
    A_dot = sla.LinearOperator(dtype=numpy_dtype[b.dtype], shape=(ndof,ndof), matvec=matvec)
    b_np = b.cpu().detach().numpy().ravel()
    # use the supplied initial guess (previously x0 was set to b, ignoring x)
    x0_np = x.cpu().detach().numpy().ravel()
    # Note that norm(residual) <= max(tol*norm(b), atol). See https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lgmres.html
    try:
        # SciPy >= 1.12 renamed `tol` to `rtol` (removed in 1.14)
        x_np, info = sla.lgmres( A_dot, b_np, x0=x0_np, maxiter=max_iters, rtol=tol, atol=0.0, M=None )
    except TypeError:
        # older SciPy without the rtol keyword
        x_np, info = sla.lgmres( A_dot, b_np, x0=x0_np, maxiter=max_iters, tol=tol, M=None )
    x = torch.from_numpy(x_np).view_as(b).to(device=b.device, dtype=b.dtype)
    error = (A(x)-b).reshape((b.size(0),-1)).norm(dim=1).max().detach() # \| A*x - b \|
    # per-sample norms, consistent with gmres() (was b.norm(1): global 1-norm)
    if error>tol*b.norm(dim=1).amax(): flag=1
    return x, error, iters[0], flag
def linsolve(A, x0, b, method='gmres', **kwargs):
    """
    Dispatch a linear solve A(x) = b to the chosen backend.

    method: 'gmres' (default), 'scipy_lgmres', or 'lbfgs' (least-squares
    reformulation; requires the `lbfgs` import from .nonlinear, which is
    currently commented out at the top of this module).

    Raises ValueError for an unknown method (previously the call fell
    through and silently returned None).
    """
    if method=='gmres':
        return gmres( A, x0, b, **kwargs )
    elif method=='scipy_lgmres':
        return scipy_lgmres( A, x0, b, **kwargs )
    elif method=='lbfgs':
        # batch size is dim 0 throughout this module (was x0.size(1))
        batch_dim = x0.size(0)
        return lbfgs( lambda x: (A(x)-b).pow(2).reshape((batch_dim,-1)).sum(dim=1), x0, **kwargs )
    else:
        raise ValueError("unknown linear solver method: "+str(method))
# class neumann_backprop(Function):
# @staticmethod
# def forward(ctx, y, y_fp):
# # ctx.obj = obj
# ctx.save_for_backward(y, y_fp)
# return y
# @staticmethod
# def backward(ctx, dy):
# y, y_fp, = ctx.saved_tensors
# # residual = lambda dx: (dx-A_dot(dx)-dy).flatten().norm() # \| (I-A) * dx - dy \|
# A_dot = lambda x: torch.autograd.grad(y_fp, y, grad_outputs=x, retain_graph=True, only_inputs=True)[0]
# residual = lambda Adx: (Adx-dy).reshape((dy.size()[0],-1)).norm(dim=1).max() #.flatten().norm() # \| (I-A) * dx - dy \|
# tol = atol = torch.tensor(_TOL)
# TOL = torch.max(tol*dy.norm(), atol)
# #######################################################################
# # Neumann series
# dx = dy
# Ady = A_dot(dy)
# Adx = Ady
# r1 = residual(dx-Adx)
# neu_iters = 1
# while r1>=TOL and neu_iters<_max_iters:
# r0 = r1
# dx = dx + Ady
# Ady = A_dot(Ady)
# Adx = Adx + Ady
# r1 = residual(dx-Adx)
# neu_iters += 1
# assert r1<r0, "Neumann series hasn't converged at iteration "+str(neu_iters)+" out of "+str(_max_iters)+" max iterations"
# if _collect_stat:
# global _backward_stat
# _backward_stat['steps'] = _backward_stat.get('steps',0) + 1
# _backward_stat['neu_residual'] = _backward_stat.get('neu_residual',0) + r1
# _backward_stat['neu_iters'] = _backward_stat.get('neu_iters',0) + neu_iters
# return None, dx
| 1,718 | 0 | 46 |
7dca872fb6e827f89e4706178e94445562d46fc4 | 591 | py | Python | anubis/site.py | pekrau/Anubis | 32290f243a12f37c08f2b28a189011e98ac6e709 | [
"MIT"
] | 2 | 2020-12-24T21:04:30.000Z | 2021-12-14T17:09:39.000Z | anubis/site.py | pekrau/Anubis | 32290f243a12f37c08f2b28a189011e98ac6e709 | [
"MIT"
] | 474 | 2019-11-05T12:38:14.000Z | 2022-03-30T12:55:26.000Z | anubis/site.py | pekrau/Anubis | 32290f243a12f37c08f2b28a189011e98ac6e709 | [
"MIT"
] | null | null | null | "Endpoint for site-specific static files."
import http.client
import os.path
import flask
blueprint = flask.Blueprint('site', __name__)

@blueprint.route('/static/<filename>')
def static(filename):
    """Serve the named file from the site-specific static directory.

    Raises ValueError when SITE_STATIC_DIR is not configured at all, and
    aborts with 404 when the configured path expands to an empty string.
    """
    configured = flask.current_app.config['SITE_STATIC_DIR']
    if not configured:
        raise ValueError('misconfiguration: SITE_STATIC_DIR not set')
    # Resolve '~' and $ENVIRONMENT_VARIABLES in the configured path.
    resolved = os.path.expandvars(os.path.expanduser(configured))
    if not resolved:
        # e.g. the whole setting was an environment variable that expanded
        # to nothing
        flask.abort(http.client.NOT_FOUND)
    return flask.send_from_directory(resolved, filename)
| 26.863636 | 69 | 0.72758 | "Endpoint for site-specific static files."
import http.client
import os.path
import flask
blueprint = flask.Blueprint('site', __name__)

@blueprint.route('/static/<filename>')
def static(filename):
    "Return the given site-specific static file."
    # Directory for site-specific static assets; an unset/empty setting is a
    # deployment error, hence the hard ValueError below.
    dirpath = flask.current_app.config['SITE_STATIC_DIR']
    if not dirpath:
        raise ValueError('misconfiguration: SITE_STATIC_DIR not set')
    # Resolve '~' and $ENVIRONMENT_VARIABLES in the configured path.
    dirpath = os.path.expandvars(os.path.expanduser(dirpath))
    if dirpath:
        return flask.send_from_directory(dirpath, filename)
    else:
        # The path expanded to an empty string (e.g. an empty env var) -> 404.
        flask.abort(http.client.NOT_FOUND)
| 0 | 0 | 0 |
ed09571c4ecb14ddfbe50176211502c50462ebe0 | 8,769 | py | Python | Ch04.py | econcarol/ISLR | 497b65efdab5291111b8d8472606ea4bc40bba30 | [
"MIT"
] | 3 | 2020-04-18T21:15:56.000Z | 2022-02-03T12:09:54.000Z | Ch04.py | econcarol/ISLR | 497b65efdab5291111b8d8472606ea4bc40bba30 | [
"MIT"
] | null | null | null | Ch04.py | econcarol/ISLR | 497b65efdab5291111b8d8472606ea4bc40bba30 | [
"MIT"
] | 1 | 2021-12-27T23:59:18.000Z | 2021-12-27T23:59:18.000Z | # ISLR Ch 4 by Carol Cui
%reset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
# ----------------------------------------------------------------------------
# Q10: classify weekly market direction on the ISLR Weekly data set.
# NOTE(review): this is an interactive (IPython-style) analysis script; bare
# expressions such as Weekly.describe() only display their output in a REPL.
Weekly = pd.read_csv('C:\\Users\\Carol\\Desktop\\Weekly.csv')
# (a) numerical summaries of the data
Weekly.describe()
pd.crosstab(index=Weekly["Direction"], columns="count")
Weekly.corr() # Volume increases in year.
# (b) full logistic regression on columns 2:8 (the lag/volume predictors)
import statsmodels.api as sm
x01 = sm.add_constant(Weekly.iloc[:, 2:8])
# encode the response: 'Up' -> 1, 'Down' -> 0
y01 = np.where(Weekly['Direction']=='Up', 1, 0)
glm0 = sm.Logit(y01, x01)
print(glm0.fit().summary())
# Lag2 is statistically significant.
# (c) sklearn logistic fit on the full sample, with confusion matrix
x = pd.DataFrame(Weekly, columns=Weekly.columns[2:8])
y = Weekly['Direction']
glm1 = LogisticRegression()
glm1.pred = glm1.fit(x, y).predict(x)
print(pd.DataFrame(confusion_matrix(y, glm1.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
# NOTE(review): accuracy_score returns ACCURACY (56% here) although the label
# says 'error rate'; Q11/Q13 below print 1-accuracy instead -- confirm intent.
print('error rate: ', accuracy_score(y, glm1.pred)) # 56%
# (d) train on years < 2009 with the single predictor in column 3, test on 2009-2010
train = Weekly[Weekly['Year'] < 2009]
x_train = train.iloc[:,3]
# NOTE(review): Series.reshape was removed in pandas 1.0; newer pandas needs
# x_train.values.reshape(...) -- confirm the pinned pandas version.
x_train = x_train.reshape(len(x_train),1)
y_train = train.loc[:,'Direction']
test = Weekly[Weekly['Year'] >= 2009]
x_test = test.iloc[:,3]
x_test = x_test.reshape(len(x_test),1)
y_test = test.loc[:,'Direction']
glm2 = LogisticRegression()
glm2.pred = glm2.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm2.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, glm2.pred)) # 62.5%
# (e) LDA with the same train/test split
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, lda.pred)) # 62.5%
# (f) QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, qda.pred)) # 58.7%
# (g) 1-nearest-neighbour baseline
knn = KNeighborsClassifier(n_neighbors=1)
knn.pred = knn.fit(x_train, y_train).predict(x_test)
print('error rate: ', accuracy_score(y_test, knn.pred)) # 49%
# (h): Logistic and LDA models are the best.
# (i) experiments with K and with an interaction term
# KNN: sweep K over {5, 10, 20} and keep the K with the lowest test error
error_rate = np.array([])
k_value = np.array([])
for i in (5, 10, 20):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.pred = knn.fit(x_train, y_train).predict(x_test)
    k_value = np.append(k_value, i)
    error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# LDA with columns 2:4 plus their interaction Lag1*Lag2
train = Weekly[Weekly['Year'] < 2009]
x_train = train.iloc[:,2:4]
x_train['Lag12'] = x_train.Lag1 * x_train.Lag2
y_train = train.loc[:,'Direction']
test = Weekly[Weekly['Year'] >= 2009]
x_test = test.iloc[:,2:4]
x_test['Lag12'] = x_test.Lag1 * x_test.Lag2
y_test = test.loc[:,'Direction']
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, lda.pred)) # 57.7%
# QDA on the same interaction features
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, qda.pred)) # 46.2%
# ----------------------------------------------------------------------------
# Q11: predict high/low gas mileage on the Auto data set.
Auto = pd.read_csv('C:\\Users\\Carol\\Desktop\\Auto.csv', na_values='?').dropna()
# (a) binary response: 1 when mpg is above its median, else 0
Auto['mpg01'] = np.where(Auto['mpg'] > Auto['mpg'].median(), 1, 0)
# (b) visual screening of candidate predictors
pd.plotting.scatter_matrix(Auto.iloc[:,0:10], figsize=(10,10))
# select: displacement, horsepower, weight, acceleration
# (c) 80/20 train/test split on the selected predictors
x_name = ['displacement', 'horsepower', 'weight', 'acceleration']
x = pd.DataFrame(Auto, columns=x_name)
y = np.array(Auto['mpg01'])
np.random.seed(1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# (d) LDA
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, lda.pred)) # 7.6%
# (e) QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, qda.pred)) # 3.8%
# (f) Logit
glm = LogisticRegression()
glm.pred = glm.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, glm.pred)) # 7.6%
# (g) KNN: sweep K = 1, 11, 21, ..., 101 and report the best
error_rate = np.array([])
k_value = np.array([])
for i in range(1, 110, 10):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.pred = knn.fit(x_train, y_train).predict(x_test)
    k_value = np.append(k_value, i)
    error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# k = 31 is the best
# ----------------------------------------------------------------------------
# Q12
# NOTE: this copy of the script calls Power/Power2/Power3/PlotPower without
# defining them (NameError at runtime); the definitions are restored below.
# (a) print 2 raised to the 3rd power
def Power():
    """Print 2**3 (hard-coded demo for Q12(a))."""
    print(2**3)

Power()
# (b) generalize: print x raised to the power a
def Power2(x, a):
    """Print x raised to the power a."""
    print(x**a)

Power2(3,8)
# (c)
Power2(10,3) # 1000
Power2(8,17) # 2.2518e+15
Power2(131,3) # 2248091
# (d) return the value instead of printing it
def Power3(x, a):
    """Return x raised to the power a (elementwise for numpy arrays)."""
    return x**a
# (e) scatter-plot y = x**2 against x with linear and log axes
x = np.arange(1, 11, 1)
y = Power3(x,2)
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.scatter(x, y)
plt.title('log(x^2) vs x')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 2)
ax.set_xscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on xlog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 3)
ax.set_yscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on ylog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 4)
ax.set_xscale('log')
ax.set_yscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on xylog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
# (f) helper to plot x**a vs x for a given exponent
def PlotPower(x, a):
    """Scatter-plot x**a against x with labelled axes."""
    y = Power3(x, a)
    plt.scatter(x, y)
    plt.title('x^%.0f vs x' %a)
    plt.xlabel('x')
    plt.ylabel('x^%.0f' %a)

PlotPower(np.arange(1,11,1), 3)
# ----------------------------------------------------------------------------
# Q13: predict whether a Boston suburb has above-median crime rate.
Boston = pd.read_csv('C:\\Users\\Carol\\Desktop\\Boston.csv')
# binary response: 1 when crim is above its median
Boston['crim01'] = np.where(Boston['crim'] > Boston['crim'].median(), 1, 0)
Boston.corr() # indus, nox, age, dis, rad, tax
pd.plotting.scatter_matrix(Boston.iloc[:,2:17]) # nox, rm, dis, tax, black, lstat, medv
# pick: indus, nox, dis, tax, lstat
# data setup: 70/30 train/test split on the chosen predictors
x_name = ['indus', 'nox', 'dis', 'tax', 'lstat']
x = pd.DataFrame(Boston, columns=x_name)
y = np.array(Boston['crim01'])
np.random.seed(1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# Logit
glm = LogisticRegression()
glm.pred = glm.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, glm.pred)) # 21.7%
# LDA
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, lda.pred)) # 17.1%
# QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, qda.pred)) # 15.1%
# KNN: sweep K = 1, 11, 21, ..., 101
error_rate = np.array([])
k_value = np.array([])
for i in range(1, 110, 10):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.pred = knn.fit(x_train, y_train).predict(x_test)
    k_value = np.append(k_value, i)
    error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# k = 1 is the best | 32.00365 | 112 | 0.641464 | # ISLR Ch 4 by Carol Cui
%reset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
# ----------------------------------------------------------------------------
# Q10: classify weekly market direction on the ISLR Weekly data set.
# NOTE(review): this is an interactive (IPython-style) analysis script; bare
# expressions such as Weekly.describe() only display their output in a REPL.
Weekly = pd.read_csv('C:\\Users\\Carol\\Desktop\\Weekly.csv')
# (a) numerical summaries of the data
Weekly.describe()
pd.crosstab(index=Weekly["Direction"], columns="count")
Weekly.corr() # Volume increases in year.
# (b) full logistic regression on columns 2:8 (the lag/volume predictors)
import statsmodels.api as sm
x01 = sm.add_constant(Weekly.iloc[:, 2:8])
# encode the response: 'Up' -> 1, 'Down' -> 0
y01 = np.where(Weekly['Direction']=='Up', 1, 0)
glm0 = sm.Logit(y01, x01)
print(glm0.fit().summary())
# Lag2 is statistically significant.
# (c) sklearn logistic fit on the full sample, with confusion matrix
x = pd.DataFrame(Weekly, columns=Weekly.columns[2:8])
y = Weekly['Direction']
glm1 = LogisticRegression()
glm1.pred = glm1.fit(x, y).predict(x)
print(pd.DataFrame(confusion_matrix(y, glm1.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
# NOTE(review): accuracy_score returns ACCURACY (56% here) although the label
# says 'error rate'; Q11/Q13 below print 1-accuracy instead -- confirm intent.
print('error rate: ', accuracy_score(y, glm1.pred)) # 56%
# (d) train on years < 2009 with the single predictor in column 3, test on 2009-2010
train = Weekly[Weekly['Year'] < 2009]
x_train = train.iloc[:,3]
# NOTE(review): Series.reshape was removed in pandas 1.0; newer pandas needs
# x_train.values.reshape(...) -- confirm the pinned pandas version.
x_train = x_train.reshape(len(x_train),1)
y_train = train.loc[:,'Direction']
test = Weekly[Weekly['Year'] >= 2009]
x_test = test.iloc[:,3]
x_test = x_test.reshape(len(x_test),1)
y_test = test.loc[:,'Direction']
glm2 = LogisticRegression()
glm2.pred = glm2.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm2.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, glm2.pred)) # 62.5%
# (e) LDA with the same train/test split
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, lda.pred)) # 62.5%
# (f) QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, qda.pred)) # 58.7%
# (g) 1-nearest-neighbour baseline
knn = KNeighborsClassifier(n_neighbors=1)
knn.pred = knn.fit(x_train, y_train).predict(x_test)
print('error rate: ', accuracy_score(y_test, knn.pred)) # 49%
# (h): Logistic and LDA models are the best.
# (i) experiments with K and with an interaction term
# KNN: sweep K over {5, 10, 20} and keep the K with the lowest test error
error_rate = np.array([])
k_value = np.array([])
for i in (5, 10, 20):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.pred = knn.fit(x_train, y_train).predict(x_test)
    k_value = np.append(k_value, i)
    error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# LDA with columns 2:4 plus their interaction Lag1*Lag2
train = Weekly[Weekly['Year'] < 2009]
x_train = train.iloc[:,2:4]
x_train['Lag12'] = x_train.Lag1 * x_train.Lag2
y_train = train.loc[:,'Direction']
test = Weekly[Weekly['Year'] >= 2009]
x_test = test.iloc[:,2:4]
x_test['Lag12'] = x_test.Lag1 * x_test.Lag2
y_test = test.loc[:,'Direction']
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, lda.pred)) # 57.7%
# QDA on the same interaction features
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', accuracy_score(y_test, qda.pred)) # 46.2%
# ----------------------------------------------------------------------------
# Q11: predict high/low gas mileage on the Auto data set.
Auto = pd.read_csv('C:\\Users\\Carol\\Desktop\\Auto.csv', na_values='?').dropna()
# (a) binary response: 1 when mpg is above its median, else 0
Auto['mpg01'] = np.where(Auto['mpg'] > Auto['mpg'].median(), 1, 0)
# (b) visual screening of candidate predictors
pd.plotting.scatter_matrix(Auto.iloc[:,0:10], figsize=(10,10))
# select: displacement, horsepower, weight, acceleration
# (c) 80/20 train/test split on the selected predictors
x_name = ['displacement', 'horsepower', 'weight', 'acceleration']
x = pd.DataFrame(Auto, columns=x_name)
y = np.array(Auto['mpg01'])
np.random.seed(1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# (d) LDA
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, lda.pred)) # 7.6%
# (e) QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, qda.pred)) # 3.8%
# (f) Logit
glm = LogisticRegression()
glm.pred = glm.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, glm.pred)) # 7.6%
# (g) KNN: sweep K = 1, 11, 21, ..., 101 and report the best
error_rate = np.array([])
k_value = np.array([])
for i in range(1, 110, 10):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.pred = knn.fit(x_train, y_train).predict(x_test)
    k_value = np.append(k_value, i)
    error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# k = 31 is the best
# ----------------------------------------------------------------------------
# Q12
# (a)
def Power():
    """Print 2 raised to the 3rd power (hard-coded demo for Q12(a))."""
    result = 2 ** 3
    print(result)

Power()
# (b)
def Power2(x, a):
    """Print x raised to the power a (Q12(b))."""
    print(pow(x, a))

Power2(3, 8)
# (c) a few spot checks of Power2 against known values
Power2(10, 3)   # 1000
Power2(8, 17)   # 2.2518e+15
Power2(131, 3)  # 2248091
# (d)
def Power3(x, a):
return x**a
# (e)
x = np.arange(1, 11, 1)
y = Power3(x,2)
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.scatter(x, y)
plt.title('log(x^2) vs x')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 2)
ax.set_xscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on xlog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 3)
ax.set_yscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on ylog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 4)
ax.set_xscale('log')
ax.set_yscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on xylog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
# (f)
def PlotPower(x, a):
y = Power3(x, a)
plt.scatter(x, y)
plt.title('x^%.0f vs x' %a)
plt.xlabel('x')
plt.ylabel('x^%.0f' %a)
PlotPower(np.arange(1,11,1), 3)
# ----------------------------------------------------------------------------
# Q13
Boston = pd.read_csv('C:\\Users\\Carol\\Desktop\\Boston.csv')
Boston['crim01'] = np.where(Boston['crim'] > Boston['crim'].median(), 1, 0)
Boston.corr() # indus, nox, age, dis, rad, tax
pd.plotting.scatter_matrix(Boston.iloc[:,2:17]) # nox, rm, dis, tax, black, lstat, medv
# pick: indus, nox, dis, tax, lstat
# data setup
x_name = ['indus', 'nox', 'dis', 'tax', 'lstat']
x = pd.DataFrame(Boston, columns=x_name)
y = np.array(Boston['crim01'])
np.random.seed(1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# Logit
glm = LogisticRegression()
glm.pred = glm.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, glm.pred)) # 21.7%
# LDA
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, lda.pred)) # 17.1%
# QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, qda.pred)) # 15.1%
# KNN
error_rate = np.array([])
k_value = np.array([])
for i in range(1, 110, 10):
knn = KNeighborsClassifier(n_neighbors=i)
knn.pred = knn.fit(x_train, y_train).predict(x_test)
k_value = np.append(k_value, i)
error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# k = 1 is the best | 153 | 0 | 88 |
791b6dd92bbb31cebd779a17307f7f84e2d5ee44 | 1,014 | py | Python | cajas/webclient/views/movement_withdraw_requirement_list.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/webclient/views/movement_withdraw_requirement_list.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/webclient/views/movement_withdraw_requirement_list.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from cajas.movement.models.movement_withdraw import MovementWithdraw
from cajas.office.models.officeCountry import OfficeCountry
from cajas.users.models.partner import Partner
from cajas.webclient.views.utils import get_president_user
president = get_president_user()
class MovementWithdrawRequireList(LoginRequiredMixin, TemplateView):
"""
"""
login_url = '/accounts/login/'
redirect_field_name = 'redirect_to'
template_name = 'webclient/movement_withdraw_require_list.html'
| 36.214286 | 85 | 0.766272 |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from cajas.movement.models.movement_withdraw import MovementWithdraw
from cajas.office.models.officeCountry import OfficeCountry
from cajas.users.models.partner import Partner
from cajas.webclient.views.utils import get_president_user
president = get_president_user()
class MovementWithdrawRequireList(LoginRequiredMixin, TemplateView):
"""
"""
login_url = '/accounts/login/'
redirect_field_name = 'redirect_to'
template_name = 'webclient/movement_withdraw_require_list.html'
def get_context_data(self, **kwargs):
context = super(MovementWithdrawRequireList, self).get_context_data(**kwargs)
movements = MovementWithdraw.objects.all()
context['movements'] = movements
context['all_offices'] = OfficeCountry.objects.all().order_by('office')
context['partners_offices'] = Partner.objects.all().exclude(user=president)
return context
| 381 | 0 | 27 |
4209e089d8e339853461195c1e6519d689c092bf | 2,289 | py | Python | scrapping_Task4.py | priyanshumishra1009/scrapping | 84e2866bdc07a48a4967a712711eecb8a66a32c8 | [
"MIT"
] | null | null | null | scrapping_Task4.py | priyanshumishra1009/scrapping | 84e2866bdc07a48a4967a712711eecb8a66a32c8 | [
"MIT"
] | null | null | null | scrapping_Task4.py | priyanshumishra1009/scrapping | 84e2866bdc07a48a4967a712711eecb8a66a32c8 | [
"MIT"
] | null | null | null | import json
from bs4 import BeautifulSoup
import requests
op=open('scrap.json')
data_dic,ask,check={},int(input('tell the rank of the movie you wanna see:- ')),json.load(op)
for i in check:
if i['Rank']==ask:
url=i["Link"]
page=requests.get(url)
soup=BeautifulSoup(page.text,"html.parser")
body_of_web=soup.find('div',{'id':'__next'})
structure=body_of_web.find('main')
sturct=structure.find('div', class_='ipc-page-content-container ipc-page-content-container--full BaseLayout__NextPageContentContainer-sc-180q5jf-0 fWxmdE')
section=sturct.find('section', class_='ipc-page-background ipc-page-background--base TitlePage__StyledPageBackground-wzlr49-0 dDUGgO')
sctor=section.find('section', class_='ipc-page-background ipc-page-background--baseAlt TitleMainHeroGroup__StyledPageBackground-w70azj-0 hEHQFC')
division=sctor.find('div', class_="TitleBlock__TitleContainer-sc-1nlhx7j-1 jxsVNt")
genre=sctor.find('div', class_="GenresAndPlot__ContentParent-cum89p-8 bFvaWW Hero__GenresAndPlotContainer-kvkd64-11 twqaW")
b=genre.find_all('a', class_='GenresAndPlot__GenreChip-cum89p-3 fzmeux ipc-chip ipc-chip--on-baseAlt')
genr=[]
for i in b:
genr.append(i.text)
bio=genre.find('span', class_='GenresAndPlot__TextContainerBreakpointXS_TO_M-cum89p-0 dcFkRD').text
if len(division.text[-7::])!=7:
time=division.text[-7::]
else:
time=division.text[-8::]
name=division.find('h1').text
print(time)
time2=int(time[0])*60
if 'min' in time:
a=time.strip('min').split('h')
run_time=str(time2+int(a[1]))+' minutes'
director=structure.find('a',class_='ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link').text
country_and_more= structure.find_all('li', class_='ipc-metadata-list__item')
for i in country_and_more:
if 'Country of origin' in i.text:
country=i.text[17:]
if 'Language' in i.text:
language=i.text[8:]
if 'Read all' in bio:
bio=bio[0:-8]
img=structure.find('div',class_='Media__PosterContainer-sc-1x98dcb-1 dGdktI')
link='https://www.imdb.com/'+(body_of_web.find_all("div",class_="ipc-poster ipc-poster--baseAlt ipc-poster--dynamic-width ipc-sub-grid-item ipc-sub-grid-item--span-2")[0].a['href'])
print(language,'\n',country,'\n',run_time,'\n',director,'\n',genr,'\n',bio,'\n',name,'\n',link)
| 50.866667 | 181 | 0.741372 | import json
from bs4 import BeautifulSoup
import requests
op=open('scrap.json')
data_dic,ask,check={},int(input('tell the rank of the movie you wanna see:- ')),json.load(op)
for i in check:
if i['Rank']==ask:
url=i["Link"]
page=requests.get(url)
soup=BeautifulSoup(page.text,"html.parser")
body_of_web=soup.find('div',{'id':'__next'})
structure=body_of_web.find('main')
sturct=structure.find('div', class_='ipc-page-content-container ipc-page-content-container--full BaseLayout__NextPageContentContainer-sc-180q5jf-0 fWxmdE')
section=sturct.find('section', class_='ipc-page-background ipc-page-background--base TitlePage__StyledPageBackground-wzlr49-0 dDUGgO')
sctor=section.find('section', class_='ipc-page-background ipc-page-background--baseAlt TitleMainHeroGroup__StyledPageBackground-w70azj-0 hEHQFC')
division=sctor.find('div', class_="TitleBlock__TitleContainer-sc-1nlhx7j-1 jxsVNt")
genre=sctor.find('div', class_="GenresAndPlot__ContentParent-cum89p-8 bFvaWW Hero__GenresAndPlotContainer-kvkd64-11 twqaW")
b=genre.find_all('a', class_='GenresAndPlot__GenreChip-cum89p-3 fzmeux ipc-chip ipc-chip--on-baseAlt')
genr=[]
for i in b:
genr.append(i.text)
bio=genre.find('span', class_='GenresAndPlot__TextContainerBreakpointXS_TO_M-cum89p-0 dcFkRD').text
if len(division.text[-7::])!=7:
time=division.text[-7::]
else:
time=division.text[-8::]
name=division.find('h1').text
print(time)
time2=int(time[0])*60
if 'min' in time:
a=time.strip('min').split('h')
run_time=str(time2+int(a[1]))+' minutes'
director=structure.find('a',class_='ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link').text
country_and_more= structure.find_all('li', class_='ipc-metadata-list__item')
for i in country_and_more:
if 'Country of origin' in i.text:
country=i.text[17:]
if 'Language' in i.text:
language=i.text[8:]
if 'Read all' in bio:
bio=bio[0:-8]
img=structure.find('div',class_='Media__PosterContainer-sc-1x98dcb-1 dGdktI')
link='https://www.imdb.com/'+(body_of_web.find_all("div",class_="ipc-poster ipc-poster--baseAlt ipc-poster--dynamic-width ipc-sub-grid-item ipc-sub-grid-item--span-2")[0].a['href'])
print(language,'\n',country,'\n',run_time,'\n',director,'\n',genr,'\n',bio,'\n',name,'\n',link)
| 0 | 0 | 0 |
26d38c700f0094c9b1d8583aca1c94b5b82470bf | 14,154 | py | Python | python/MinVelWrapper.py | usgs/MinVel | c56a247e6b612d81a678ca19b2def64ce3aef107 | [
"CC0-1.0"
] | 1 | 2020-05-15T23:48:19.000Z | 2020-05-15T23:48:19.000Z | python/MinVelWrapper.py | usgs/MinVel | c56a247e6b612d81a678ca19b2def64ce3aef107 | [
"CC0-1.0"
] | 1 | 2019-02-22T18:33:46.000Z | 2019-07-17T22:16:06.000Z | python/MinVelWrapper.py | usgs/MinVel | c56a247e6b612d81a678ca19b2def64ce3aef107 | [
"CC0-1.0"
] | 1 | 2020-10-05T15:17:15.000Z | 2020-10-05T15:17:15.000Z | # Import the modules
import sys
import MinVel as mv
import numpy as np
# NOTES: May want to update temperature dependence of thermal expansivity using Holland and Powell's (2011)
# new revised equations (see figure 1 in that article). This will necessitate recalculating the first
# Gruneisen parameters. This could provide more realistic temperature dependence of material
# properties within the mantle.
if len(sys.argv) > 1:
if sys.argv[1] == "-h":
print('MinVel -- Program to calculate mineral aggregate moduli and density')
print('')
print(' Written by Oliver Boyd')
print('')
print(' This program calculates the velocity and density of a mineral assemblage ')
print(' at a given pressure and temperature (which may be vectors).')
print(' The velocities are expressed as Voigt, Reuss, and Voigt-Reuss-Hill averages.')
print('')
print(' The data required for this analysis is taken from Hacker and Abers (2003),')
print(' updated by Abers and Hacker in 2016, and expanded by Boyd in 2018.')
print(' The moduli at pressure and temperature are calculated based on the')
print(' procedures of Hacker and Abers (2004), Bina and Helffrich (1992) and')
print(' Holland and Powell (1998) as outlined in the supplementary section of ')
print(' Boyd et al. (2004) with updates by Abers and Hacker (2016) for quartz.')
print('')
print(' OUTPUT (SI Units)')
print(' results.npy - numpy binary file containing the following vectors:')
print(' Voigt-Reuss-Hill averages')
print(' K - Bulk modulus')
print(' G - Shear modulus')
print(' E - Youngs modulus')
print(' l - Lambda')
print(' v - Poissons ratio')
print(' Vp - P-wave velocity')
print(' Vs - S-wave velocity')
print(' p - Density')
print(' a - Thermal Expansivity')
print(' Voigt(v) and Reuss(r) bounds on velocity')
print(' Vpv - P-wave velocity')
print(' Vpr - P-wave velocity')
print(' Vsv - S-wave velocity')
print(' Vsr - S-wave velocity')
print('')
print(' INPUTS')
print(' Command line options')
print(' -h Help about this program.')
print('')
print(' -f InputFile - File containing composition, temperature, and pressure ')
print(' information with the following format')
print(' MinIndx 1, MinIndx 2, ..., MinIndx N')
print(' VolFrac 1, VolFrac 2, ..., VolFrac N')
print(' T1, P1')
print(' T2, P2')
print(' ...')
print(' TN, PN')
print('')
print(' -p Pressure - desired pressure or comma separated vector of pressures (Pa)')
print(' -t Temperature - desired temperature or comma separated vector of temperatures (K)')
print('')
print(' Composition parmeters - a composition structure with the following fields: ')
print(' -cm Min - The mineral index comma separated vector.')
print(' -cv Fr - Volume fraction for each mineral in Min (0 to 1), comma separated.')
print('')
print(' Mineral Indexes')
print(' Quartz')
print(' 1. Alpha Quartz ')
print(' 2. Beta Quartz ')
print(' 3. Coesite ')
print(' Feldspar group')
print(' Plagioclase')
print(' 4. High Albite ')
print(' 5. Low Albite ')
print(' 6. Anorthite ')
print('')
print(' 7. Orthoclase ')
print(' 8. Sanidine ')
print(' Garnet structural group')
print(' 9. Almandine ')
print(' 10. Grossular ')
print(' 11. Pyrope ')
print(' Olivine group')
print(' 12. Forsterite ')
print(' 13. Fayalite ')
print(' Pyroxene group')
print(' 14. Diopside ')
print(' 15. Enstatite ')
print(' 16. Ferrosilite ')
print(' 79. Mg-Tschermak ')
print(' 17. Jadeite ')
print(' 18. Hedenbergite ')
print(' 80. Acmite ')
print(' 81. Ca-Tschermak ')
print(' Amphibole supergroup')
print(' 19. Glaucophane ')
print(' 20. Ferroglaucophane ')
print(' 21. Tremolite ')
print(' 22. Ferroactinolite ')
print(' 23. Tshermakite ')
print(' 24. Pargasite ')
print(' 25. Hornblende ')
print(' 26. Anthophyllite ')
print(' Mica group')
print(' 27. Phlogopite ')
print(' 28. Annite ')
print(' 29. Muscovite ')
print(' 30. Celadonite ')
print(' Other')
print(' 31. Talc ')
print(' 32. Clinochlore ')
print(' 33. Daphnite ')
print(' 34. Antigorite ')
print(' 35. Zoisite ')
print(' 36. Clinozoisite ')
print(' 37. Epidote ')
print(' 38. Lawsonite ')
print(' 39. Prehnite ')
print(' 40. Pumpellyite ')
print(' 41. Laumontite ')
print(' 42. Wairakite ')
print(' 43. Brucite ')
print(' 44. Clinohumite ')
print(' 45. Phase A ')
print(' 46. Sillimanite ')
print(' 47. Kyanite ')
print(' 48. Spinel ')
print(' 49. Hercynite ')
print(' 50. Magnetite ')
print(' 51. Calcite ')
print(' 52. Aragonite ')
print(' 82. Magnesite ')
print(' 83. En79Fs09Ts12 ')
print(' 84. Di75He9Jd3Ts12 ')
print(' 85. ilmenite ')
print(' 86. cordierite ')
print(' 87. scapolite (meionite) ')
print(' 88. rutile ')
print(' 89. sphene ')
print(' 53. Corundum ')
print(' 54. Dolomite ')
print(' 74. Halite ')
print(' 77. Pyrite ')
print(' 78. Gypsum ')
print(' 90. Anhydrite ')
print(' 0. Water ')
print(' -1. Ice ')
print(' Clays')
print(' 55. Montmorillonite (Saz-1)')
print(' 56. Montmorillonite (S Wy-2)')
print(' 57. Montmorillonite (STX-1)')
print(' 58. Montmorillonite (S Wy-1)')
print(' 59. Montmorillonite (Shca-1)')
print(' 60. Kaolinite (Kga-2)')
print(' 61. Kaolinite (Kga-1b)')
print(' 62. Illite (IMT-2)')
print(' 63. Illite (ISMT-2)')
print(' 66. Smectite (S Wa-1)')
print(' 70. Montmorillonite (S YN-1)')
print(' 71. Chrysotile ')
print(' 72. Lizardite ')
print(' 76. Dickite ')
print('')
print(' Example:');
print(' Geophysical parameters for 20% Quartz, 20% low Albite, 30% Forsterite, and 30% Fayalite at')
print(' 300, 400, and 500K and 0.1, 0.3, and 0.5 MPa')
print(' > python MinVelWrapper.py -t 300,400,500 -p 0.1e6,0.3e6,0.5e6 -cm 1,5,12,13 -cv 0.2,0.2,0.3,0.3')
print('')
sys.exit()
nMin = 1
nPT = 1
nT = 0
nP = 0
if len(sys.argv) > 1:
for j in range(1,len(sys.argv),2):
if sys.argv[j] == "-t":
entries = sys.argv[j+1].split(",")
nT = len(entries)
T = np.zeros((nT),dtype=np.float64)
for k in range(0,nT):
T[k] = entries[k]
if sys.argv[j] == "-p":
entries = sys.argv[j+1].split(",")
nP = len(entries)
P = np.zeros((nP),dtype=np.float64)
for k in range(0,nP):
P[k] = entries[k]
if sys.argv[j] == "-cm":
entries = sys.argv[j+1].split(",")
nMin = len(entries)
Cm = np.zeros((nMin),dtype=np.int8)
for k in range(0,nMin):
Cm[k] = entries[k]
if sys.argv[j] == "-cv":
entries = sys.argv[j+1].split(",")
nFr = len(entries)
Cv = np.zeros((nFr),dtype=np.float64)
for k in range(0,nFr):
Cv[k] = entries[k]
if sys.argv[j] == "-f":
fl = sys.argv[j+1]
print('Reading {0:s}'.format(fl))
f = open(fl,"r")
if f.mode == "r":
nPT = 0
ln = 0
for line in f:
line = line.strip()
columns = line.split(",")
if ln < 2:
nMin = len(columns)
else:
nPT = nPT + 1
ln = ln + 1
nT = nPT
nP = nPT
nFr = nMin
f.close()
T = np.zeros((nPT),dtype=np.float64)
P = np.zeros((nPT),dtype=np.float64)
Cm = np.zeros((nMin),dtype=np.int8)
Cv = np.zeros((nMin),dtype=np.float64)
f = open(fl,"r")
if f.mode == "r":
ln = 0
jT = 0
for line in f:
line = line.strip()
columns = line.split(",")
if ln == 0:
for j in range(0,len(columns)):
Cm[j] = columns[j]
elif ln == 1:
for j in range(0,len(columns)):
Cv[j] = columns[j]
else:
T[jT] = columns[0]
P[jT] = columns[1]
jT = jT + 1
ln = ln + 1
f.close()
# MAke sure volume fractions sum to 1
if sum(Cv) < 1:
print('Composition does not sum to one. - Exiting')
sys.exit()
if nT != nP:
print('Number of temperature inputs must be equal to the number of pressure inputs')
sys.exit()
else:
nPT = nT
if nMin != nFr:
print('Number of minerals types must be equal to the number of mineral fractional volumes')
sys.exit()
Par, MinNames, nPar, nAllMin = mv.loadPar('../database/MineralPhysicsDatabase.nc')
MinIndex = Par[0,:];
print('{0:21s}{1:20s}'.format('Mineral','Volume fraction'))
for j in range(0,nMin):
k = mv.find(MinIndex,Cm[j]);
print(MinNames[:,k].tobytes().decode('utf-8'),'(',Cv[j],')')
if nPT > 1:
print('There are',nPT,'temperature and pressure points')
else:
print('Temperature',T)
print('Pressure',P)
print('')
K, G, E, l, v, Vp, Vs, den, Vpv, Vpr, Vsv, Vsr, a = mv.CalcMV(Cm,Cv,T,P);
print('K ',K)
print('G ',G)
print('E ',E)
print('l ',l)
print('v ',v)
print('Vp ',Vp)
print('Vs ',Vs)
print('den',den)
print('a ',a)
print('')
print('Voigt(v) and Reuss(r) bounds on velocity')
print('Vpv',Vpv)
print('Vpr',Vpr)
print('Vsv',Vsv)
print('Vsr',Vsr)
print('')
res = np.zeros((13,nPT),dtype=np.float64)
res[0,:] = K
res[1,:] = G
res[2,:] = E
res[3,:] = l
res[4,:] = v
res[5,:] = Vp
res[6,:] = Vs
res[7,:] = den
res[8,:] = a
res[9,:] = Vpv
res[10,:] = Vpr
res[11,:] = Vsv
res[12,:] = Vsr
f = 'results.npy'
np.save(f,res)
sys.exit()
| 45.954545 | 117 | 0.391409 | # Import the modules
import sys
import MinVel as mv
import numpy as np
# NOTES: May want to update temperature dependence of thermal expansivity using Holland and Powell's (2011)
# new revised equations (see figure 1 in that article). This will necessitate recalculating the first
# Gruneisen parameters. This could provide more realistic temperature dependence of material
# properties within the mantle.
if len(sys.argv) > 1:
if sys.argv[1] == "-h":
print('MinVel -- Program to calculate mineral aggregate moduli and density')
print('')
print(' Written by Oliver Boyd')
print('')
print(' This program calculates the velocity and density of a mineral assemblage ')
print(' at a given pressure and temperature (which may be vectors).')
print(' The velocities are expressed as Voigt, Reuss, and Voigt-Reuss-Hill averages.')
print('')
print(' The data required for this analysis is taken from Hacker and Abers (2003),')
print(' updated by Abers and Hacker in 2016, and expanded by Boyd in 2018.')
print(' The moduli at pressure and temperature are calculated based on the')
print(' procedures of Hacker and Abers (2004), Bina and Helffrich (1992) and')
print(' Holland and Powell (1998) as outlined in the supplementary section of ')
print(' Boyd et al. (2004) with updates by Abers and Hacker (2016) for quartz.')
print('')
print(' OUTPUT (SI Units)')
print(' results.npy - numpy binary file containing the following vectors:')
print(' Voigt-Reuss-Hill averages')
print(' K - Bulk modulus')
print(' G - Shear modulus')
print(' E - Youngs modulus')
print(' l - Lambda')
print(' v - Poissons ratio')
print(' Vp - P-wave velocity')
print(' Vs - S-wave velocity')
print(' p - Density')
print(' a - Thermal Expansivity')
print(' Voigt(v) and Reuss(r) bounds on velocity')
print(' Vpv - P-wave velocity')
print(' Vpr - P-wave velocity')
print(' Vsv - S-wave velocity')
print(' Vsr - S-wave velocity')
print('')
print(' INPUTS')
print(' Command line options')
print(' -h Help about this program.')
print('')
print(' -f InputFile - File containing composition, temperature, and pressure ')
print(' information with the following format')
print(' MinIndx 1, MinIndx 2, ..., MinIndx N')
print(' VolFrac 1, VolFrac 2, ..., VolFrac N')
print(' T1, P1')
print(' T2, P2')
print(' ...')
print(' TN, PN')
print('')
print(' -p Pressure - desired pressure or comma separated vector of pressures (Pa)')
print(' -t Temperature - desired temperature or comma separated vector of temperatures (K)')
print('')
print(' Composition parmeters - a composition structure with the following fields: ')
print(' -cm Min - The mineral index comma separated vector.')
print(' -cv Fr - Volume fraction for each mineral in Min (0 to 1), comma separated.')
print('')
print(' Mineral Indexes')
print(' Quartz')
print(' 1. Alpha Quartz ')
print(' 2. Beta Quartz ')
print(' 3. Coesite ')
print(' Feldspar group')
print(' Plagioclase')
print(' 4. High Albite ')
print(' 5. Low Albite ')
print(' 6. Anorthite ')
print('')
print(' 7. Orthoclase ')
print(' 8. Sanidine ')
print(' Garnet structural group')
print(' 9. Almandine ')
print(' 10. Grossular ')
print(' 11. Pyrope ')
print(' Olivine group')
print(' 12. Forsterite ')
print(' 13. Fayalite ')
print(' Pyroxene group')
print(' 14. Diopside ')
print(' 15. Enstatite ')
print(' 16. Ferrosilite ')
print(' 79. Mg-Tschermak ')
print(' 17. Jadeite ')
print(' 18. Hedenbergite ')
print(' 80. Acmite ')
print(' 81. Ca-Tschermak ')
print(' Amphibole supergroup')
print(' 19. Glaucophane ')
print(' 20. Ferroglaucophane ')
print(' 21. Tremolite ')
print(' 22. Ferroactinolite ')
print(' 23. Tshermakite ')
print(' 24. Pargasite ')
print(' 25. Hornblende ')
print(' 26. Anthophyllite ')
print(' Mica group')
print(' 27. Phlogopite ')
print(' 28. Annite ')
print(' 29. Muscovite ')
print(' 30. Celadonite ')
print(' Other')
print(' 31. Talc ')
print(' 32. Clinochlore ')
print(' 33. Daphnite ')
print(' 34. Antigorite ')
print(' 35. Zoisite ')
print(' 36. Clinozoisite ')
print(' 37. Epidote ')
print(' 38. Lawsonite ')
print(' 39. Prehnite ')
print(' 40. Pumpellyite ')
print(' 41. Laumontite ')
print(' 42. Wairakite ')
print(' 43. Brucite ')
print(' 44. Clinohumite ')
print(' 45. Phase A ')
print(' 46. Sillimanite ')
print(' 47. Kyanite ')
print(' 48. Spinel ')
print(' 49. Hercynite ')
print(' 50. Magnetite ')
print(' 51. Calcite ')
print(' 52. Aragonite ')
print(' 82. Magnesite ')
print(' 83. En79Fs09Ts12 ')
print(' 84. Di75He9Jd3Ts12 ')
print(' 85. ilmenite ')
print(' 86. cordierite ')
print(' 87. scapolite (meionite) ')
print(' 88. rutile ')
print(' 89. sphene ')
print(' 53. Corundum ')
print(' 54. Dolomite ')
print(' 74. Halite ')
print(' 77. Pyrite ')
print(' 78. Gypsum ')
print(' 90. Anhydrite ')
print(' 0. Water ')
print(' -1. Ice ')
print(' Clays')
print(' 55. Montmorillonite (Saz-1)')
print(' 56. Montmorillonite (S Wy-2)')
print(' 57. Montmorillonite (STX-1)')
print(' 58. Montmorillonite (S Wy-1)')
print(' 59. Montmorillonite (Shca-1)')
print(' 60. Kaolinite (Kga-2)')
print(' 61. Kaolinite (Kga-1b)')
print(' 62. Illite (IMT-2)')
print(' 63. Illite (ISMT-2)')
print(' 66. Smectite (S Wa-1)')
print(' 70. Montmorillonite (S YN-1)')
print(' 71. Chrysotile ')
print(' 72. Lizardite ')
print(' 76. Dickite ')
print('')
print(' Example:');
print(' Geophysical parameters for 20% Quartz, 20% low Albite, 30% Forsterite, and 30% Fayalite at')
print(' 300, 400, and 500K and 0.1, 0.3, and 0.5 MPa')
print(' > python MinVelWrapper.py -t 300,400,500 -p 0.1e6,0.3e6,0.5e6 -cm 1,5,12,13 -cv 0.2,0.2,0.3,0.3')
print('')
sys.exit()
nMin = 1
nPT = 1
nT = 0
nP = 0
if len(sys.argv) > 1:
for j in range(1,len(sys.argv),2):
if sys.argv[j] == "-t":
entries = sys.argv[j+1].split(",")
nT = len(entries)
T = np.zeros((nT),dtype=np.float64)
for k in range(0,nT):
T[k] = entries[k]
if sys.argv[j] == "-p":
entries = sys.argv[j+1].split(",")
nP = len(entries)
P = np.zeros((nP),dtype=np.float64)
for k in range(0,nP):
P[k] = entries[k]
if sys.argv[j] == "-cm":
entries = sys.argv[j+1].split(",")
nMin = len(entries)
Cm = np.zeros((nMin),dtype=np.int8)
for k in range(0,nMin):
Cm[k] = entries[k]
if sys.argv[j] == "-cv":
entries = sys.argv[j+1].split(",")
nFr = len(entries)
Cv = np.zeros((nFr),dtype=np.float64)
for k in range(0,nFr):
Cv[k] = entries[k]
if sys.argv[j] == "-f":
fl = sys.argv[j+1]
print('Reading {0:s}'.format(fl))
f = open(fl,"r")
if f.mode == "r":
nPT = 0
ln = 0
for line in f:
line = line.strip()
columns = line.split(",")
if ln < 2:
nMin = len(columns)
else:
nPT = nPT + 1
ln = ln + 1
nT = nPT
nP = nPT
nFr = nMin
f.close()
T = np.zeros((nPT),dtype=np.float64)
P = np.zeros((nPT),dtype=np.float64)
Cm = np.zeros((nMin),dtype=np.int8)
Cv = np.zeros((nMin),dtype=np.float64)
f = open(fl,"r")
if f.mode == "r":
ln = 0
jT = 0
for line in f:
line = line.strip()
columns = line.split(",")
if ln == 0:
for j in range(0,len(columns)):
Cm[j] = columns[j]
elif ln == 1:
for j in range(0,len(columns)):
Cv[j] = columns[j]
else:
T[jT] = columns[0]
P[jT] = columns[1]
jT = jT + 1
ln = ln + 1
f.close()
# MAke sure volume fractions sum to 1
if sum(Cv) < 1:
print('Composition does not sum to one. - Exiting')
sys.exit()
if nT != nP:
print('Number of temperature inputs must be equal to the number of pressure inputs')
sys.exit()
else:
nPT = nT
if nMin != nFr:
print('Number of minerals types must be equal to the number of mineral fractional volumes')
sys.exit()
Par, MinNames, nPar, nAllMin = mv.loadPar('../database/MineralPhysicsDatabase.nc')
MinIndex = Par[0,:];
print('{0:21s}{1:20s}'.format('Mineral','Volume fraction'))
for j in range(0,nMin):
k = mv.find(MinIndex,Cm[j]);
print(MinNames[:,k].tobytes().decode('utf-8'),'(',Cv[j],')')
if nPT > 1:
print('There are',nPT,'temperature and pressure points')
else:
print('Temperature',T)
print('Pressure',P)
print('')
K, G, E, l, v, Vp, Vs, den, Vpv, Vpr, Vsv, Vsr, a = mv.CalcMV(Cm,Cv,T,P);
print('K ',K)
print('G ',G)
print('E ',E)
print('l ',l)
print('v ',v)
print('Vp ',Vp)
print('Vs ',Vs)
print('den',den)
print('a ',a)
print('')
print('Voigt(v) and Reuss(r) bounds on velocity')
print('Vpv',Vpv)
print('Vpr',Vpr)
print('Vsv',Vsv)
print('Vsr',Vsr)
print('')
res = np.zeros((13,nPT),dtype=np.float64)
res[0,:] = K
res[1,:] = G
res[2,:] = E
res[3,:] = l
res[4,:] = v
res[5,:] = Vp
res[6,:] = Vs
res[7,:] = den
res[8,:] = a
res[9,:] = Vpv
res[10,:] = Vpr
res[11,:] = Vsv
res[12,:] = Vsr
f = 'results.npy'
np.save(f,res)
sys.exit()
| 0 | 0 | 0 |
2daa18d637292aa93b13f7400412c154d0b1a284 | 3,484 | py | Python | matroid/src/images.py | matroid/matroid-python | e354cd9fe2fe6d8d425a4f82b1c0b9e15e0a5dac | [
"MIT"
] | 16 | 2017-03-29T12:00:26.000Z | 2021-04-25T21:13:11.000Z | matroid/src/images.py | matroid/matroid-python | e354cd9fe2fe6d8d425a4f82b1c0b9e15e0a5dac | [
"MIT"
] | 2 | 2019-06-13T23:14:59.000Z | 2020-06-19T20:07:43.000Z | matroid/src/images.py | matroid/matroid-python | e354cd9fe2fe6d8d425a4f82b1c0b9e15e0a5dac | [
"MIT"
] | 7 | 2017-04-03T09:32:47.000Z | 2020-04-15T21:23:21.000Z | import os
import requests
from matroid import error
from matroid.src.helpers import api_call, batch_file_request
# https://staging.dev.matroid.com/docs/api/index.html#api-Images-Classify
@api_call(error.InvalidQueryError)
def classify_image(self, detectorId, file=None, url=None, **options):
"""
Classify an image with a detector
detectorId: a unique id for the detector
file: path to local image file to classify
url: internet URL for the image to classify
"""
(endpoint, method) = self.endpoints['classify_image']
if not url and not file:
raise error.InvalidQueryError(
message='Missing required parameter: file or url')
endpoint = endpoint.replace(':key', detectorId)
try:
headers = {'Authorization': self.token.authorization_header()}
data = {'detectorId': detectorId}
data.update(options)
if url:
data['url'] = url
if file:
if not isinstance(file, list):
file = [file]
return batch_file_request(file, method, endpoint, headers, data)
else:
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except IOError as e:
raise e
except error.InvalidQueryError as e:
raise e
except Exception as e:
raise error.APIConnectionError(message=e)
# https://staging.dev.matroid.com/docs/api/index.html#api-Images-PostLocalize
@api_call(error.InvalidQueryError)
def localize_image(self, localizer, localizerLabel, **options):
"""
Note: this API is very similar to Images/Classify;
however, it can be used to update bounding boxes of existing training images
by supplying update=true, labelId, and one of imageId or imageIds, and it has
access to the internal face localizer
(localizer="DEFAULT_FACE" and localizerLabel="face").
"""
(endpoint, method) = self.endpoints['localize_image']
data = {
'localizer': localizer,
'localizerLabel': localizerLabel,
}
update = options.get('update')
if update:
image_id = options.get('imageId')
image_ids = options.get('imageIds')
if not image_id and not image_ids:
raise error.InvalidQueryError(
message='Missing required parameter for update: imageId or imageIds')
if image_id:
data['imageId'] = image_id
else:
data['imageIds'] = image_ids
else:
files = options.get('file')
urls = options.get('url')
if not files and not urls:
raise error.InvalidQueryError(
message='Missing required parameter: files or urls')
data.update({'files': files,
'urls': urls, })
try:
headers = {'Authorization': self.token.authorization_header()}
data.update({'confidence': options.get('confidence'),
'update': 'true' if update else '',
'maxFaces': options.get('maxFaces'),
'labelId': options.get('labelId')
})
if update:
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
if files:
if not isinstance(files, list):
files = [files]
return batch_file_request(files, method, endpoint, headers, data)
else:
if isinstance(urls, list):
data['urls'] = urls
else:
data['url'] = urls
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except IOError as e:
raise e
except error.InvalidQueryError as e:
raise e
except Exception as e:
raise error.APIConnectionError(message=e)
| 29.777778 | 85 | 0.667049 | import os
import requests
from matroid import error
from matroid.src.helpers import api_call, batch_file_request
# https://staging.dev.matroid.com/docs/api/index.html#api-Images-Classify
@api_call(error.InvalidQueryError)
def classify_image(self, detectorId, file=None, url=None, **options):
"""
Classify an image with a detector
detectorId: a unique id for the detector
file: path to local image file to classify
url: internet URL for the image to classify
"""
(endpoint, method) = self.endpoints['classify_image']
if not url and not file:
raise error.InvalidQueryError(
message='Missing required parameter: file or url')
endpoint = endpoint.replace(':key', detectorId)
try:
headers = {'Authorization': self.token.authorization_header()}
data = {'detectorId': detectorId}
data.update(options)
if url:
data['url'] = url
if file:
if not isinstance(file, list):
file = [file]
return batch_file_request(file, method, endpoint, headers, data)
else:
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except IOError as e:
raise e
except error.InvalidQueryError as e:
raise e
except Exception as e:
raise error.APIConnectionError(message=e)
# https://staging.dev.matroid.com/docs/api/index.html#api-Images-PostLocalize
@api_call(error.InvalidQueryError)
def localize_image(self, localizer, localizerLabel, **options):
  """Localize objects in images, or update bounding boxes of training images.

  Note: this API is very similar to Images/Classify; however, it can be used
  to update bounding boxes of existing training images by supplying
  update=true, labelId, and one of imageId or imageIds, and it has access to
  the internal face localizer (localizer="DEFAULT_FACE" and
  localizerLabel="face").
  """
  endpoint, method = self.endpoints['localize_image']
  data = {
      'localizer': localizer,
      'localizerLabel': localizerLabel,
  }
  update = options.get('update')
  if update:
    # Update mode targets existing training images by id.
    single_id = options.get('imageId')
    many_ids = options.get('imageIds')
    if not single_id and not many_ids:
      raise error.InvalidQueryError(
          message='Missing required parameter for update: imageId or imageIds')
    if single_id:
      data['imageId'] = single_id
    else:
      data['imageIds'] = many_ids
  else:
    # Localization mode takes new inputs as files and/or URLs.
    files = options.get('file')
    urls = options.get('url')
    if not files and not urls:
      raise error.InvalidQueryError(
          message='Missing required parameter: files or urls')
    data['files'] = files
    data['urls'] = urls
  try:
    headers = {'Authorization': self.token.authorization_header()}
    data['confidence'] = options.get('confidence')
    data['update'] = 'true' if update else ''
    data['maxFaces'] = options.get('maxFaces')
    data['labelId'] = options.get('labelId')
    if update:
      return requests.request(method, endpoint, headers=headers, data=data)
    if files:
      if not isinstance(files, list):
        files = [files]
      return batch_file_request(files, method, endpoint, headers, data)
    if isinstance(urls, list):
      data['urls'] = urls
    else:
      data['url'] = urls
    return requests.request(method, endpoint, headers=headers, data=data)
  except IOError as e:
    raise e
  except error.InvalidQueryError as e:
    raise e
  except Exception as e:
    raise error.APIConnectionError(message=e)
| 0 | 0 | 0 |
b793a7d18609a5ee00bb5ed731a29673f11a853c | 657 | py | Python | etl/db.py | cfh294/ElectionModeling | 714da9ea004f042f9f775804168e3761e34f64f0 | [
"MIT"
] | null | null | null | etl/db.py | cfh294/ElectionModeling | 714da9ea004f042f9f775804168e3761e34f64f0 | [
"MIT"
] | null | null | null | etl/db.py | cfh294/ElectionModeling | 714da9ea004f042f9f775804168e3761e34f64f0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Initialize tables in SQLITE db
"""
import argparse
from models import Base
from sqlalchemy import create_engine
from utils import database_string
@with_args
main() | 28.565217 | 115 | 0.726027 | #!/usr/bin/env python3
"""
Initialize tables in SQLITE db
"""
import argparse
from models import Base
from sqlalchemy import create_engine
from utils import database_string
def with_args(f):
    """Decorator: parse the CLI and pass the resulting namespace to *f* first.

    The wrapped callable builds an argparse parser (with -db/--database
    defaulting to the project connection string) each time it is invoked.
    """
    def wrapper(*args, **kwargs):
        parser = argparse.ArgumentParser(
            description="Provide a connection string to create all needed DB objects.")
        parser.add_argument(
            "-db", "--database", default=database_string, type=str,
            help="Database connection string.")
        return f(parser.parse_args(), *args, **kwargs)
    return wrapper
@with_args
def main(args):
    """Create every ORM-declared table in the database given by args.database."""
    # echo=True makes SQLAlchemy log the DDL it emits; the connection string
    # comes from the -db/--database CLI option injected by @with_args.
    engine = create_engine(args.database, echo=True)
    Base.metadata.create_all(engine)
main() | 419 | 0 | 45 |
cce5ed81d0115df0b6c721df9d259ad23fb9f8b8 | 1,744 | py | Python | yard/skills/66-python/cookbook/yvhai/demo/std/misc.py | paser4se/bbxyard | d09bc6efb75618b2cef047bad9c8b835043446cb | [
"Apache-2.0"
] | 1 | 2016-03-29T02:01:58.000Z | 2016-03-29T02:01:58.000Z | yard/skills/66-python/cookbook/yvhai/demo/std/misc.py | paser4se/bbxyard | d09bc6efb75618b2cef047bad9c8b835043446cb | [
"Apache-2.0"
] | 18 | 2019-02-13T09:15:25.000Z | 2021-12-09T21:32:13.000Z | yard/skills/66-python/cookbook/yvhai/demo/std/misc.py | paser4se/bbxyard | d09bc6efb75618b2cef047bad9c8b835043446cb | [
"Apache-2.0"
] | 2 | 2020-07-05T01:01:30.000Z | 2020-07-08T22:33:06.000Z | #!/usr/bin/env python3
# 杂项
import math
import glob
from timeit import Timer
from yvhai.demo.base import YHDemo
| 31.709091 | 92 | 0.554472 | #!/usr/bin/env python3
# 杂项
import math
import glob
from timeit import Timer
from yvhai.demo.base import YHDemo
class MiscDemo(YHDemo):
    """Grab-bag stdlib demos: math, timeit, random and glob."""
    def __init__(self):
        super(MiscDemo, self).__init__('Misc')
    @staticmethod
    def test_math():
        """Print a few math-module constants and functions."""
        _sec = MiscDemo.mark_section('数学库')
        print(' -- math.pi: ', math.pi)
        print(' -- math.e: ', math.e)
        print(' -- math.pow(2, 10): ', math.pow(2, 10))
        print(' -- math.log(1024, 2): ', math.log(1024, 2))
        print(' -- math.cos(math.pi / 4): ', math.cos(math.pi / 4))
        print(' -- math.asin(3 / 5): ', math.asin(3 / 5))
    @staticmethod
    def test_timer():
        """Time two ways of swapping variables with timeit.Timer."""
        _sec = MiscDemo.mark_section('Timer')
        print(Timer('t=a; a=b; b=t', 'a=1;b=2').timeit())
        print(Timer('a,b=b,a', 'a=1;b=2').timeit())
    @staticmethod
    def test_random():
        """Show the common random-module helpers."""
        _sec = MiscDemo.mark_section('随机数测试')
        import random
        for x in range(3):
            print(' -- random.choice: ', random.choice(['mysql', 'mongo', 'redis', 'ssdb']))
        print(' -- random.sample(range(100), 10): ', random.sample(range(100), 10))
        print(' -- random.random(): ', random.random())
        print(' -- int(random.random() * 100): ', int(random.random() * 100))
        print(' -- random.randrange(100): ', random.randrange(100))
    @staticmethod
    def enum_files():
        """List *.py files via glob, non-recursively and recursively."""
        _sec = MiscDemo.mark_section('枚举文件列表')
        print(' -- glob.glob("*.py"): ', glob.glob('*.py'))
        print(' -- glob.glob("*.py", recursive=True): ', glob.glob('*.py', recursive=True))
    @staticmethod
    def demo(args=None):
        """Run every demo section in order.

        Bug fix: the default was a shared mutable list (args=[]); use None
        instead.  The argument is currently unused but kept for callers.
        """
        MiscDemo.test_math()
        MiscDemo.test_timer()
        MiscDemo.test_random()
        MiscDemo.enum_files()
| 1,381 | 253 | 23 |
111de6773d8b6f5fd3cd5bf72499a88f8440046d | 736 | py | Python | tools/img2hex.py | TNKSoftware/helix_remix | 592756de82aba5f8ccb79d0087066f2d7dc4f1d4 | [
"MIT"
] | 1 | 2021-01-13T00:00:26.000Z | 2021-01-13T00:00:26.000Z | tools/img2hex.py | TNKSoftware/helix_remix | 592756de82aba5f8ccb79d0087066f2d7dc4f1d4 | [
"MIT"
] | null | null | null | tools/img2hex.py | TNKSoftware/helix_remix | 592756de82aba5f8ccb79d0087066f2d7dc4f1d4 | [
"MIT"
] | null | null | null | from PIL import Image
import numpy
import sys

# CLI: img2hex.py [src_image] [dest_text]
argvs = sys.argv
argc = len(argvs)
src = "src.png" if argc <= 1 else argvs[1]
dest = "res.txt" if argc <= 2 else argvs[2]
print("{} => {}".format(src, dest))

img = Image.open(src).convert('RGB')
data = numpy.array(img)
w, h = img.size

# Pad the row width up to the next multiple of 8 so rows pack into whole bytes.
sw = w
if sw % 8 != 0:
    sw = sw + (8 - (sw % 8))
print("(Width, Height, Stride) => ({},{},{})".format(w, h, sw))

v = 0x00   # byte accumulator
pos = 0    # next bit position within the accumulator (LSB first)
ot = 0     # bytes emitted on the current output line
# Fix: use a context manager so the output file is closed even on error.
with open(dest, "w") as f:
    for y in range(h):
        for x in range(sw):
            if x >= w:
                c = 0  # padding bits beyond the real image width are zero
            else:
                # Threshold the red channel: dark pixel -> 1, light -> 0.
                c = 1 if data[y, x][0] < 128 else 0
            v = v | (c << pos)
            pos = pos + 1
            if pos >= 8:
                f.write("0x{:02X},".format(v))
                v = 0x00
                pos = 0
                ot = ot + 1
                if ot == 20:  # wrap the output every 20 bytes
                    f.write("\n")
                    ot = 0
| 15.020408 | 63 | 0.519022 | from PIL import Image
import numpy
import sys

# CLI: img2hex.py [src_image] [dest_text]
argvs = sys.argv
argc = len(argvs)
src = "src.png" if argc <= 1 else argvs[1]
dest = "res.txt" if argc <= 2 else argvs[2]
print("{} => {}".format(src, dest))

img = Image.open(src).convert('RGB')
data = numpy.array(img)
w, h = img.size

# Pad the row width up to the next multiple of 8 so rows pack into whole bytes.
sw = w
if sw % 8 != 0:
    sw = sw + (8 - (sw % 8))
print("(Width, Height, Stride) => ({},{},{})".format(w, h, sw))

v = 0x00   # byte accumulator
pos = 0    # next bit position within the accumulator (LSB first)
ot = 0     # bytes emitted on the current output line
# Fix: use a context manager so the output file is closed even on error.
with open(dest, "w") as f:
    for y in range(h):
        for x in range(sw):
            if x >= w:
                c = 0  # padding bits beyond the real image width are zero
            else:
                # Threshold the red channel: dark pixel -> 1, light -> 0.
                c = 1 if data[y, x][0] < 128 else 0
            v = v | (c << pos)
            pos = pos + 1
            if pos >= 8:
                f.write("0x{:02X},".format(v))
                v = 0x00
                pos = 0
                ot = ot + 1
                if ot == 20:  # wrap the output every 20 bytes
                    f.write("\n")
                    ot = 0
| 0 | 0 | 0 |
7a1830607101f5ddb6f0f21ea2d15e595f05db4b | 1,675 | py | Python | vencode/tasks.py | AndreMacedo88/VEnCode-Web | c4c760f4aaea213efcebbf8ab9277e1884aa85ec | [
"BSD-3-Clause"
] | null | null | null | vencode/tasks.py | AndreMacedo88/VEnCode-Web | c4c760f4aaea213efcebbf8ab9277e1884aa85ec | [
"BSD-3-Clause"
] | null | null | null | vencode/tasks.py | AndreMacedo88/VEnCode-Web | c4c760f4aaea213efcebbf8ab9277e1884aa85ec | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from os import remove
from celery import shared_task
from VEnCode import DataTpm, Vencodes
from .models import Promoters154CellsBinarized, Enhancers154CellsBinarized
from .utils_views import *
# Create your tasks here
@shared_task
@shared_task
| 31.603774 | 82 | 0.741493 | from __future__ import absolute_import, unicode_literals
from os import remove
from celery import shared_task
from VEnCode import DataTpm, Vencodes
from .models import Promoters154CellsBinarized, Enhancers154CellsBinarized
from .utils_views import *
# Create your tasks here
@shared_task
def fantom_results_task(data_type, cell_type, algorithm, k, num_ven):
    """Celery task: run the VEnCode search against the bundled FANTOM data."""
    k_value = int(k)
    ven_count = int(num_ven)
    sources = {
        "promoters": Promoters154CellsBinarized,
        "enhancers": Enhancers154CellsBinarized,
    }
    if data_type not in sources:
        raise TypeError("Wrong data_type, please check the form data.")
    qs = sources[data_type].pdobjects.all()
    df = qs.to_dataframe(index='index')
    return prep_and_get_ven(df, cell_type, algorithm, k_value, ven_count)
@shared_task
def uploaded_results_task(file_name, cell_type, algorithm, k, num_ven):
    """Celery task: run the VEnCode search on an uploaded file, then delete it."""
    result = prep_and_get_ven(file_name, cell_type, algorithm, int(k), int(num_ven))
    # The uploaded file is temporary; remove it only after a successful run.
    remove(file_name)
    return result
def prep_and_get_ven(file_name, cell_type, algorithm, k, num_ven):
    """Load expression data, search for VEnCodes and collect the results.

    file_name: input accepted by DataTpm (a file path or, per the callers in
        this module, a DataFrame).
    cell_type: target cell type the data is made specific for.
    algorithm: VEnCode search algorithm name.
    k: number of regulatory elements per VEnCode.
    num_ven: how many VEnCodes to retrieve.
    Returns a two-element list: [prepared VEnCode tables, e-values].
    """
    data = DataTpm(file_name)
    data.load_data()
    data.make_data_celltype_specific(cell_type, replicates=True)
    # Keep only regulatory elements active in >= 90% of the target replicates.
    data.filter_by_target_celltype_activity(threshold=0.9)
    vencodes = Vencodes(data, algorithm=algorithm, number_of_re=k)
    vencodes.next(num_ven)
    vencode = vencodes.get_vencode_data(method="return")
    ven_html = list()
    for ven in vencode:
        ven.reset_index(level=0, inplace=True)
        # prepare_df comes from the star-import of utils_views -- presumably
        # formats each DataFrame for display; confirm at its definition.
        ven_html.append(prepare_df(ven))
    vencodes.determine_e_values()
    result = [ven_html, vencodes.e_values]
    return result
| 1,297 | 0 | 67 |
8147357c425e85b20d79943649fc725636d52ced | 699 | py | Python | src/portfolio.py | dimastatz/portfolio-manager | b3b114a26ba2bb6e032354d2756be0b3eb74deba | [
"Apache-2.0"
] | null | null | null | src/portfolio.py | dimastatz/portfolio-manager | b3b114a26ba2bb6e032354d2756be0b3eb74deba | [
"Apache-2.0"
] | 1 | 2021-08-31T05:51:09.000Z | 2021-08-31T05:51:09.000Z | src/portfolio.py | dimastatz/portfolio-manager | b3b114a26ba2bb6e032354d2756be0b3eb74deba | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from typing import List
from pandas.core.frame import DataFrame
from src.models.base_models import Model, RandomModel
| 25.888889 | 63 | 0.656652 | import pandas as pd
from typing import List
from pandas.core.frame import DataFrame
from src.models.base_models import Model, RandomModel
def get_models() -> List[Model]:
    """Return the fixed ensemble of models used to score portfolio stocks."""
    model_names = ('Model_1', 'Model_2', 'Model_3', 'Model_4', 'Model_5')
    return [RandomModel(name) for name in model_names]
def evaluate_portfolio(portfolio: List[str]) -> DataFrame:
    """Score each stock symbol in *portfolio* with every model.

    Returns a DataFrame indexed by stock symbol, with one column per model
    plus a 'Voting' column.
    """
    data = {'Stock': portfolio}
    for model in get_models():
        data[model.name] = [model.run(p) for p in portfolio]
    df = pd.DataFrame.from_dict(data=data)
    df.set_index('Stock', inplace=True)
    # NOTE(review): 'Voting' currently just copies Model_1's score rather
    # than aggregating across the models -- confirm whether that is intended.
    df['Voting'] = df.apply(lambda row: row['Model_1'], axis=1)
    return df
| 512 | 0 | 46 |
5fd0124afbd403884eca84748171cfdf8a06af60 | 5,003 | py | Python | library/library_emission.py | ucl-exoplanets/TauREx_public | 28d47f829a2873cf15e3bfb0419b8bc4e5bc03dd | [
"CC-BY-4.0"
] | 18 | 2019-07-22T01:35:24.000Z | 2022-02-10T11:25:42.000Z | library/library_emission.py | ucl-exoplanets/TauREx_public | 28d47f829a2873cf15e3bfb0419b8bc4e5bc03dd | [
"CC-BY-4.0"
] | null | null | null | library/library_emission.py | ucl-exoplanets/TauREx_public | 28d47f829a2873cf15e3bfb0419b8bc4e5bc03dd | [
"CC-BY-4.0"
] | 1 | 2017-10-19T15:14:06.000Z | 2017-10-19T15:14:06.000Z | import scipy,itertools,sys,os,time,logging
import scipy.constants as con
import numpy as np
import ctypes as C
from scipy.stats.mstats_basic import tmean
def fit_brightness_temp(wave,flux):
    '''
    function fitting a black body to given flux-wavelength
    value.
    Input: Flux at wavelength (microns)
    Output: brightness temperature.
    '''
    # NOTE(review): `chi` is referenced but not defined anywhere in this
    # chunk -- the squared-residual objective appears to have been stripped
    # out, so this call raises NameError as written.  TODO restore it.
    # Nelder-Mead minimisation starting from 1000 K, with output suppressed.
    tempfit = scipy.optimize.fmin(chi,1000,args=(flux,wave),maxiter=1000,disp=0)
    return tempfit
def black_body_to_temp(wave, flux):
    """Invert the Planck law: convert spectral flux back to temperature.

    wave : wavelength grid in microns (scalar or numpy array).
    flux : spectral radiance grid, 2 h c^2 / (lambda^5 (exp(h c / lambda k T) - 1)).
    Returns the temperature grid in Kelvin.

    @todo check if conversion is correct. There is probably a /m^2/micron offset
    """
    h = 6.62606957e-34   # Planck constant [J s]
    c = 299792458        # speed of light [m / s]
    k = 1.3806488e-23    # Boltzmann constant [J / K]
    # Bug fix: the original did `wave *= 1e-6`, mutating the caller's numpy
    # array in place.  Bind the metre-converted grid to a new name instead.
    wave_m = wave * 1e-6
    logpart = np.log(((2.0 * h * c ** 2) / (flux * wave_m ** 5)) + 1.0)
    return (h * c) / (wave_m * k) * 1.0 / logpart
def iterate_TP_profile(TP_params, TP_params_std, TP_bounds, TP_function, iterate=True):
    '''
    Iterate through the lower/upper bounds of all TP-profile parameters to
    find the lowest/highest attainable profiles.  Returns the mean TP
    profile together with a per-pressure-level 1-sigma spread.
    '''
    Tmean = TP_function(TP_params)
    clamped = []    # per-parameter (low, high), clipped into TP_bounds
    raw_low = []    # unclipped lower values (fast, non-iterating path)
    raw_high = []   # unclipped upper values
    for value, sigma, (bound_lo, bound_hi) in zip(TP_params, TP_params_std, TP_bounds):
        lo = value - sigma
        hi = value + sigma
        raw_low.append(lo)
        raw_high.append(hi)
        if lo < bound_lo:
            lo = bound_lo + 1e-10
        if hi > bound_hi:
            hi = bound_hi - 1e-10
        clamped.append((lo, hi))
    if iterate:
        # Evaluate the profile for every corner combination of the bounds.
        combos = list(itertools.product(*clamped))
        profiles = np.zeros((len(Tmean), len(combos)))
        for idx, combo in enumerate(combos):
            profiles[:, idx] = TP_function(combo)
        Tmean = np.mean(profiles, 1)
        T_sigma = (np.max(profiles, 1) - np.min(profiles, 1)) / 2.0
    else:
        # Cheap estimate: half the spread between the two extreme profiles.
        T_sigma = (TP_function(raw_high) - TP_function(raw_low)) / 2.0
    return Tmean, T_sigma
def generate_tp_covariance(outob):
'''
Function generating TP_profile covariance matrix from previous best fit.
This can be used by _TP_rodgers200 or _TP_hybrid TP profiles in a second stage fit.
'''
# todo needs to be adapted to new output class
#translating fitting parameters to mean temperature and lower/upper bounds
fit_TPparam_bounds = outob.fitting.fit_bounds[outob.fitting.fit_X_nparams:]
if outob.NEST:
T_mean, T_sigma = iterate_TP_profile(outob.NEST_TP_params_values[0], outob.NEST_TP_params_std[0],fit_TPparam_bounds,
outob.fitting.forwardmodel.atmosphere.TP_profile)
elif outob.MCMC:
T_mean, T_sigma = iterate_TP_profile(outob.MCMC_TP_params_values[0], outob.MCMC_TP_params_std[0],fit_TPparam_bounds,
outob.fitting.forwardmodel.atmosphere.TP_profile)
elif outob.DOWN:
FIT_std = np.zeros_like(outob.DOWN_TP_params_values)
T_mean, T_sigma = iterate_TP_profile(outob.DOWN_TP_params_values, FIT_std,fit_TPparam_bounds,
outob.fitting.forwardmodel.atmosphere.TP_profile)
else:
logging.error('Cannot compute TP-covariance. No Stage 0 fit (NS/MCMC/MLE) can be found.')
exit()
#getting temperature error
nlayers = outob.fitting.forwardmodel.atmosphere.nlayers
#setting up arrays
Ent_arr = np.zeros((nlayers,nlayers))
Sig_arr = np.zeros((nlayers,nlayers))
#populating arrays
for i in range(nlayers):
Ent_arr[i,:] = np.abs((T_mean[i])-(T_mean[:]))
Sig_arr[i,:] = np.abs(T_sigma[i]+T_sigma[:])
Diff_arr = np.sqrt(Ent_arr**2+Sig_arr**2)
Diff_norm = ((Diff_arr-np.min(Diff_arr))/np.max(Diff_arr-np.min(Diff_arr)))
Cov_array = 1.0 - Diff_norm
return Cov_array | 33.353333 | 124 | 0.630022 | import scipy,itertools,sys,os,time,logging
import scipy.constants as con
import numpy as np
import ctypes as C
from scipy.stats.mstats_basic import tmean
def black_body(lamb, temp):
    """Planck black-body spectral flux.

    lamb : spectral coordinate; the 10000/lamb conversion below treats it as
        a wavenumber in cm^-1 (10000/lamb = wavelength in microns), although
        the original inline comment said "microns" -- TODO confirm callers.
    temp : temperature in Kelvin.
    Returns the flux in W/m^2/micron.
    """
    PLANCK = 6.62606957e-34
    LIGHT_SPEED = 299792458
    BOLTZMANN = 1.3806488e-23
    PI = 3.14159265359
    wavelength_m = 10000.0 / lamb * 1e-6  # metres
    exponent = np.exp((PLANCK * LIGHT_SPEED) / (wavelength_m * BOLTZMANN * temp))
    radiance = (PI * (2.0 * PLANCK * LIGHT_SPEED ** 2) / wavelength_m ** 5) * (1.0 / (exponent - 1))
    # 1e-6 converts the per-metre spectral density to per-micron.
    return radiance * 1e-6
def fit_brightness_temp(wave, flux):
    """Fit a Planck black body to a single (wavelength, flux) sample.

    Returns the brightness temperature found by a Nelder-Mead minimisation
    of the squared residual, starting from 1000 K.
    """
    def squared_residual(temp, observed_flux, spectral_coord):
        # Squared difference between the observation and the Planck model.
        return (observed_flux - black_body(spectral_coord, temp)) ** 2

    return scipy.optimize.fmin(squared_residual, 1000, args=(flux, wave),
                               maxiter=1000, disp=0)
def black_body_to_temp(wave, flux):
    """Invert the Planck law: convert spectral flux back to temperature.

    wave : wavelength grid in microns (scalar or numpy array).
    flux : spectral radiance grid, 2 h c^2 / (lambda^5 (exp(h c / lambda k T) - 1)).
    Returns the temperature grid in Kelvin.

    @todo check if conversion is correct. There is probably a /m^2/micron offset
    """
    h = 6.62606957e-34   # Planck constant [J s]
    c = 299792458        # speed of light [m / s]
    k = 1.3806488e-23    # Boltzmann constant [J / K]
    # Bug fix: the original did `wave *= 1e-6`, mutating the caller's numpy
    # array in place.  Bind the metre-converted grid to a new name instead.
    wave_m = wave * 1e-6
    logpart = np.log(((2.0 * h * c ** 2) / (flux * wave_m ** 5)) + 1.0)
    return (h * c) / (wave_m * k) * 1.0 / logpart
def iterate_TP_profile(TP_params, TP_params_std, TP_bounds, TP_function, iterate=True):
    '''
    Iterate through the lower/upper bounds of all TP-profile parameters to
    find the lowest/highest attainable profiles.  Returns the mean TP
    profile together with a per-pressure-level 1-sigma spread.
    '''
    Tmean = TP_function(TP_params)
    clamped = []    # per-parameter (low, high), clipped into TP_bounds
    raw_low = []    # unclipped lower values (fast, non-iterating path)
    raw_high = []   # unclipped upper values
    for value, sigma, (bound_lo, bound_hi) in zip(TP_params, TP_params_std, TP_bounds):
        lo = value - sigma
        hi = value + sigma
        raw_low.append(lo)
        raw_high.append(hi)
        if lo < bound_lo:
            lo = bound_lo + 1e-10
        if hi > bound_hi:
            hi = bound_hi - 1e-10
        clamped.append((lo, hi))
    if iterate:
        # Evaluate the profile for every corner combination of the bounds.
        combos = list(itertools.product(*clamped))
        profiles = np.zeros((len(Tmean), len(combos)))
        for idx, combo in enumerate(combos):
            profiles[:, idx] = TP_function(combo)
        Tmean = np.mean(profiles, 1)
        T_sigma = (np.max(profiles, 1) - np.min(profiles, 1)) / 2.0
    else:
        # Cheap estimate: half the spread between the two extreme profiles.
        T_sigma = (TP_function(raw_high) - TP_function(raw_low)) / 2.0
    return Tmean, T_sigma
def generate_tp_covariance(outob):
    '''
    Build a TP-profile covariance matrix from a previous best fit.
    This can be used by _TP_rodgers200 or _TP_hybrid TP profiles in a
    second stage fit.
    '''
    # todo needs to be adapted to new output class
    # Fitted TP parameters sit after the first fit_X_nparams entries.
    tp_bounds = outob.fitting.fit_bounds[outob.fitting.fit_X_nparams:]
    tp_function = outob.fitting.forwardmodel.atmosphere.TP_profile
    if outob.NEST:
        T_mean, T_sigma = iterate_TP_profile(
            outob.NEST_TP_params_values[0], outob.NEST_TP_params_std[0],
            tp_bounds, tp_function)
    elif outob.MCMC:
        T_mean, T_sigma = iterate_TP_profile(
            outob.MCMC_TP_params_values[0], outob.MCMC_TP_params_std[0],
            tp_bounds, tp_function)
    elif outob.DOWN:
        # Downhill (MLE) fits carry no parameter uncertainty.
        zero_std = np.zeros_like(outob.DOWN_TP_params_values)
        T_mean, T_sigma = iterate_TP_profile(
            outob.DOWN_TP_params_values, zero_std, tp_bounds, tp_function)
    else:
        logging.error('Cannot compute TP-covariance. No Stage 0 fit (NS/MCMC/MLE) can be found.')
        exit()
    nlayers = outob.fitting.forwardmodel.atmosphere.nlayers
    # Pairwise temperature separations and combined 1-sigma uncertainties.
    Ent_arr = np.zeros((nlayers, nlayers))
    Sig_arr = np.zeros((nlayers, nlayers))
    for i in range(nlayers):
        Ent_arr[i, :] = np.abs(T_mean[i] - T_mean[:])
        Sig_arr[i, :] = np.abs(T_sigma[i] + T_sigma[:])
    # Distance in (separation, uncertainty) space, normalised to [0, 1];
    # covariance is high where layers are close and uncertainties overlap.
    Diff_arr = np.sqrt(Ent_arr ** 2 + Sig_arr ** 2)
    Diff_norm = (Diff_arr - np.min(Diff_arr)) / np.max(Diff_arr - np.min(Diff_arr))
    return 1.0 - Diff_norm
2276102bc149b052f122b1b434d684a307f11c8f | 191 | py | Python | daltonapi/__init__.py | TendTo/dalton | 81a0ee9877c0142ccc1f9d92f938e38c465f1724 | [
"MIT"
] | null | null | null | daltonapi/__init__.py | TendTo/dalton | 81a0ee9877c0142ccc1f9d92f938e38c465f1724 | [
"MIT"
] | null | null | null | daltonapi/__init__.py | TendTo/dalton | 81a0ee9877c0142ccc1f9d92f938e38c465f1724 | [
"MIT"
] | null | null | null | """Dalton API Wrapper for WAX
This module provides a Python wrapper for the Atomic Asset API,
with plans to expand to WAX and Atomic Market API endpoints.
"""
__version__ = "0.3.0"
| 23.875 | 65 | 0.717277 | """Dalton API Wrapper for WAX
This module provides a Python wrapper for the Atomic Asset API,
with plans to expand to WAX and Atomic Market API endpoints.
"""
__version__ = "0.3.0"
| 0 | 0 | 0 |
3eed3d8d123e605bd0c74c373d5313298c02dd09 | 207 | py | Python | generators/gen_nums.py | UltiRequiem/professional-phython-platzi | 0bf8f97b172d0799d6906193090ef69beb1c8b4b | [
"MIT"
] | 4 | 2021-08-02T21:34:46.000Z | 2021-09-24T03:26:35.000Z | generators/gen_nums.py | UltiRequiem/professional-phython-platzi | 0bf8f97b172d0799d6906193090ef69beb1c8b4b | [
"MIT"
] | null | null | null | generators/gen_nums.py | UltiRequiem/professional-phython-platzi | 0bf8f97b172d0799d6906193090ef69beb1c8b4b | [
"MIT"
] | 4 | 2021-08-02T21:34:47.000Z | 2021-08-11T03:21:37.000Z |
my_nums = num_generators(10)
# Drain the generator with a for loop; it handles StopIteration internally.
for value in my_nums:
    print(value)
def num_generators(max_num: int):
    """Yield the integers 1 .. max_num - 1 in ascending order."""
    yield from range(1, max_num)
my_nums = num_generators(10)
# Drain the generator; the for statement handles StopIteration internally.
for value in my_nums:
    print(value)
| 63 | 0 | 22 |
1de6239082805f3d24a765d3299b9bf0356bdc0e | 663 | py | Python | gui_e.g/try_wx_prc1.py | lukasdean/robust_python | 033d4fb84a3e7dcd4b8986125291b6f32d780c5c | [
"MIT"
] | null | null | null | gui_e.g/try_wx_prc1.py | lukasdean/robust_python | 033d4fb84a3e7dcd4b8986125291b6f32d780c5c | [
"MIT"
] | null | null | null | gui_e.g/try_wx_prc1.py | lukasdean/robust_python | 033d4fb84a3e7dcd4b8986125291b6f32d780c5c | [
"MIT"
] | null | null | null | #!/user/bin/env python
# -*-coding:utf-8 -*-
# @CreateTime : 2021/10/25 0:25
# @Author : xujiahui
# @Project : robust_python
# @File : try_wx_prc1.py
# @Version : V0.0.1
# @Desc : ?
import wx
if __name__ == "__main__":
app = MyApp()
app.MainLoop()
| 19.5 | 54 | 0.53997 | #!/user/bin/env python
# -*-coding:utf-8 -*-
# @CreateTime : 2021/10/25 0:25
# @Author : xujiahui
# @Project : robust_python
# @File : try_wx_prc1.py
# @Version : V0.0.1
# @Desc : ?
import wx
class MainFrame(wx.Frame):
    """Top-level application window."""
    def __init__(self, parent, id, title, size):
        # Bug fix: wx.Frame.__init__'s fourth positional argument is *pos*,
        # not *size*, so the original passed the intended size as a window
        # position.  Pass it by keyword instead.
        super().__init__(parent, id, title, size=size)
class MyApp(wx.App):
    def __init__(self):
        # wx.App.__init__ drives start-up and invokes OnInit below.
        super().__init__()
    def OnInit(self):
        """Create and show the main window; True keeps the app running."""
        frame = MainFrame(None, -1, "试试", (300, 500))
        frame.Show()
        frame.Center(True)
        return True
if __name__ == "__main__":
    # Build the app (which creates the UI via OnInit) and run the event loop.
    app = MyApp()
    app.MainLoop()
| 228 | 4 | 133 |
d0df816dd0c90dd0f25d52021a7d810faf5c8da7 | 1,388 | py | Python | treadmill/api/cron.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 2 | 2017-03-20T07:13:33.000Z | 2017-05-03T03:39:53.000Z | treadmill/api/cron.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 12 | 2017-07-10T07:04:06.000Z | 2017-07-26T09:32:54.000Z | treadmill/api/cron.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 2 | 2017-05-04T11:25:32.000Z | 2017-07-11T09:10:01.000Z | """Implementation of cron API.
"""
# import fnmatch
import logging
from treadmill import authz
# from treadmill import context
# from treadmill import cron
from treadmill import schema
# from treadmill.cron import model as cron_model
_LOGGER = logging.getLogger(__name__)
class API(object):
"""Treadmill Cron REST api."""
def init(authorizer):
    """Returns module API wrapped with authorizer function."""
    return authz.wrap(API(), authorizer)
| 23.525424 | 62 | 0.556916 | """Implementation of cron API.
"""
# import fnmatch
import logging
from treadmill import authz
# from treadmill import context
# from treadmill import cron
from treadmill import schema
# from treadmill.cron import model as cron_model
_LOGGER = logging.getLogger(__name__)
class API(object):
    """Treadmill Cron REST api."""
    def __init__(self):
        def _list(match=None):
            """List cron resources (stub implementation)."""
            pass
        @schema.schema({'$ref': 'cron.json#/resource_id'})
        def get(rsrc_id):
            """Fetch a cron resource by id (stub implementation)."""
            pass
        @schema.schema(
            {'$ref': 'cron.json#/resource_id'},
            {'allOf': [{'$ref': 'cron.json#/resource'},
                       {'$ref': 'cron.json#/verbs/create'}]},
        )
        def create(rsrc_id, rsrc):
            """Create a cron resource (stub implementation)."""
            pass
        @schema.schema(
            {'$ref': 'cron.json#/resource_id'},
            {'allOf': [{'$ref': 'cron.json#/verbs/update'}]}
        )
        def update(rsrc_id, rsrc):
            """Update a cron resource (stub implementation)."""
            pass
        @schema.schema({'$ref': 'cron.json#/resource_id'})
        def delete(rsrc_id):
            """Delete configured instance."""
            pass
        # Expose the schema-validated closures as the public API surface.
        self.list = _list
        self.get = get
        self.create = create
        self.update = update
        self.delete = delete
def init(authorizer):
    """Returns module API wrapped with authorizer function."""
    return authz.wrap(API(), authorizer)
| 888 | 0 | 27 |
82ad456a9639256075ab802f83982de3249310a7 | 109 | py | Python | Src/PythonSharp.Tests/Examples/namedargs.py | ajlopez/PythonSharp | 7120ca465311bdb9e8cbb3c66f6214f28d4dd13e | [
"MIT"
] | 10 | 2017-09-21T04:31:20.000Z | 2022-01-29T16:46:41.000Z | Src/PythonSharp.Tests/Examples/namedargs.py | ajlopez/PythonSharp | 7120ca465311bdb9e8cbb3c66f6214f28d4dd13e | [
"MIT"
] | null | null | null | Src/PythonSharp.Tests/Examples/namedargs.py | ajlopez/PythonSharp | 7120ca465311bdb9e8cbb3c66f6214f28d4dd13e | [
"MIT"
] | 5 | 2016-07-31T20:01:13.000Z | 2021-12-01T01:37:49.000Z |
# Keyword-argument call demos; expected outputs noted inline.
# NOTE(review): foo itself is missing from this (filtered) copy of the file.
print(foo()) # 5
print(foo(b=3)) # 7
print(foo(b=3,a=2)) # 8
def foo(a=1, b=2):
    """Keyword-argument demo: return a + 2*b."""
    return a + b * 2
print(foo())  # 5
print(foo(b=3))  # 7
print(foo(b=3, a=2))  # 8
| 15 | 0 | 26 |
3777daa4d07af17bbe8867fb0b79c7e85ff2eb18 | 1,178 | py | Python | tests/test_ws.py | kelar86/detectem | b1ecc3543b7c44ee76c4cac0d3896a7747bf86c1 | [
"MIT"
] | null | null | null | tests/test_ws.py | kelar86/detectem | b1ecc3543b7c44ee76c4cac0d3896a7747bf86c1 | [
"MIT"
] | 1 | 2021-03-26T00:23:57.000Z | 2021-03-26T00:23:57.000Z | tests/test_ws.py | kelar86/detectem | b1ecc3543b7c44ee76c4cac0d3896a7747bf86c1 | [
"MIT"
] | 1 | 2019-07-28T10:11:01.000Z | 2019-07-28T10:11:01.000Z | import json
from unittest.mock import patch
from boddle import boddle
from detectem.ws import do_detection
from detectem.exceptions import SplashError, NoPluginsError
"""
Tests run with `autospec` to match function signature in case of change
"""
@patch('detectem.ws.get_detection_results', autospec=True)
@patch('detectem.ws.get_detection_results', autospec=True)
@patch('detectem.ws.get_detection_results', autospec=True)
| 26.177778 | 78 | 0.686757 | import json
from unittest.mock import patch
from boddle import boddle
from detectem.ws import do_detection
from detectem.exceptions import SplashError, NoPluginsError
"""
Tests run with `autospec` to match function signature in case of change
"""
@patch('detectem.ws.get_detection_results', autospec=True)
def test_do_detection_with_normal_behavior(gdr):
    """A successful detection returns the JSON-encoded result list."""
    gdr.return_value = []
    with boddle(method='post', params={'url': 'http://domain.tld'}):
        assert do_detection() == json.dumps([])
@patch('detectem.ws.get_detection_results', autospec=True)
def test_do_detection_with_splash_exception(gdr):
    """A SplashError is reported as an {'error': ...} JSON payload."""
    gdr.side_effect = SplashError('splash')
    with boddle(method='post', params={'url': 'http://domain.tld'}):
        assert do_detection() == json.dumps({'error': 'Splash error: splash'})
@patch('detectem.ws.get_detection_results', autospec=True)
def test_do_detection_with_noplugins_exception(gdr):
    """A NoPluginsError message is passed straight through as the payload."""
    gdr.side_effect = NoPluginsError('No plugins')
    with boddle(method='post', params={'url': 'http://domain.tld'}):
        assert do_detection() == json.dumps({'error': 'No plugins'})
| 679 | 0 | 66 |
30b176793b08f80c3bc32214bb465d1be846197c | 2,465 | py | Python | SpeedTest.py | kyleishie/ISPMonitor | a547913bdf1e980f17cadf86608dacf269bb3f60 | [
"MIT"
] | null | null | null | SpeedTest.py | kyleishie/ISPMonitor | a547913bdf1e980f17cadf86608dacf269bb3f60 | [
"MIT"
] | null | null | null | SpeedTest.py | kyleishie/ISPMonitor | a547913bdf1e980f17cadf86608dacf269bb3f60 | [
"MIT"
] | null | null | null | """SpeedTester."""
import datetime
import json
import dateutil.parser
class SpeedTestResult(object):
"""Represents the results of a test performed by a SpeedTester."""
def __init__(self, download, startTime, ping=None, upload=None, endTime=None):
"""Results of a speedtest performed by a SpeedTester."""
super(SpeedTestResult, self).__init__()
self.download = download
self.upload = upload
self.ping = ping
self.startTime = startTime
self.endTime = endTime
if self.endTime is None:
self.endTime = datetime.datetime.now()
self.duration = round((self.endTime - self.startTime).total_seconds(), 2)
def description(self):
"""Return string describing the test result."""
durationString = "{}s".format(self.duration)
downloadSpeedString = "{} Kbps".format(self.download)
uploadSpeedString = "{} Kbps".format(self.upload)
return "\t".join((self.startTime.isoformat(), downloadSpeedString, uploadSpeedString, durationString))
def toString(self):
"""Return JSON String."""
return json.dumps(self)
def toJSON(self):
"""Return JSON String of Test Result."""
return {
"start": self.startTime.isoformat(),
"end": self.endTime.isoformat(),
"duration": self.duration,
"download": self.download,
"upload": self.upload,
"ping": self.ping
}
def fromJSON(json):
"""Instantiate a SpeedTestResult from JSON."""
startTime = dateutil.parser.parse(json["start"])
endTime = dateutil.parser.parse(json["end"])
return SpeedTestResult(json["download"], startTime, ping=json["ping"], upload=json["upload"], endTime=endTime)
class SpeedTester:
"""Interface for an object that can produce SpeedTestResult(s)."""
def performTest(self):
"""Perform the speedtest and return a SpeedTestResult."""
raise NotImplementedError("performTest must be implemented.")
class SpeedTestResultArchive(object):
"""Interface for an object that can persist SpeedTestResult objects."""
def testResults(self):
"""Return all SpeedTestResultObjects."""
raise NotImplementedError("testResults must be implemented.")
def append(self):
"""Append a SpeedTestResult to the persistent store."""
raise NotImplementedError("append must be implemented.")
| 35.724638 | 118 | 0.647465 | """SpeedTester."""
import datetime
import json
import dateutil.parser
class SpeedTestResult(object):
"""Represents the results of a test performed by a SpeedTester."""
def __init__(self, download, startTime, ping=None, upload=None, endTime=None):
"""Results of a speedtest performed by a SpeedTester."""
super(SpeedTestResult, self).__init__()
self.download = download
self.upload = upload
self.ping = ping
self.startTime = startTime
self.endTime = endTime
if self.endTime is None:
self.endTime = datetime.datetime.now()
self.duration = round((self.endTime - self.startTime).total_seconds(), 2)
def description(self):
"""Return string describing the test result."""
durationString = "{}s".format(self.duration)
downloadSpeedString = "{} Kbps".format(self.download)
uploadSpeedString = "{} Kbps".format(self.upload)
return "\t".join((self.startTime.isoformat(), downloadSpeedString, uploadSpeedString, durationString))
def toString(self):
"""Return JSON String."""
return json.dumps(self)
def toJSON(self):
"""Return JSON String of Test Result."""
return {
"start": self.startTime.isoformat(),
"end": self.endTime.isoformat(),
"duration": self.duration,
"download": self.download,
"upload": self.upload,
"ping": self.ping
}
def fromJSON(json):
"""Instantiate a SpeedTestResult from JSON."""
startTime = dateutil.parser.parse(json["start"])
endTime = dateutil.parser.parse(json["end"])
return SpeedTestResult(json["download"], startTime, ping=json["ping"], upload=json["upload"], endTime=endTime)
class SpeedTester:
"""Interface for an object that can produce SpeedTestResult(s)."""
def performTest(self):
"""Perform the speedtest and return a SpeedTestResult."""
raise NotImplementedError("performTest must be implemented.")
class SpeedTestResultArchive(object):
"""Interface for an object that can persist SpeedTestResult objects."""
def testResults(self):
"""Return all SpeedTestResultObjects."""
raise NotImplementedError("testResults must be implemented.")
def append(self):
"""Append a SpeedTestResult to the persistent store."""
raise NotImplementedError("append must be implemented.")
| 0 | 0 | 0 |
fc36e5f276ceb852f85f61f79def217e7d9e1808 | 1,469 | py | Python | hedonometer/model.py | trabajo-grado/arcs-classification | 0d4b5f861c4a9f81151f1c9b30c410dd13de9646 | [
"MIT"
] | null | null | null | hedonometer/model.py | trabajo-grado/arcs-classification | 0d4b5f861c4a9f81151f1c9b30c410dd13de9646 | [
"MIT"
] | null | null | null | hedonometer/model.py | trabajo-grado/arcs-classification | 0d4b5f861c4a9f81151f1c9b30c410dd13de9646 | [
"MIT"
] | 2 | 2021-05-27T01:59:57.000Z | 2021-08-19T12:31:47.000Z | import torch.nn
from torch import nn
from transformers import BertModel
import numpy as np
| 32.644444 | 63 | 0.593601 | import torch.nn
from torch import nn
from transformers import BertModel
import numpy as np
class SentimentClassifier(nn.Module):
    """BERT-based binary sentiment classifier.

    Wraps a pretrained BERT encoder with dropout and a single-logit
    linear head followed by a sigmoid, yielding one probability per input.
    """

    def __init__(self, pretrained_model, name):
        super(SentimentClassifier, self).__init__()
        self.name = name
        self.bert = BertModel.from_pretrained(pretrained_model)
        self.drop = nn.Dropout(p=0.3)
        self.out = nn.Linear(self.bert.config.hidden_size, 1)
        self.probability = nn.Sigmoid()

    def forward(self, input_ids, attention_mask):
        """Return sentiment probabilities, shape (batch,)."""
        dect = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        # BERT's pooled [CLS] representation feeds the classification head.
        pooled_output = dect['pooler_output']
        dropped = self.drop(pooled_output)
        output = self.out(dropped)
        return self.probability(output).squeeze(-1)

    def get_sentiment_distribution(self, data_loader, device):
        """Run inference over *data_loader* and return stacked probabilities.

        :param data_loader: iterable of dicts with "input_ids" and
            "attention_mask" tensors (one dict per batch)
        :param device: torch device the inputs should be moved to
        :return: 2-D array, one row of probabilities per batch
            (assumes every batch has the same size — TODO confirm,
            np.vstack requires equal-length rows)
        """
        self.eval()
        predictions = []
        with torch.no_grad():
            for d in data_loader:
                input_ids = d["input_ids"].to(device)
                attention_mask = d["attention_mask"].to(device)
                outputs = self(
                    input_ids=input_ids,
                    attention_mask=attention_mask
                )
                predictions.append(outputs)
        # Bug fix: np.vstack was fed a generator expression, which NumPy
        # rejects (it requires a sequence of arrays); the intermediate
        # np.asarray over ragged per-batch arrays is also dropped.
        batch_arrays = [pred.cpu().numpy() for pred in predictions]
        data = np.vstack(batch_arrays)
        return data
| 1,258 | 16 | 103 |
6c038f1c4d41f50ee2e6474eb543ec844881d310 | 60 | py | Python | TUI/TCC/Catalog/__init__.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | TUI/TCC/Catalog/__init__.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | TUI/TCC/Catalog/__init__.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | """Catalog of user objects"""
from .CatalogMenuWdg import *
| 20 | 29 | 0.733333 | """Catalog of user objects"""
from .CatalogMenuWdg import *
| 0 | 0 | 0 |
545ec95f8f61929e8c9526a6c2c6b417d5ba52ea | 1,388 | py | Python | dictionaries/inventory.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | dictionaries/inventory.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | dictionaries/inventory.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | 1 | 2022-01-14T17:12:44.000Z | 2022-01-14T17:12:44.000Z | #You will receive a journal with some Collecting items, separated with ', ' (comma and space). After that, until receiving "Craft!" you will be receiving different commands.
#Commands (split by " - "):
#"Collect - {item}" – Receiving this command, you should add the given item in your inventory. If the item already exists, you should skip this line.
#"Drop - {item}" – You should remove the item from your inventory, if it exists.
#"Combine Items - {oldItem}:{newItem}" – You should check if the old item exists, if so, add the new item after the old one. Otherwise, ignore the command.
#"Renew – {item}" – If the given item exists, you should change its position and put it last in your inventory.
items = input().split(", ")
command = input()
while not command == "Craft!":
type_command = command.split(" - ")
item = command.split(" - ")
is_exist = check_item_exist(item, items)
if type_command == "Collect" and not is_exist:
items.append(item)
elif type_command == "Drop" and is_exist:
items.remove(item)
elif type_command == "Combine Items":
old_item = item.split(":")
new_item = item.split(":")
| 49.571429 | 173 | 0.68732 | #You will receive a journal with some Collecting items, separated with ', ' (comma and space). After that, until receiving "Craft!" you will be receiving different commands.
#Commands (split by " - "):
#"Collect - {item}" – Receiving this command, you should add the given item in your inventory. If the item already exists, you should skip this line.
#"Drop - {item}" – You should remove the item from your inventory, if it exists.
#"Combine Items - {oldItem}:{newItem}" – You should check if the old item exists, if so, add the new item after the old one. Otherwise, ignore the command.
#"Renew – {item}" – If the given item exists, you should change its position and put it last in your inventory.
def check_item_exist(check_item, all_items):
    """Return True if *check_item* is present in *all_items*.

    Simplified from an if/else over a throwaway set: a direct membership
    test is equivalent for hashable items and, unlike ``in set(...)``,
    does not raise TypeError when *check_item* is unhashable (the caller
    currently passes a list).
    """
    return check_item in all_items
items = input().split(", ")
command = input()
while not command == "Craft!":
type_command = command.split(" - ")
item = command.split(" - ")
is_exist = check_item_exist(item, items)
if type_command == "Collect" and not is_exist:
items.append(item)
elif type_command == "Drop" and is_exist:
items.remove(item)
elif type_command == "Combine Items":
old_item = item.split(":")
new_item = item.split(":")
| 206 | 0 | 22 |
406ad2a5e1ceed6576faa87fdec56b6b46fcb1c5 | 8,964 | py | Python | Captain/Captain.py | HerouFenix/rl_bots | 948b2b718a5777265f9571fa0297a17cec985064 | [
"MIT"
] | null | null | null | Captain/Captain.py | HerouFenix/rl_bots | 948b2b718a5777265f9571fa0297a17cec985064 | [
"MIT"
] | 1 | 2022-02-17T23:29:56.000Z | 2022-02-17T23:29:56.000Z | Captain/Captain.py | HerouFenix/rl_bots | 948b2b718a5777265f9571fa0297a17cec985064 | [
"MIT"
] | null | null | null | from typing import List
from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from tmcp import TMCPHandler, TMCPMessage
from util.vec import Vec3
from util.utilities import physics_object, Vector
from policy import base_policy, marujo_strategy
from tools.drawing import DrawingTool
from util.game_info import GameInfo
from policy.macros import ACK, KICKOFF, CLEAR, DEFENSE, UNDEFINED
try:
from rlutilities.linear_algebra import *
from rlutilities.mechanics import Aerial, AerialTurn, Dodge, Wavedash, Boostdash
from rlutilities.simulation import Game, Ball, Car, Input
except:
print("==========================================")
print("\nrlutilities import failed.")
print("\n==========================================")
quit()
RENDERING = True
| 42.483412 | 184 | 0.625614 | from typing import List
from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from tmcp import TMCPHandler, TMCPMessage
from util.vec import Vec3
from util.utilities import physics_object, Vector
from policy import base_policy, marujo_strategy
from tools.drawing import DrawingTool
from util.game_info import GameInfo
from policy.macros import ACK, KICKOFF, CLEAR, DEFENSE, UNDEFINED
try:
from rlutilities.linear_algebra import *
from rlutilities.mechanics import Aerial, AerialTurn, Dodge, Wavedash, Boostdash
from rlutilities.simulation import Game, Ball, Car, Input
except:
print("==========================================")
print("\nrlutilities import failed.")
print("\n==========================================")
quit()
RENDERING = True
class Captain(BaseAgent):
    """RLBot agent in which the lowest-indexed teammate acts as captain.

    The captain chooses a stance for every teammate via ``base_policy``
    and broadcasts it over TMCP; every bot (captain included) then turns
    its assigned stance into a concrete action via ``marujo_strategy``.
    """

    def __init__(self, name, team, index):
        super().__init__(name, team, index)

    def initialize_agent(self):
        """One-time setup performed by the framework before the first tick."""
        self.tmcp_handler = TMCPHandler(self)
        self.info = GameInfo(self.team)
        self.info.set_mode("soccar")
        self.draw = DrawingTool(self.renderer, self.team)
        self.tick_counter = 0
        self.last_latest_touch_time = 0
        self.me = physics_object()
        self.car = None

        # Assume you're the captain; if you find an index lower than yours, adjust
        self.captain = True
        self.allies = []
        self.enemies = []

        self.policy = None
        self.action = None
        self.controls = SimpleControllerState()

        # Team actions {index: Stance}
        self.team_actions = {}
        self.last_sent = {}
        self.stance = UNDEFINED
        self.negotiated = False

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        """Per-tick entry point: update state, negotiate, act, render."""
        # Handle the packet
        self.parse_packet(packet)

        # Check if our action needs to change
        self.check_resets(packet)

        # Choosing the action: only the captain decides
        # NOTE(review): handle_comms (which also *receives* stances) is only
        # called when self.captain is True — confirm non-captains receive
        # their orders elsewhere; behavior kept as-is here.
        if self.captain:
            my_team = [i for i in range(self.info.num_cars) if self.info.cars[i].team == self.team]
            # Send / Receive TMCP messages
            self.handle_comms(packet)

        # When you're finished with the action or if it has been cancelled or the game has just reset, reconsider team strategy
        if self.action == None or self.action.finished:
            if self.captain:
                self.team_actions = base_policy.choose_stance(self.info, self.info.cars[self.index], my_team, self.last_sent)
                # Send actions as captain
                self.handle_comms(packet)

            # Pick action according to previous orders
            self.action = marujo_strategy.choose_action(self.info, self.info.cars[self.index], self.stance)

        # Execute action
        if self.action is not None:
            self.action.step(self.info.time_delta)
            self.controls = self.action.controls

            if RENDERING:
                self.renderer.draw_string_3d(self.info.cars[self.index].position + vec3(0,0,10), 2, 2, self.action.name, self.renderer.white())
                self.renderer.draw_line_3d(self.info.cars[self.index].position, self.info.ball.position, self.renderer.white())
                self.renderer.draw_string_3d(self.info.cars[self.index].position + vec3(0,0,-5), 1, 1, f'Speed: {norm(self.info.cars[self.index].velocity):.1f}', self.renderer.white())
                self.renderer.draw_rect_3d(self.info.ball.position , 8, 8, True, self.renderer.cyan(), centered=True)

        if RENDERING:
            self.draw.execute()

        return self.controls

    def parse_packet(self, packet):
        """ Updates information about the cars in the game from a given packet. Location, velocity, rotation and boost level.
            Also useful to keep everyone in check with who the captain is.
        """
        self.info.read_packet(packet, self.get_field_info())
        self.ball_location = Vec3(packet.game_ball.physics.location)

        # Bug fix: rebuild these rosters every tick — the original appended
        # to the previous tick's lists, which grew without bound and kept
        # stale physics snapshots.
        self.allies = []
        self.enemies = []

        for i in range(packet.num_cars):
            car = packet.game_cars[i]

            # Checking who the captain is
            if self.captain and i < self.index and car.team == self.team:
                self.captain = False
                self.logger.info("Just got demoted.. captain now is " + str(i))
                self.tmcp_handler.send_boost_action(ACK)

            # Fetching relevant information about every car
            _obj = physics_object()
            _obj.index = i
            _obj.team = car.team
            _obj.location = Vector([car.physics.location.x, car.physics.location.y, car.physics.location.z])
            _obj.velocity = Vector([car.physics.velocity.x, car.physics.velocity.y, car.physics.velocity.z])
            _obj.rotation = Vector([car.physics.rotation.pitch, car.physics.rotation.yaw, car.physics.rotation.roll])
            _obj.avelocity = Vector([car.physics.angular_velocity.x, car.physics.angular_velocity.y, car.physics.angular_velocity.z])
            _obj.boostLevel = car.boost
            #_obj.local_location = localizeVector(_obj,self.me)

            if i != self.index:
                if car.team == self.team:
                    self.allies.append(_obj)
                else:
                    self.enemies.append(_obj)
            else:
                self.me = _obj
                self.car = packet.game_cars[i]

    def handle_comms(self, packet):
        """ Responsible for handling the TMCP packets sent in the previous iteration.
            Marujos read messages, captains send them. (general rule)
            TMCP only supports a pre-defined set of messages, so we will be adding a few by changing certain parameters.
            Also, the original implementation of TMCP does not support targeted messages, only broadcasts. So we are going to instance the message and replace
            the index of the sender with the index of the desired receiver.
        """
        # Decide what to do with your mateys
        if self.captain:
            for index in self.team_actions:
                # Skip re-sending an unchanged order (kickoff is always re-sent)
                if index in self.last_sent and self.last_sent[index] == self.team_actions[index] and self.last_sent[index] != KICKOFF:
                    continue
                message = TMCPMessage.boost_action(self.team, index, self.team_actions[index]) # Send the stance to maroojo
                if index == self.index:
                    # The captain's own order is applied directly, not sent.
                    self.stance = message.target
                else:
                    succ = self.tmcp_handler.send(message)
                    if not succ:
                        self.logger.warn("Failed to send message to" + str(index))
                self.last_sent[index] = self.team_actions[index]

        # Check if there are new orders
        else:
            # Receive and parse all new matchcomms messages into TMCPMessage objects.
            while True:
                new_messages: List[TMCPMessage] = self.tmcp_handler.recv()
                # Handle TMCPMessages, which for marujos is pretty much just updating stance.
                for message in new_messages:
                    if message.index == self.index and message.team == self.team:
                        # During kickoff pause only KICKOFF/DEFENSE orders apply.
                        if not (packet.game_info.is_kickoff_pause and message.target not in [KICKOFF, DEFENSE]):
                            self.stance = message.target
                            if self.stance != UNDEFINED:
                                self.negotiated = True
                break

    def check_resets(self, packet):
        """Cancel/reset the current action when the game state demands it.

        Returns True when the action was reset this tick, False otherwise.
        """
        # cancel maneuver if a kickoff is happening and current maneuver isn't a kickoff maneuver
        if packet.game_info.is_kickoff_pause and not self.negotiated and not self.captain:
            self.action = None
            self.stance = UNDEFINED

        if self.negotiated and not packet.game_info.is_kickoff_pause and not self.captain:
            self.negotiated = False

        # reset action when another car hits the ball
        touch = packet.game_ball.latest_touch
        if (touch.time_seconds > self.last_latest_touch_time and touch.player_name != packet.game_cars[self.index].name):
            self.last_latest_touch_time = touch.time_seconds

            # don't reset when we're dodging, wavedashing or recovering
            if self.action and self.action.interruptible():
                self.action = None
                return True

        # reset action if we are not clearing, if its interruptible and the ball is entering the danger zone
        # if ball is in a dangerous position, clear it, be it with a clear or with a well-alligned strike
        dangerous = marujo_strategy.danger(self.info, self.info.cars[self.index])
        if (dangerous and self.stance != CLEAR and self.action and self.action.interruptible()):
            self.stance = CLEAR
            self.action = None
            return True

        return False
| 4,099 | 3,962 | 23 |
dda5db1c1a6e695025c263bd0d91096552216c7b | 4,794 | py | Python | 0167_two_sum_2/python_source.py | arthurdysart/LeetCode | 69f90877c5466927e8b081c4268cbcda074813ec | [
"Unlicense"
] | null | null | null | 0167_two_sum_2/python_source.py | arthurdysart/LeetCode | 69f90877c5466927e8b081c4268cbcda074813ec | [
"Unlicense"
] | null | null | null | 0167_two_sum_2/python_source.py | arthurdysart/LeetCode | 69f90877c5466927e8b081c4268cbcda074813ec | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Leetcode - Two Sum II
https://leetcode.com/problems/two-sum-ii-input-array-is-sorted
Created on Sun Nov 18 20:39:12 2018
@author: Arthur Dysart
"""
## REQUIRED MODULES
import sys
## MODULE DEFINITIONS
class Solution:
    """
    One-pointer with binary search of sorted array.

    Time complexity: O(n)
    - Amortized iterate over all elements in array
    Space complexity: O(1)
    - Constant pointer evaluation
    """

    def two_sum(self, a, x):
        """
        Determines indices (base index 1) of the two elements summing to "x".

        :param list[int] a: sorted array of integers
        :param int x: target integer sum
        :return: list of indices (base index 1), or [-1, -1] when absent
        :rtype: list[int]
        """
        if not a:
            return [-1, -1]
        size = len(a)
        for left in range(size):
            complement = x - a[left]
            right = self.find_target(complement, left, size, a)
            if right:
                # find_target never reports index 0 (it searches from
                # left + 1 onward), so a truthy value means "found".
                return [left + 1, right + 1]
        return [-1, -1]

    def find_target(self, t, i, n, a):
        """
        Searches for target "t" by binary search of a[i + 1 : n].

        :param int t: target integer for two sum
        :param int i: index just below the search range
        :param int n: length of input array
        :param list[int] a: sorted array of integers
        :return: index of the target element, or 0 when absent
        :rtype: int
        """
        if not a or not n:
            return 0
        lo, hi = i + 1, n - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if a[mid] == t:
                # Target element found
                return mid
            if a[mid] < t:
                lo = mid + 1
            else:
                hi = mid - 1
        return 0
class Solution2:
    """
    One-pointer unitary search of sorted array (with memoization).

    Time complexity: O(n)
    - Amortized iterate over all elements in array
    Space complexity: O(1)
    - Constant pointer evaluation
    """

    def two_sum(self, a, x):
        """
        Determines indices (base index 1) of the two elements summing to "x".

        :param list[int] a: sorted array of integers
        :param int x: target integer sum
        :return: list of indices (base index 1), or [-1, -1] when absent
        :rtype: list[int]
        """
        if not a:
            return [-1, -1]
        seen = {}
        for j, value in enumerate(a):
            complement = x - value
            if complement in seen:
                # Complementary element was visited earlier.
                return [seen[complement] + 1, j + 1]
            # Remember this value's position for later complements.
            seen[value] = j
        return [-1, -1]
class Solution3:
    """
    Two-pointer unitary search of sorted array.

    Time complexity: O(n)
    - Amortized iterate over all elements in array
    Space complexity: O(1)
    - Constant pointer evaluation
    """

    def two_sum(self, a, x):
        """
        Determines indices (base index 1) of the two elements summing to "x".

        :param list[int] a: sorted array of integers
        :param int x: target integer sum
        :return: list of indices (base index 1), or [-1, -1] when absent
        :rtype: list[int]
        """
        if not a:
            return [-1, -1]
        lo, hi = 0, len(a) - 1
        while lo < hi:
            pair_sum = a[lo] + a[hi]
            if pair_sum == x:
                return [lo + 1, hi + 1]
            if pair_sum > x:
                # Sum too large: shrink from the right.
                hi -= 1
            else:
                # Sum too small: grow from the left.
                lo += 1
        return [-1, -1]
## MAIN MODULE
if __name__ == "__main__":
    # Import exercise parameters (array and target sum from stdin)
    # NOTE(review): ``Input`` is not defined in this copy of the module —
    # confirm the Input helper class is available before running.
    a, x = Input()\
           .stdin(sys.stdin)
    # Evaluate solution and print the resulting index pair
    z = Solution()\
        .two_sum(a, x)
    print(z)
## END OF FILE | 25.365079 | 73 | 0.477263 | # -*- coding: utf-8 -*-
"""
Leetcode - Two Sum II
https://leetcode.com/problems/two-sum-ii-input-array-is-sorted
Created on Sun Nov 18 20:39:12 2018
@author: Arthur Dysart
"""
## REQUIRED MODULES
import sys
## MODULE DEFINITIONS
class Solution:
    """
    One-pointer with binary search of sorted array.

    Time complexity: O(n)
    - Amortized iterate over all elements in array
    Space complexity: O(1)
    - Constant pointer evaluation
    """

    def two_sum(self, a, x):
        """
        Determines indices (base index 1) of the two elements summing to "x".

        :param list[int] a: sorted array of integers
        :param int x: target integer sum
        :return: list of indices (base index 1), or [-1, -1] when absent
        :rtype: list[int]
        """
        if not a:
            return [-1, -1]
        size = len(a)
        for left in range(size):
            complement = x - a[left]
            right = self.find_target(complement, left, size, a)
            if right:
                # find_target never reports index 0 (it searches from
                # left + 1 onward), so a truthy value means "found".
                return [left + 1, right + 1]
        return [-1, -1]

    def find_target(self, t, i, n, a):
        """
        Searches for target "t" by binary search of a[i + 1 : n].

        :param int t: target integer for two sum
        :param int i: index just below the search range
        :param int n: length of input array
        :param list[int] a: sorted array of integers
        :return: index of the target element, or 0 when absent
        :rtype: int
        """
        if not a or not n:
            return 0
        lo, hi = i + 1, n - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if a[mid] == t:
                # Target element found
                return mid
            if a[mid] < t:
                lo = mid + 1
            else:
                hi = mid - 1
        return 0
class Solution2:
    """
    One-pointer unitary search of sorted array (with memoization).

    Time complexity: O(n)
    - Amortized iterate over all elements in array
    Space complexity: O(1)
    - Constant pointer evaluation
    """

    def two_sum(self, a, x):
        """
        Determines indices (base index 1) of the two elements summing to "x".

        :param list[int] a: sorted array of integers
        :param int x: target integer sum
        :return: list of indices (base index 1), or [-1, -1] when absent
        :rtype: list[int]
        """
        if not a:
            return [-1, -1]
        seen = {}
        for j, value in enumerate(a):
            complement = x - value
            if complement in seen:
                # Complementary element was visited earlier.
                return [seen[complement] + 1, j + 1]
            # Remember this value's position for later complements.
            seen[value] = j
        return [-1, -1]
class Solution3:
    """
    Two-pointer unitary search of sorted array.

    Time complexity: O(n)
    - Amortized iterate over all elements in array
    Space complexity: O(1)
    - Constant pointer evaluation
    """

    def two_sum(self, a, x):
        """
        Determines indices (base index 1) of the two elements summing to "x".

        :param list[int] a: sorted array of integers
        :param int x: target integer sum
        :return: list of indices (base index 1), or [-1, -1] when absent
        :rtype: list[int]
        """
        if not a:
            return [-1, -1]
        lo, hi = 0, len(a) - 1
        while lo < hi:
            pair_sum = a[lo] + a[hi]
            if pair_sum == x:
                return [lo + 1, hi + 1]
            if pair_sum > x:
                # Sum too large: shrink from the right.
                hi -= 1
            else:
                # Sum too small: grow from the left.
                lo += 1
        return [-1, -1]
class Input:
    """Reader for the exercise's standard-input format."""

    def stdin(self, sys_stdin):
        """
        Imports standard input: a bracketed array line and a target line.

        :param sys_stdin: iterable of input lines (e.g. sys.stdin)
        :return: sorted array of integers and target integer sum
        :rtype: tuple[list[int], int]
        """
        raw_lines = [line.strip("[]\n") for line in sys_stdin]
        array_line, target_line = raw_lines[0], raw_lines[1]
        a = [int(token.strip()) for token in array_line.split(",")]
        return a, int(target_line)
## MAIN MODULE
if __name__ == "__main__":
    # Import exercise parameters (array and target sum from stdin)
    a, x = Input()\
           .stdin(sys.stdin)
    # Evaluate solution and print the resulting index pair
    z = Solution()\
        .two_sum(a, x)
    print(z)
## END OF FILE | 0 | 444 | 25 |
c6a64b8df51562d7cfa6dd28725199fe6d3b5a6d | 324 | py | Python | flight/migrations/0004_auto_20190719_0900.py | PatrickCmd/flight-booking-application | a521932530d622c6eef46ea6a0f968c8267b622e | [
"MIT"
] | null | null | null | flight/migrations/0004_auto_20190719_0900.py | PatrickCmd/flight-booking-application | a521932530d622c6eef46ea6a0f968c8267b622e | [
"MIT"
] | 8 | 2020-02-12T01:01:51.000Z | 2022-03-11T23:59:40.000Z | flight/migrations/0004_auto_20190719_0900.py | PatrickCmd/flight-booking-application | a521932530d622c6eef46ea6a0f968c8267b622e | [
"MIT"
] | 1 | 2019-08-13T19:04:11.000Z | 2019-08-13T19:04:11.000Z | # Generated by Django 2.2.3 on 2019-07-19 09:00
from django.db import migrations
| 21.6 | 72 | 0.660494 | # Generated by Django 2.2.3 on 2019-07-19 09:00
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``number`` field of the ``seat`` model to ``seat_number``."""

    dependencies = [("flight", "0003_auto_20190719_0814")]

    operations = [
        # Column rename only; existing seat data is preserved.
        migrations.RenameField(
            model_name="seat", old_name="number", new_name="seat_number"
        )
    ]
| 0 | 218 | 23 |
320f6aa7caac035b61fa34a5b1c169e05b05a8d9 | 319 | py | Python | gryphon/tests/environment/exchange_wrappers/coinbase_live_orders.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
] | 1,109 | 2019-06-20T19:23:27.000Z | 2022-03-20T14:03:43.000Z | gryphon/tests/environment/exchange_wrappers/coinbase_live_orders.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
] | 63 | 2019-06-21T05:36:17.000Z | 2021-05-26T21:08:15.000Z | gryphon/tests/environment/exchange_wrappers/coinbase_live_orders.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
] | 181 | 2019-06-20T19:42:05.000Z | 2022-03-21T13:05:13.000Z | import pyximport; pyximport.install()
from gryphon.lib.exchange.coinbase_btc_usd import CoinbaseBTCUSDExchange
from gryphon.tests.environment.exchange_wrappers.live_orders import LiveOrdersTest
| 31.9 | 82 | 0.830721 | import pyximport; pyximport.install()
from gryphon.lib.exchange.coinbase_btc_usd import CoinbaseBTCUSDExchange
from gryphon.tests.environment.exchange_wrappers.live_orders import LiveOrdersTest
class TestCoinbaseBTCUSDLiveOrders(LiveOrdersTest):
    """Run the shared live-orders test suite against Coinbase BTC/USD."""

    def setUp(self):
        # Provide the concrete exchange wrapper the LiveOrdersTest suite drives.
        self.exchange = CoinbaseBTCUSDExchange()
| 44 | 30 | 49 |
e84c648aa2207e9f18ac7f16ec41c37e06f68057 | 2,351 | py | Python | addons/project/tests/test_project_config.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/project/tests/test_project_config.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/project/tests/test_project_config.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from .test_project_base import TestProjectCommon
_logger = logging.getLogger(__name__)
class TestProjectConfig(TestProjectCommon):
    """Test module configuration and its effects on projects."""

    @classmethod
    def setUpClass(cls):
        """Cache registries and normalize feature flags for the suite.

        Bug fix: this copy of the class stacked two ``@classmethod``
        decorators on ``_set_feature_status`` and never initialized the
        ``cls.Project`` / ``cls.Settings`` / ``cls.features`` attributes
        that every test relies on; the missing ``setUpClass`` is restored.
        """
        super(TestProjectConfig, cls).setUpClass()
        cls.Project = cls.env["project.project"]
        cls.Settings = cls.env["res.config.settings"]
        cls.features = (
            # Pairs of associated (config_flag, project_flag)
            ("group_subtask_project", "allow_subtasks"),
            ("group_project_recurring_tasks", "allow_recurring_tasks"),
            ("group_project_rating", "rating_active"),
        )
        # Start with a known value on feature flags to ensure validity of tests
        cls._set_feature_status(is_enabled=False)

    @classmethod
    def _set_feature_status(cls, is_enabled):
        """Set enabled/disabled status of all optional features in the
        project app config to is_enabled (boolean).
        """
        features_config = cls.Settings.create(
            {feature[0]: is_enabled for feature in cls.features})
        features_config.execute()

    def test_existing_projects_enable_features(self):
        """Check that *existing* projects have features enabled when
        the user enables them in the module configuration.
        """
        self._set_feature_status(is_enabled=True)
        for config_flag, project_flag in self.features:
            self.assertTrue(
                self.project_pigs[project_flag],
                "Existing project failed to adopt activation of "
                f"{config_flag}/{project_flag} feature")

    def test_new_projects_enable_features(self):
        """Check that after the user enables features in the module
        configuration, *newly created* projects have those features
        enabled as well.
        """
        self._set_feature_status(is_enabled=True)
        project_cows = self.Project.create({
            "name": "Cows",
            "partner_id": self.partner_1.id})
        for config_flag, project_flag in self.features:
            self.assertTrue(
                project_cows[project_flag],
                f"Newly created project failed to adopt activation of "
                f"{config_flag}/{project_flag} feature")
f"{config_flag}/{project_flag} feature")
| 37.919355 | 79 | 0.649085 | # -*- coding: utf-8 -*-
import logging
from .test_project_base import TestProjectCommon
_logger = logging.getLogger(__name__)
class TestProjectConfig(TestProjectCommon):
    """Test module configuration and its effects on projects."""

    @classmethod
    def setUpClass(cls):
        super(TestProjectConfig, cls).setUpClass()
        # Frequently used registries, cached on the class for all tests.
        cls.Project = cls.env["project.project"]
        cls.Settings = cls.env["res.config.settings"]
        cls.features = (
            # Pairs of associated (config_flag, project_flag)
            ("group_subtask_project", "allow_subtasks"),
            ("group_project_recurring_tasks", "allow_recurring_tasks"),
            ("group_project_rating", "rating_active"),
        )
        # Start with a known value on feature flags to ensure validity of tests
        cls._set_feature_status(is_enabled=False)

    @classmethod
    def _set_feature_status(cls, is_enabled):
        """Set enabled/disabled status of all optional features in the
        project app config to is_enabled (boolean).
        """
        # Creating and executing a settings record applies the flags the
        # same way saving the Settings form in the UI would.
        features_config = cls.Settings.create(
            {feature[0]: is_enabled for feature in cls.features})
        features_config.execute()

    def test_existing_projects_enable_features(self):
        """Check that *existing* projects have features enabled when
        the user enables them in the module configuration.
        """
        self._set_feature_status(is_enabled=True)
        for config_flag, project_flag in self.features:
            self.assertTrue(
                self.project_pigs[project_flag],
                "Existing project failed to adopt activation of "
                f"{config_flag}/{project_flag} feature")

    def test_new_projects_enable_features(self):
        """Check that after the user enables features in the module
        configuration, *newly created* projects have those features
        enabled as well.
        """
        self._set_feature_status(is_enabled=True)
        project_cows = self.Project.create({
            "name": "Cows",
            "partner_id": self.partner_1.id})
        for config_flag, project_flag in self.features:
            self.assertTrue(
                project_cows[project_flag],
                f"Newly created project failed to adopt activation of "
                f"{config_flag}/{project_flag} feature")
| 569 | 0 | 26 |